1/* bnx2x_main.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
41#include <linux/if_vlan.h>
42#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
45#include <net/ip6_checksum.h>
46#include <linux/workqueue.h>
47#include <linux/crc32.h>
48#include <linux/crc32c.h>
49#include <linux/prefetch.h>
50#include <linux/zlib.h>
51#include <linux/io.h>
52
53
54#include "bnx2x.h"
55#include "bnx2x_init.h"
56#include "bnx2x_init_ops.h"
57#include "bnx2x_dump.h"
58
59#define DRV_MODULE_VERSION "1.48.114-1"
60#define DRV_MODULE_RELDATE "2009/07/29"
61#define BNX2X_BC_VER 0x040200
62
63#include <linux/firmware.h>
64#include "bnx2x_fw_file_hdr.h"
65/* FW files */
66#define FW_FILE_PREFIX_E1 "bnx2x-e1-"
67#define FW_FILE_PREFIX_E1H "bnx2x-e1h-"
68
69/* Time in jiffies before concluding the transmitter is hung */
70#define TX_TIMEOUT (5*HZ)
71
72static char version[] __devinitdata =
73 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
74 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
75
76MODULE_AUTHOR("Eliezer Tamir");
77MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
78MODULE_LICENSE("GPL");
79MODULE_VERSION(DRV_MODULE_VERSION);
80
81static int multi_mode = 1;
82module_param(multi_mode, int, 0);
83MODULE_PARM_DESC(multi_mode, " Multi queue mode "
84 "(0 Disable; 1 Enable (default))");
85
86static int num_rx_queues;
87module_param(num_rx_queues, int, 0);
88MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
89 " (default is half number of CPUs)");
90
91static int num_tx_queues;
92module_param(num_tx_queues, int, 0);
93MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
94 " (default is half number of CPUs)");
95
96static int disable_tpa;
97module_param(disable_tpa, int, 0);
98MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
99
100static int int_mode;
101module_param(int_mode, int, 0);
102MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
103
104static int dropless_fc;
105module_param(dropless_fc, int, 0);
106MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
107
108static int poll;
109module_param(poll, int, 0);
110MODULE_PARM_DESC(poll, " Use polling (for debug)");
111
112static int mrrs = -1;
113module_param(mrrs, int, 0);
114MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
115
116static int debug;
117module_param(debug, int, 0);
118MODULE_PARM_DESC(debug, " Default debug msglevel");
119
120static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
121
122static struct workqueue_struct *bnx2x_wq;
123
124enum bnx2x_board_type {
125 BCM57710 = 0,
126 BCM57711 = 1,
127 BCM57711E = 2,
128};
129
130/* indexed by board_type, above */
131static struct {
132 char *name;
133} board_info[] __devinitdata = {
134 { "Broadcom NetXtreme II BCM57710 XGb" },
135 { "Broadcom NetXtreme II BCM57711 XGb" },
136 { "Broadcom NetXtreme II BCM57711E XGb" }
137};
138
139
140static const struct pci_device_id bnx2x_pci_tbl[] = {
141 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
142 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
143 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
144 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
145 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
146 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
147 { 0 }
148};
149
150MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
151
152/****************************************************************************
153* General service functions
154****************************************************************************/
155
156/* used only at init
157 * locking is done by mcp
158 */
159static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
160{
161 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
162 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
163 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
164 PCICFG_VENDOR_ID_OFFSET);
165}
166
167static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
168{
169 u32 val;
170
171 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
172 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
173 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
174 PCICFG_VENDOR_ID_OFFSET);
175
176 return val;
177}
178
179static const u32 dmae_reg_go_c[] = {
180 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
181 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
182 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
183 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
184};
185
186/* copy command into DMAE command memory and set DMAE command go */
187static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
188 int idx)
189{
190 u32 cmd_offset;
191 int i;
192
193 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
194 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
195 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
196
197 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
198 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
199 }
200 REG_WR(bp, dmae_reg_go_c[idx], 1);
201}
202
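/* DMA a block of len32 dwords from host memory (dma_addr) to the device
 * GRC address dst_addr, then spin on the slowpath write-back word until
 * the DMAE engine reports completion (or the retry count expires).
 * Falls back to indirect register writes while DMAE is not yet ready.
 */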
203void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
204 u32 len32)
205{
206 struct dmae_command *dmae = &bp->init_dmae;
207 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
208 int cnt = 200;
209
210 if (!bp->dmae_ready) {
211 u32 *data = bnx2x_sp(bp, wb_data[0]);
212
213 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
214 " using indirect\n", dst_addr, len32);
215 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
216 return;
217 }
218
219 mutex_lock(&bp->dmae_mutex);
220
221 memset(dmae, 0, sizeof(struct dmae_command));
222
223 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
224 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
225 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
226#ifdef __BIG_ENDIAN
227 DMAE_CMD_ENDIANITY_B_DW_SWAP |
228#else
229 DMAE_CMD_ENDIANITY_DW_SWAP |
230#endif
231 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
232 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
233 dmae->src_addr_lo = U64_LO(dma_addr);
234 dmae->src_addr_hi = U64_HI(dma_addr);
235 dmae->dst_addr_lo = dst_addr >> 2;
236 dmae->dst_addr_hi = 0;
237 dmae->len = len32;
238 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
239 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
240 dmae->comp_val = DMAE_COMP_VAL;
241
242 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
243 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
244 "dst_addr [%x:%08x (%08x)]\n"
245 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
246 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
247 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
248 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
249 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
250 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
251 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
252
253 *wb_comp = 0;
254
255 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
256
257 udelay(5);
258
259 while (*wb_comp != DMAE_COMP_VAL) {
260 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
261
262 if (!cnt) {
263 BNX2X_ERR("DMAE timeout!\n");
264 break;
265 }
266 cnt--;
267 /* adjust delay for emulation/FPGA */
268 if (CHIP_REV_IS_SLOW(bp))
269 msleep(100);
270 else
271 udelay(5);
272 }
273
274 mutex_unlock(&bp->dmae_mutex);
275}
276
277void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
278{
279 struct dmae_command *dmae = &bp->init_dmae;
280 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
281 int cnt = 200;
282
283 if (!bp->dmae_ready) {
284 u32 *data = bnx2x_sp(bp, wb_data[0]);
285 int i;
286
287 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
288 " using indirect\n", src_addr, len32);
289 for (i = 0; i < len32; i++)
290 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
291 return;
292 }
293
294 mutex_lock(&bp->dmae_mutex);
295
296 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
297 memset(dmae, 0, sizeof(struct dmae_command));
298
299 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
300 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
301 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
302#ifdef __BIG_ENDIAN
303 DMAE_CMD_ENDIANITY_B_DW_SWAP |
304#else
305 DMAE_CMD_ENDIANITY_DW_SWAP |
306#endif
307 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
308 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
309 dmae->src_addr_lo = src_addr >> 2;
310 dmae->src_addr_hi = 0;
311 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
312 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
313 dmae->len = len32;
314 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
315 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
316 dmae->comp_val = DMAE_COMP_VAL;
317
318 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
319 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
320 "dst_addr [%x:%08x (%08x)]\n"
321 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
322 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
323 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
324 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
325
326 *wb_comp = 0;
327
328 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
329
330 udelay(5);
331
332 while (*wb_comp != DMAE_COMP_VAL) {
333
334 if (!cnt) {
335 BNX2X_ERR("DMAE timeout!\n");
336 break;
337 }
338 cnt--;
339 /* adjust delay for emulation/FPGA */
340 if (CHIP_REV_IS_SLOW(bp))
341 msleep(100);
342 else
343 udelay(5);
344 }
345 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
346 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
347 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
348
349 mutex_unlock(&bp->dmae_mutex);
350}
351
352/* used only for slowpath so not inlined */
353static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
354{
355 u32 wb_write[2];
356
357 wb_write[0] = val_hi;
358 wb_write[1] = val_lo;
359 REG_WR_DMAE(bp, reg, wb_write, 2);
360}
361
362#ifdef USE_WB_RD
363static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
364{
365 u32 wb_data[2];
366
367 REG_RD_DMAE(bp, reg, wb_data, 2);
368
369 return HILO_U64(wb_data[0], wb_data[1]);
370}
371#endif
372
373static int bnx2x_mc_assert(struct bnx2x *bp)
374{
375 char last_idx;
376 int i, rc = 0;
377 u32 row0, row1, row2, row3;
378
379 /* XSTORM */
380 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
381 XSTORM_ASSERT_LIST_INDEX_OFFSET);
382 if (last_idx)
383 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
384
385 /* print the asserts */
386 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
387
388 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
389 XSTORM_ASSERT_LIST_OFFSET(i));
390 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
391 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
392 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
393 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
394 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
395 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
396
397 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
398 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
399 " 0x%08x 0x%08x 0x%08x\n",
400 i, row3, row2, row1, row0);
401 rc++;
402 } else {
403 break;
404 }
405 }
406
407 /* TSTORM */
408 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
409 TSTORM_ASSERT_LIST_INDEX_OFFSET);
410 if (last_idx)
411 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
412
413 /* print the asserts */
414 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
415
416 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
417 TSTORM_ASSERT_LIST_OFFSET(i));
418 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
419 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
420 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
421 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
422 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
423 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
424
425 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
426 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
427 " 0x%08x 0x%08x 0x%08x\n",
428 i, row3, row2, row1, row0);
429 rc++;
430 } else {
431 break;
432 }
433 }
434
435 /* CSTORM */
436 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
437 CSTORM_ASSERT_LIST_INDEX_OFFSET);
438 if (last_idx)
439 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
440
441 /* print the asserts */
442 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
443
444 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
445 CSTORM_ASSERT_LIST_OFFSET(i));
446 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
447 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
448 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
449 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
450 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
451 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
452
453 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
454 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
455 " 0x%08x 0x%08x 0x%08x\n",
456 i, row3, row2, row1, row0);
457 rc++;
458 } else {
459 break;
460 }
461 }
462
463 /* USTORM */
464 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
465 USTORM_ASSERT_LIST_INDEX_OFFSET);
466 if (last_idx)
467 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
468
469 /* print the asserts */
470 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
471
472 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
473 USTORM_ASSERT_LIST_OFFSET(i));
474 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
475 USTORM_ASSERT_LIST_OFFSET(i) + 4);
476 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
477 USTORM_ASSERT_LIST_OFFSET(i) + 8);
478 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
479 USTORM_ASSERT_LIST_OFFSET(i) + 12);
480
481 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
482 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
483 " 0x%08x 0x%08x 0x%08x\n",
484 i, row3, row2, row1, row0);
485 rc++;
486 } else {
487 break;
488 }
489 }
490
491 return rc;
492}
493
494static void bnx2x_fw_dump(struct bnx2x *bp)
495{
496 u32 mark, offset;
497 __be32 data[9];
498 int word;
499
500 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
501 mark = ((mark + 0x3) & ~0x3);
502 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
503
504 printk(KERN_ERR PFX);
505 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
506 for (word = 0; word < 8; word++)
507 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
508 offset + 4*word));
509 data[8] = 0x0;
510 printk(KERN_CONT "%s", (char *)data);
511 }
512 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
513 for (word = 0; word < 8; word++)
514 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
515 offset + 4*word));
516 data[8] = 0x0;
517 printk(KERN_CONT "%s", (char *)data);
518 }
519 printk(KERN_ERR PFX "end of fw dump\n");
520}
521
522static void bnx2x_panic_dump(struct bnx2x *bp)
523{
524 int i;
525 u16 j, start, end;
526
527 bp->stats_state = STATS_STATE_DISABLED;
528 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
529
530 BNX2X_ERR("begin crash dump -----------------\n");
531
532 /* Indices */
533 /* Common */
534 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
535 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
536 " spq_prod_idx(%u)\n",
537 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
538 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
539
540 /* Rx */
541 for_each_rx_queue(bp, i) {
542 struct bnx2x_fastpath *fp = &bp->fp[i];
543
544 BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
545 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
546 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
547 i, fp->rx_bd_prod, fp->rx_bd_cons,
548 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
549 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
550 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
551 " fp_u_idx(%x) *sb_u_idx(%x)\n",
552 fp->rx_sge_prod, fp->last_max_sge,
553 le16_to_cpu(fp->fp_u_idx),
554 fp->status_blk->u_status_block.status_block_index);
555 }
556
557 /* Tx */
558 for_each_tx_queue(bp, i) {
559 struct bnx2x_fastpath *fp = &bp->fp[i];
560
561 BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
562 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
563 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
564 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
565 BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
566 " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
567 fp->status_blk->c_status_block.status_block_index,
568 fp->tx_db.data.prod);
569 }
570
571 /* Rings */
572 /* Rx */
573 for_each_rx_queue(bp, i) {
574 struct bnx2x_fastpath *fp = &bp->fp[i];
575
576 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
577 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
578 for (j = start; j != end; j = RX_BD(j + 1)) {
579 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
580 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
581
582 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
583 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
584 }
585
586 start = RX_SGE(fp->rx_sge_prod);
587 end = RX_SGE(fp->last_max_sge);
588 for (j = start; j != end; j = RX_SGE(j + 1)) {
589 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
590 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
591
592 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
593 i, j, rx_sge[1], rx_sge[0], sw_page->page);
594 }
595
596 start = RCQ_BD(fp->rx_comp_cons - 10);
597 end = RCQ_BD(fp->rx_comp_cons + 503);
598 for (j = start; j != end; j = RCQ_BD(j + 1)) {
599 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
600
601 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
602 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
603 }
604 }
605
606 /* Tx */
607 for_each_tx_queue(bp, i) {
608 struct bnx2x_fastpath *fp = &bp->fp[i];
609
610 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
611 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
612 for (j = start; j != end; j = TX_BD(j + 1)) {
613 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
614
615 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
616 i, j, sw_bd->skb, sw_bd->first_bd);
617 }
618
619 start = TX_BD(fp->tx_bd_cons - 10);
620 end = TX_BD(fp->tx_bd_cons + 254);
621 for (j = start; j != end; j = TX_BD(j + 1)) {
622 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
623
624 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
625 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
626 }
627 }
628
629 bnx2x_fw_dump(bp);
630 bnx2x_mc_assert(bp);
631 BNX2X_ERR("end crash dump -----------------\n");
632}
633
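/* Program the HC configuration register of the current port according
 * to the interrupt mode in use (MSI-X, MSI or INTx) and, on E1H, set up
 * the leading/trailing edge attention registers.
 */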
634static void bnx2x_int_enable(struct bnx2x *bp)
635{
636 int port = BP_PORT(bp);
637 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
638 u32 val = REG_RD(bp, addr);
639 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
640 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
641
642 if (msix) {
643 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
644 HC_CONFIG_0_REG_INT_LINE_EN_0);
645 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
646 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
647 } else if (msi) {
648 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
649 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
650 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
651 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
652 } else {
653 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
654 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
655 HC_CONFIG_0_REG_INT_LINE_EN_0 |
656 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
657
658 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
659 val, port, addr);
660
661 REG_WR(bp, addr, val);
662
663 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
664 }
665
666 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
667 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
668
669 REG_WR(bp, addr, val);
670 /*
671 * Ensure that HC_CONFIG is written before leading/trailing edge config
672 */
673 mmiowb();
674 barrier();
675
676 if (CHIP_IS_E1H(bp)) {
677 /* init leading/trailing edge */
678 if (IS_E1HMF(bp)) {
679 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
680 if (bp->port.pmf)
681 /* enable nig and gpio3 attention */
682 val |= 0x1100;
683 } else
684 val = 0xffff;
685
686 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
687 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
688 }
689
690 /* Make sure that interrupts are indeed enabled from here on */
691 mmiowb();
692}
693
694static void bnx2x_int_disable(struct bnx2x *bp)
695{
696 int port = BP_PORT(bp);
697 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
698 u32 val = REG_RD(bp, addr);
699
700 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
701 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
702 HC_CONFIG_0_REG_INT_LINE_EN_0 |
703 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
704
705 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
706 val, port, addr);
707
708 /* flush all outstanding writes */
709 mmiowb();
710
711 REG_WR(bp, addr, val);
712 if (REG_RD(bp, addr) != val)
713 BNX2X_ERR("BUG! proper val not read from IGU!\n");
714
715}
716
717static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
718{
719 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
720 int i, offset;
721
722 /* disable interrupt handling */
723 atomic_inc(&bp->intr_sem);
724 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
725
726 if (disable_hw)
727 /* prevent the HW from sending interrupts */
728 bnx2x_int_disable(bp);
729
730 /* make sure all ISRs are done */
731 if (msix) {
732 synchronize_irq(bp->msix_table[0].vector);
733 offset = 1;
734 for_each_queue(bp, i)
735 synchronize_irq(bp->msix_table[i + offset].vector);
736 } else
737 synchronize_irq(bp->pdev->irq);
738
739 /* make sure sp_task is not running */
740 cancel_delayed_work(&bp->sp_task);
741 flush_workqueue(bnx2x_wq);
742}
743
744/* fast path */
745
746/*
747 * General service functions
748 */
749
750static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
751 u8 storm, u16 index, u8 op, u8 update)
752{
753 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
754 COMMAND_REG_INT_ACK);
755 struct igu_ack_register igu_ack;
756
757 igu_ack.status_block_index = index;
758 igu_ack.sb_id_and_flags =
759 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
760 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
761 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
762 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
763
764 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
765 (*(u32 *)&igu_ack), hc_addr);
766 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
767
768 /* Make sure that ACK is written */
769 mmiowb();
770 barrier();
771}
772
773static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
774{
775 struct host_status_block *fpsb = fp->status_blk;
776 u16 rc = 0;
777
778 barrier(); /* status block is written to by the chip */
779 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
780 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
781 rc |= 1;
782 }
783 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
784 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
785 rc |= 2;
786 }
787 return rc;
788}
789
790static u16 bnx2x_ack_int(struct bnx2x *bp)
791{
792 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
793 COMMAND_REG_SIMD_MASK);
794 u32 result = REG_RD(bp, hc_addr);
795
796 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
797 result, hc_addr);
798
799 return result;
800}
801
802
803/*
804 * fast path service functions
805 */
806
807static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
808{
809 /* Tell compiler that consumer and producer can change */
810 barrier();
811 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
812}
813
814/* free skb in the packet ring at pos idx
815 * return idx of last bd freed
816 */
817static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
818 u16 idx)
819{
820 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
821 struct eth_tx_start_bd *tx_start_bd;
822 struct eth_tx_bd *tx_data_bd;
823 struct sk_buff *skb = tx_buf->skb;
824 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
825 int nbd;
826
827 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
828 idx, tx_buf, skb);
829
830 /* unmap first bd */
831 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
832 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
833 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
834 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
835
836 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
837#ifdef BNX2X_STOP_ON_ERROR
838 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
839 BNX2X_ERR("BAD nbd!\n");
840 bnx2x_panic();
841 }
842#endif
843 new_cons = nbd + tx_buf->first_bd;
844
845 /* Get the next bd */
846 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
847
848 /* Skip a parse bd... */
849 --nbd;
850 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
851
852 /* ...and the TSO split header bd since they have no mapping */
853 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
854 --nbd;
855 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
856 }
857
858 /* now free frags */
859 while (nbd > 0) {
860
861 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
862 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
863 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
864 BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
865 if (--nbd)
866 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
867 }
868
869 /* release skb */
870 WARN_ON(!skb);
871 dev_kfree_skb_any(skb);
872 tx_buf->first_bd = 0;
873 tx_buf->skb = NULL;
874
875 return new_cons;
876}
877
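/* Return the number of free Tx BDs, counting the NUM_TX_RINGS
 * "next page" entries as always in use.
 */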
878static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
879{
880 s16 used;
881 u16 prod;
882 u16 cons;
883
884 barrier(); /* Tell compiler that prod and cons can change */
885 prod = fp->tx_bd_prod;
886 cons = fp->tx_bd_cons;
887
888 /* NUM_TX_RINGS = number of "next-page" entries
889 It will be used as a threshold */
890 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
891
892#ifdef BNX2X_STOP_ON_ERROR
893 WARN_ON(used < 0);
894 WARN_ON(used > fp->bp->tx_ring_size);
895 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
896#endif
897
898 return (s16)(fp->bp->tx_ring_size) - used;
899}
900
901static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
902{
903 struct bnx2x *bp = fp->bp;
904 struct netdev_queue *txq;
905 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
906 int done = 0;
907
908#ifdef BNX2X_STOP_ON_ERROR
909 if (unlikely(bp->panic))
910 return;
911#endif
912
913 txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
914 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
915 sw_cons = fp->tx_pkt_cons;
916
917 while (sw_cons != hw_cons) {
918 u16 pkt_cons;
919
920 pkt_cons = TX_BD(sw_cons);
921
922 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
923
924 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
925 hw_cons, sw_cons, pkt_cons);
926
927/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
928 rmb();
929 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
930 }
931*/
932 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
933 sw_cons++;
934 done++;
935 }
936
937 fp->tx_pkt_cons = sw_cons;
938 fp->tx_bd_cons = bd_cons;
939
940 /* TBD need a thresh? */
941 if (unlikely(netif_tx_queue_stopped(txq))) {
942
943 /* Need to make the tx_bd_cons update visible to start_xmit()
944 * before checking for netif_tx_queue_stopped(). Without the
945 * memory barrier, there is a small possibility that
946 * start_xmit() will miss it and cause the queue to be stopped
947 * forever.
948 */
949 smp_mb();
950
951 if ((netif_tx_queue_stopped(txq)) &&
952 (bp->state == BNX2X_STATE_OPEN) &&
953 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
954 netif_tx_wake_queue(txq);
955 }
956}
957
958
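/* Handle a slowpath (ramrod) completion CQE: advance the fastpath state
 * for per-queue ramrods or bp->state for port-level ones, and return the
 * slowpath queue credit taken when the ramrod was posted.
 */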
959static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
960 union eth_rx_cqe *rr_cqe)
961{
962 struct bnx2x *bp = fp->bp;
963 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
964 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
965
966 DP(BNX2X_MSG_SP,
967 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
968 fp->index, cid, command, bp->state,
969 rr_cqe->ramrod_cqe.ramrod_type);
970
971 bp->spq_left++;
972
973 if (fp->index) {
974 switch (command | fp->state) {
975 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
976 BNX2X_FP_STATE_OPENING):
977 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
978 cid);
979 fp->state = BNX2X_FP_STATE_OPEN;
980 break;
981
982 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
983 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
984 cid);
985 fp->state = BNX2X_FP_STATE_HALTED;
986 break;
987
988 default:
989 BNX2X_ERR("unexpected MC reply (%d) "
990 "fp->state is %x\n", command, fp->state);
991 break;
992 }
993 mb(); /* force bnx2x_wait_ramrod() to see the change */
994 return;
995 }
996
997 switch (command | bp->state) {
998 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
999 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
1000 bp->state = BNX2X_STATE_OPEN;
1001 break;
1002
1003 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1004 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1005 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1006 fp->state = BNX2X_FP_STATE_HALTED;
1007 break;
1008
1009 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
1010 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
1011 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
1012 break;
1013
1014
1015 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
1016 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
1017 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1018 bp->set_mac_pending = 0;
1019 break;
1020
1021 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1022 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
1023 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1024 break;
1025
1026 default:
1027 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
1028 command, bp->state);
1029 break;
1030 }
1031 mb(); /* force bnx2x_wait_ramrod() to see the change */
1032}
1033
1034static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1035 struct bnx2x_fastpath *fp, u16 index)
1036{
1037 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1038 struct page *page = sw_buf->page;
1039 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1040
1041 /* Skip "next page" elements */
1042 if (!page)
1043 return;
1044
1045 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1046 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1047 __free_pages(page, PAGES_PER_SGE_SHIFT);
1048
1049 sw_buf->page = NULL;
1050 sge->addr_hi = 0;
1051 sge->addr_lo = 0;
1052}
1053
1054static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1055 struct bnx2x_fastpath *fp, int last)
1056{
1057 int i;
1058
1059 for (i = 0; i < last; i++)
1060 bnx2x_free_rx_sge(bp, fp, i);
1061}
1062
1063static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1064 struct bnx2x_fastpath *fp, u16 index)
1065{
1066 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1067 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1068 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1069 dma_addr_t mapping;
1070
1071 if (unlikely(page == NULL))
1072 return -ENOMEM;
1073
1074 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1075 PCI_DMA_FROMDEVICE);
1076 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1077 __free_pages(page, PAGES_PER_SGE_SHIFT);
1078 return -ENOMEM;
1079 }
1080
1081 sw_buf->page = page;
1082 pci_unmap_addr_set(sw_buf, mapping, mapping);
1083
1084 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1085 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1086
1087 return 0;
1088}
1089
1090static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1091 struct bnx2x_fastpath *fp, u16 index)
1092{
1093 struct sk_buff *skb;
1094 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1095 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1096 dma_addr_t mapping;
1097
1098 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1099 if (unlikely(skb == NULL))
1100 return -ENOMEM;
1101
1102 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1103 PCI_DMA_FROMDEVICE);
1104 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1105 dev_kfree_skb(skb);
1106 return -ENOMEM;
1107 }
1108
1109 rx_buf->skb = skb;
1110 pci_unmap_addr_set(rx_buf, mapping, mapping);
1111
1112 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1113 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1114
1115 return 0;
1116}
1117
1118/* note that we are not allocating a new skb,
 1119 * we are just moving one from cons to prod;
1120 * we are not creating a new mapping,
1121 * so there is no need to check for dma_mapping_error().
1122 */
1123static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1124 struct sk_buff *skb, u16 cons, u16 prod)
1125{
1126 struct bnx2x *bp = fp->bp;
1127 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1128 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1129 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1130 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1131
1132 pci_dma_sync_single_for_device(bp->pdev,
1133 pci_unmap_addr(cons_rx_buf, mapping),
1134 RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1135
1136 prod_rx_buf->skb = cons_rx_buf->skb;
1137 pci_unmap_addr_set(prod_rx_buf, mapping,
1138 pci_unmap_addr(cons_rx_buf, mapping));
1139 *prod_bd = *cons_bd;
1140}
1141
1142static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1143 u16 idx)
1144{
1145 u16 last_max = fp->last_max_sge;
1146
1147 if (SUB_S16(idx, last_max) > 0)
1148 fp->last_max_sge = idx;
1149}
1150
1151static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1152{
1153 int i, j;
1154
1155 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1156 int idx = RX_SGE_CNT * i - 1;
1157
1158 for (j = 0; j < 2; j++) {
1159 SGE_MASK_CLEAR_BIT(fp, idx);
1160 idx--;
1161 }
1162 }
1163}
1164
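/* After a TPA aggregation completes, clear the SGE mask bits for the
 * pages consumed by the CQE's SGL and advance rx_sge_prod over the mask
 * elements that have been fully consumed.
 */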
1165static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1166 struct eth_fast_path_rx_cqe *fp_cqe)
1167{
1168 struct bnx2x *bp = fp->bp;
1169 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1170 le16_to_cpu(fp_cqe->len_on_bd)) >>
1171 SGE_PAGE_SHIFT;
1172 u16 last_max, last_elem, first_elem;
1173 u16 delta = 0;
1174 u16 i;
1175
1176 if (!sge_len)
1177 return;
1178
1179 /* First mark all used pages */
1180 for (i = 0; i < sge_len; i++)
1181 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1182
1183 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1184 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1185
1186 /* Here we assume that the last SGE index is the biggest */
1187 prefetch((void *)(fp->sge_mask));
1188 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1189
1190 last_max = RX_SGE(fp->last_max_sge);
1191 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1192 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1193
1194 /* If ring is not full */
1195 if (last_elem + 1 != first_elem)
1196 last_elem++;
1197
1198 /* Now update the prod */
1199 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1200 if (likely(fp->sge_mask[i]))
1201 break;
1202
1203 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1204 delta += RX_SGE_MASK_ELEM_SZ;
1205 }
1206
1207 if (delta > 0) {
1208 fp->rx_sge_prod += delta;
1209 /* clear page-end entries */
1210 bnx2x_clear_sge_mask_next_elems(fp);
1211 }
1212
1213 DP(NETIF_MSG_RX_STATUS,
1214 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1215 fp->last_max_sge, fp->rx_sge_prod);
1216}
1217
1218static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1219{
1220 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1221 memset(fp->sge_mask, 0xff,
1222 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1223
 1224 /* Clear the last two indices in the page to 1:
1225 these are the indices that correspond to the "next" element,
1226 hence will never be indicated and should be removed from
1227 the calculations. */
1228 bnx2x_clear_sge_mask_next_elems(fp);
1229}
1230
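/* Start a TPA aggregation: map the empty skb from the per-queue TPA pool
 * into the producer BD, and park the partially received skb from the
 * consumer BD in the pool (still mapped) until the aggregation ends.
 */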
1231static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1232 struct sk_buff *skb, u16 cons, u16 prod)
1233{
1234 struct bnx2x *bp = fp->bp;
1235 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1236 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1237 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1238 dma_addr_t mapping;
1239
1240 /* move empty skb from pool to prod and map it */
1241 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1242 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1243 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1244 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1245
1246 /* move partial skb from cons to pool (don't unmap yet) */
1247 fp->tpa_pool[queue] = *cons_rx_buf;
1248
1249 /* mark bin state as start - print error if current state != stop */
1250 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1251 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1252
1253 fp->tpa_state[queue] = BNX2X_TPA_START;
1254
1255 /* point prod_bd to new skb */
1256 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1257 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1258
1259#ifdef BNX2X_STOP_ON_ERROR
1260 fp->tpa_queue_used |= (1 << queue);
1261#ifdef __powerpc64__
1262 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1263#else
1264 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1265#endif
1266 fp->tpa_queue_used);
1267#endif
1268}
1269
1270static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1271 struct sk_buff *skb,
1272 struct eth_fast_path_rx_cqe *fp_cqe,
1273 u16 cqe_idx)
1274{
1275 struct sw_rx_page *rx_pg, old_rx_pg;
1276 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1277 u32 i, frag_len, frag_size, pages;
1278 int err;
1279 int j;
1280
1281 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1282 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1283
1284 /* This is needed in order to enable forwarding support */
1285 if (frag_size)
1286 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1287 max(frag_size, (u32)len_on_bd));
1288
1289#ifdef BNX2X_STOP_ON_ERROR
1290 if (pages >
1291 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1292 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1293 pages, cqe_idx);
1294 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1295 fp_cqe->pkt_len, len_on_bd);
1296 bnx2x_panic();
1297 return -EINVAL;
1298 }
1299#endif
1300
1301 /* Run through the SGL and compose the fragmented skb */
1302 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1303 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1304
1305 /* FW gives the indices of the SGE as if the ring is an array
1306 (meaning that "next" element will consume 2 indices) */
1307 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1308 rx_pg = &fp->rx_page_ring[sge_idx];
1309 old_rx_pg = *rx_pg;
1310
1311 /* If we fail to allocate a substitute page, we simply stop
1312 where we are and drop the whole packet */
1313 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1314 if (unlikely(err)) {
1315 fp->eth_q_stats.rx_skb_alloc_failed++;
1316 return err;
1317 }
1318
 1319 /* Unmap the page as we are going to pass it to the stack */
1320 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1321 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1322
1323 /* Add one frag and update the appropriate fields in the skb */
1324 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1325
1326 skb->data_len += frag_len;
1327 skb->truesize += frag_len;
1328 skb->len += frag_len;
1329
1330 frag_size -= frag_len;
1331 }
1332
1333 return 0;
1334}
1335
1336static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1337 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1338 u16 cqe_idx)
1339{
1340 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1341 struct sk_buff *skb = rx_buf->skb;
1342 /* alloc new skb */
1343 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1344
1345 /* Unmap skb in the pool anyway, as we are going to change
1346 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1347 fails. */
1348 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1349 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1350
1351 if (likely(new_skb)) {
1352 /* fix ip xsum and give it to the stack */
1353 /* (no need to map the new skb) */
1354#ifdef BCM_VLAN
1355 int is_vlan_cqe =
1356 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1357 PARSING_FLAGS_VLAN);
1358 int is_not_hwaccel_vlan_cqe =
1359 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1360#endif
1361
1362 prefetch(skb);
1363 prefetch(((char *)(skb)) + 128);
1364
1365#ifdef BNX2X_STOP_ON_ERROR
1366 if (pad + len > bp->rx_buf_size) {
1367 BNX2X_ERR("skb_put is about to fail... "
1368 "pad %d len %d rx_buf_size %d\n",
1369 pad, len, bp->rx_buf_size);
1370 bnx2x_panic();
1371 return;
1372 }
1373#endif
1374
1375 skb_reserve(skb, pad);
1376 skb_put(skb, len);
1377
1378 skb->protocol = eth_type_trans(skb, bp->dev);
1379 skb->ip_summed = CHECKSUM_UNNECESSARY;
1380
1381 {
1382 struct iphdr *iph;
1383
1384 iph = (struct iphdr *)skb->data;
1385#ifdef BCM_VLAN
1386 /* If there is no Rx VLAN offloading -
 1387 take the VLAN tag into account */
1388 if (unlikely(is_not_hwaccel_vlan_cqe))
1389 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1390#endif
1391 iph->check = 0;
1392 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1393 }
1394
1395 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1396 &cqe->fast_path_cqe, cqe_idx)) {
1397#ifdef BCM_VLAN
1398 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1399 (!is_not_hwaccel_vlan_cqe))
1400 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1401 le16_to_cpu(cqe->fast_path_cqe.
1402 vlan_tag));
1403 else
1404#endif
1405 netif_receive_skb(skb);
1406 } else {
1407 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1408 " - dropping packet!\n");
1409 dev_kfree_skb(skb);
1410 }
1411
1412
1413 /* put new skb in bin */
1414 fp->tpa_pool[queue].skb = new_skb;
1415
1416 } else {
1417 /* else drop the packet and keep the buffer in the bin */
1418 DP(NETIF_MSG_RX_STATUS,
1419 "Failed to allocate new skb - dropping packet!\n");
1420 fp->eth_q_stats.rx_skb_alloc_failed++;
1421 }
1422
1423 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1424}
1425
1426static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1427 struct bnx2x_fastpath *fp,
1428 u16 bd_prod, u16 rx_comp_prod,
1429 u16 rx_sge_prod)
1430{
1431 struct ustorm_eth_rx_producers rx_prods = {0};
1432 int i;
1433
1434 /* Update producers */
1435 rx_prods.bd_prod = bd_prod;
1436 rx_prods.cqe_prod = rx_comp_prod;
1437 rx_prods.sge_prod = rx_sge_prod;
1438
1439 /*
1440 * Make sure that the BD and SGE data is updated before updating the
1441 * producers since FW might read the BD/SGE right after the producer
1442 * is updated.
1443 * This is only applicable for weak-ordered memory model archs such
 1444 * as IA-64. The following barrier is also mandatory since FW
1445 * assumes BDs must have buffers.
1446 */
1447 wmb();
1448
1449 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1450 REG_WR(bp, BAR_USTRORM_INTMEM +
1451 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1452 ((u32 *)&rx_prods)[i]);
1453
1454 mmiowb(); /* keep prod updates ordered */
1455
1456 DP(NETIF_MSG_RX_STATUS,
1457 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1458 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1459}
1460
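/* Rx fastpath poll routine: walk the completion queue up to the NAPI
 * budget, dispatching slowpath CQEs to bnx2x_sp_event(), handling TPA
 * start/stop aggregations and passing regular packets to the stack
 * (small packets are copied when the MTU exceeds ETH_MAX_PACKET_SIZE),
 * then publish the new Rx producers to the firmware.
 */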
1461static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1462{
1463 struct bnx2x *bp = fp->bp;
1464 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1465 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1466 int rx_pkt = 0;
1467
1468#ifdef BNX2X_STOP_ON_ERROR
1469 if (unlikely(bp->panic))
1470 return 0;
1471#endif
1472
1473 /* CQ "next element" is of the size of the regular element,
1474 that's why it's ok here */
1475 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1476 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1477 hw_comp_cons++;
1478
1479 bd_cons = fp->rx_bd_cons;
1480 bd_prod = fp->rx_bd_prod;
1481 bd_prod_fw = bd_prod;
1482 sw_comp_cons = fp->rx_comp_cons;
1483 sw_comp_prod = fp->rx_comp_prod;
1484
1485 /* Memory barrier necessary as speculative reads of the rx
1486 * buffer can be ahead of the index in the status block
1487 */
1488 rmb();
1489
1490 DP(NETIF_MSG_RX_STATUS,
1491 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1492 fp->index, hw_comp_cons, sw_comp_cons);
1493
1494 while (sw_comp_cons != hw_comp_cons) {
1495 struct sw_rx_bd *rx_buf = NULL;
1496 struct sk_buff *skb;
1497 union eth_rx_cqe *cqe;
1498 u8 cqe_fp_flags;
1499 u16 len, pad;
1500
1501 comp_ring_cons = RCQ_BD(sw_comp_cons);
1502 bd_prod = RX_BD(bd_prod);
1503 bd_cons = RX_BD(bd_cons);
1504
1505 /* Prefetch the page containing the BD descriptor
1506 at producer's index. It will be needed when new skb is
1507 allocated */
1508 prefetch((void *)(PAGE_ALIGN((unsigned long)
1509 (&fp->rx_desc_ring[bd_prod])) -
1510 PAGE_SIZE + 1));
1511
1512 cqe = &fp->rx_comp_ring[comp_ring_cons];
1513 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1514
1515 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1516 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1517 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1518 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1519 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1520 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1521
1522 /* is this a slowpath msg? */
1523 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1524 bnx2x_sp_event(fp, cqe);
1525 goto next_cqe;
1526
1527 /* this is an rx packet */
1528 } else {
1529 rx_buf = &fp->rx_buf_ring[bd_cons];
1530 skb = rx_buf->skb;
1531 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1532 pad = cqe->fast_path_cqe.placement_offset;
1533
1534 /* If CQE is marked both TPA_START and TPA_END
1535 it is a non-TPA CQE */
1536 if ((!fp->disable_tpa) &&
1537 (TPA_TYPE(cqe_fp_flags) !=
1538 (TPA_TYPE_START | TPA_TYPE_END))) {
1539 u16 queue = cqe->fast_path_cqe.queue_index;
1540
1541 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1542 DP(NETIF_MSG_RX_STATUS,
1543 "calling tpa_start on queue %d\n",
1544 queue);
1545
1546 bnx2x_tpa_start(fp, queue, skb,
1547 bd_cons, bd_prod);
1548 goto next_rx;
1549 }
1550
1551 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1552 DP(NETIF_MSG_RX_STATUS,
1553 "calling tpa_stop on queue %d\n",
1554 queue);
1555
1556 if (!BNX2X_RX_SUM_FIX(cqe))
 1557 BNX2X_ERR("STOP on non-TCP "
1558 "data\n");
1559
 1560 /* This is the size of the linear data
1561 on this skb */
1562 len = le16_to_cpu(cqe->fast_path_cqe.
1563 len_on_bd);
1564 bnx2x_tpa_stop(bp, fp, queue, pad,
1565 len, cqe, comp_ring_cons);
1566#ifdef BNX2X_STOP_ON_ERROR
1567 if (bp->panic)
1568 return 0;
1569#endif
1570
1571 bnx2x_update_sge_prod(fp,
1572 &cqe->fast_path_cqe);
1573 goto next_cqe;
1574 }
1575 }
1576
1577 pci_dma_sync_single_for_device(bp->pdev,
1578 pci_unmap_addr(rx_buf, mapping),
1579 pad + RX_COPY_THRESH,
1580 PCI_DMA_FROMDEVICE);
1581 prefetch(skb);
1582 prefetch(((char *)(skb)) + 128);
1583
1584 /* is this an error packet? */
1585 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1586 DP(NETIF_MSG_RX_ERR,
1587 "ERROR flags %x rx packet %u\n",
1588 cqe_fp_flags, sw_comp_cons);
1589 fp->eth_q_stats.rx_err_discard_pkt++;
1590 goto reuse_rx;
1591 }
1592
1593 /* Since we don't have a jumbo ring
1594 * copy small packets if mtu > 1500
1595 */
1596 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1597 (len <= RX_COPY_THRESH)) {
1598 struct sk_buff *new_skb;
1599
1600 new_skb = netdev_alloc_skb(bp->dev,
1601 len + pad);
1602 if (new_skb == NULL) {
1603 DP(NETIF_MSG_RX_ERR,
1604 "ERROR packet dropped "
1605 "because of alloc failure\n");
1606 fp->eth_q_stats.rx_skb_alloc_failed++;
1607 goto reuse_rx;
1608 }
1609
1610 /* aligned copy */
1611 skb_copy_from_linear_data_offset(skb, pad,
1612 new_skb->data + pad, len);
1613 skb_reserve(new_skb, pad);
1614 skb_put(new_skb, len);
1615
1616 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1617
1618 skb = new_skb;
1619
1620 } else
1621 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1622 pci_unmap_single(bp->pdev,
1623 pci_unmap_addr(rx_buf, mapping),
1624 bp->rx_buf_size,
1625 PCI_DMA_FROMDEVICE);
1626 skb_reserve(skb, pad);
1627 skb_put(skb, len);
1628
1629 } else {
1630 DP(NETIF_MSG_RX_ERR,
1631 "ERROR packet dropped because "
1632 "of alloc failure\n");
1633 fp->eth_q_stats.rx_skb_alloc_failed++;
1634reuse_rx:
1635 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1636 goto next_rx;
1637 }
1638
1639 skb->protocol = eth_type_trans(skb, bp->dev);
1640
1641 skb->ip_summed = CHECKSUM_NONE;
1642 if (bp->rx_csum) {
1643 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1644 skb->ip_summed = CHECKSUM_UNNECESSARY;
1645 else
1646 fp->eth_q_stats.hw_csum_err++;
1647 }
1648 }
1649
1650 skb_record_rx_queue(skb, fp->index);
1651#ifdef BCM_VLAN
1652 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1653 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1654 PARSING_FLAGS_VLAN))
1655 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1656 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1657 else
1658#endif
1659 netif_receive_skb(skb);
1660
1661
1662next_rx:
1663 rx_buf->skb = NULL;
1664
1665 bd_cons = NEXT_RX_IDX(bd_cons);
1666 bd_prod = NEXT_RX_IDX(bd_prod);
1667 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1668 rx_pkt++;
1669next_cqe:
1670 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1671 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1672
1673 if (rx_pkt == budget)
1674 break;
1675 } /* while */
1676
1677 fp->rx_bd_cons = bd_cons;
1678 fp->rx_bd_prod = bd_prod_fw;
1679 fp->rx_comp_cons = sw_comp_cons;
1680 fp->rx_comp_prod = sw_comp_prod;
1681
1682 /* Update producers */
1683 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1684 fp->rx_sge_prod);
1685
1686 fp->rx_pkt += rx_pkt;
1687 fp->rx_calls++;
1688
1689 return rx_pkt;
1690}
1691
1692static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1693{
1694 struct bnx2x_fastpath *fp = fp_cookie;
1695 struct bnx2x *bp = fp->bp;
1696
1697 /* Return here if interrupt is disabled */
1698 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1699 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1700 return IRQ_HANDLED;
1701 }
1702
1703 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1704 fp->index, fp->sb_id);
1705 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1706
1707#ifdef BNX2X_STOP_ON_ERROR
1708 if (unlikely(bp->panic))
1709 return IRQ_HANDLED;
1710#endif
1711 /* Handle Rx or Tx according to MSI-X vector */
1712 if (fp->is_rx_queue) {
1713 prefetch(fp->rx_cons_sb);
1714 prefetch(&fp->status_blk->u_status_block.status_block_index);
1715
1716 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1717
1718 } else {
1719 prefetch(fp->tx_cons_sb);
1720 prefetch(&fp->status_blk->c_status_block.status_block_index);
1721
1722 bnx2x_update_fpsb_idx(fp);
1723 rmb();
1724 bnx2x_tx_int(fp);
1725
1726 /* Re-enable interrupts */
1727 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1728 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1729 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1730 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1731 }
1732
1733 return IRQ_HANDLED;
1734}
1735
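/* INTx/MSI interrupt handler: ack the IGU, then for every status bit
 * either schedule NAPI (Rx queues) or reap Tx completions, and defer
 * bit 0 (slowpath/attention) to the sp_task workqueue.
 */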
1736static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1737{
1738 struct bnx2x *bp = netdev_priv(dev_instance);
1739 u16 status = bnx2x_ack_int(bp);
1740 u16 mask;
1741 int i;
1742
1743 /* Return here if interrupt is shared and it's not for us */
1744 if (unlikely(status == 0)) {
1745 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1746 return IRQ_NONE;
1747 }
1748 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1749
1750 /* Return here if interrupt is disabled */
1751 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1752 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1753 return IRQ_HANDLED;
1754 }
1755
1756#ifdef BNX2X_STOP_ON_ERROR
1757 if (unlikely(bp->panic))
1758 return IRQ_HANDLED;
1759#endif
1760
1761 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1762 struct bnx2x_fastpath *fp = &bp->fp[i];
1763
1764 mask = 0x2 << fp->sb_id;
1765 if (status & mask) {
1766 /* Handle Rx or Tx according to SB id */
1767 if (fp->is_rx_queue) {
1768 prefetch(fp->rx_cons_sb);
1769 prefetch(&fp->status_blk->u_status_block.
1770 status_block_index);
1771
1772 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1773
1774 } else {
1775 prefetch(fp->tx_cons_sb);
1776 prefetch(&fp->status_blk->c_status_block.
1777 status_block_index);
1778
1779 bnx2x_update_fpsb_idx(fp);
1780 rmb();
1781 bnx2x_tx_int(fp);
1782
1783 /* Re-enable interrupts */
1784 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1785 le16_to_cpu(fp->fp_u_idx),
1786 IGU_INT_NOP, 1);
1787 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1788 le16_to_cpu(fp->fp_c_idx),
1789 IGU_INT_ENABLE, 1);
1790 }
1791 status &= ~mask;
1792 }
1793 }
1794
1795
1796 if (unlikely(status & 0x1)) {
1797 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1798
1799 status &= ~0x1;
1800 if (!status)
1801 return IRQ_HANDLED;
1802 }
1803
1804 if (status)
1805 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1806 status);
1807
1808 return IRQ_HANDLED;
1809}
1810
1811/* end of fast path */
1812
1813static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1814
1815/* Link */
1816
1817/*
1818 * General service functions
1819 */
1820
1821static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1822{
1823 u32 lock_status;
1824 u32 resource_bit = (1 << resource);
1825 int func = BP_FUNC(bp);
1826 u32 hw_lock_control_reg;
1827 int cnt;
1828
1829 /* Validating that the resource is within range */
1830 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1831 DP(NETIF_MSG_HW,
1832 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1833 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1834 return -EINVAL;
1835 }
1836
1837 if (func <= 5) {
1838 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1839 } else {
1840 hw_lock_control_reg =
1841 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1842 }
1843
1844 /* Validating that the resource is not already taken */
1845 lock_status = REG_RD(bp, hw_lock_control_reg);
1846 if (lock_status & resource_bit) {
1847 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1848 lock_status, resource_bit);
1849 return -EEXIST;
1850 }
1851
 1852 /* Try for 5 seconds, polling every 5ms */
1853 for (cnt = 0; cnt < 1000; cnt++) {
1854 /* Try to acquire the lock */
1855 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1856 lock_status = REG_RD(bp, hw_lock_control_reg);
1857 if (lock_status & resource_bit)
1858 return 0;
1859
1860 msleep(5);
1861 }
1862 DP(NETIF_MSG_HW, "Timeout\n");
1863 return -EAGAIN;
1864}
1865
1866static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1867{
1868 u32 lock_status;
1869 u32 resource_bit = (1 << resource);
1870 int func = BP_FUNC(bp);
1871 u32 hw_lock_control_reg;
1872
1873 /* Validating that the resource is within range */
1874 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1875 DP(NETIF_MSG_HW,
1876 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1877 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1878 return -EINVAL;
1879 }
1880
1881 if (func <= 5) {
1882 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1883 } else {
1884 hw_lock_control_reg =
1885 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1886 }
1887
1888 /* Validating that the resource is currently taken */
1889 lock_status = REG_RD(bp, hw_lock_control_reg);
1890 if (!(lock_status & resource_bit)) {
1891 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1892 lock_status, resource_bit);
1893 return -EFAULT;
1894 }
1895
1896 REG_WR(bp, hw_lock_control_reg, resource_bit);
1897 return 0;
1898}
1899
1900/* HW Lock for shared dual port PHYs */
1901static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1902{
1903 mutex_lock(&bp->port.phy_mutex);
1904
1905 if (bp->port.need_hw_lock)
1906 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1907}
1908
1909static void bnx2x_release_phy_lock(struct bnx2x *bp)
1910{
1911 if (bp->port.need_hw_lock)
1912 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1913
1914 mutex_unlock(&bp->port.phy_mutex);
1915}
1916
1917int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1918{
1919 /* The GPIO should be swapped if swap register is set and active */
1920 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1921 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1922 int gpio_shift = gpio_num +
1923 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1924 u32 gpio_mask = (1 << gpio_shift);
1925 u32 gpio_reg;
1926 int value;
1927
1928 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1929 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1930 return -EINVAL;
1931 }
1932
1933 /* read GPIO value */
1934 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1935
1936 /* get the requested pin value */
1937 if ((gpio_reg & gpio_mask) == gpio_mask)
1938 value = 1;
1939 else
1940 value = 0;
1941
1942 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1943
1944 return value;
1945}
1946
1947int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1948{
1949 /* The GPIO should be swapped if swap register is set and active */
1950 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1951 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1952 int gpio_shift = gpio_num +
1953 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1954 u32 gpio_mask = (1 << gpio_shift);
1955 u32 gpio_reg;
1956
1957 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1958 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1959 return -EINVAL;
1960 }
1961
1962 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1963 /* read GPIO and mask except the float bits */
1964 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1965
1966 switch (mode) {
1967 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1968 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1969 gpio_num, gpio_shift);
1970 /* clear FLOAT and set CLR */
1971 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1972 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1973 break;
1974
1975 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1976 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1977 gpio_num, gpio_shift);
1978 /* clear FLOAT and set SET */
1979 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1980 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1981 break;
1982
1983 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1984 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1985 gpio_num, gpio_shift);
1986 /* set FLOAT */
1987 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1988 break;
1989
1990 default:
1991 break;
1992 }
1993
1994 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1995 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1996
1997 return 0;
1998}
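/*
 * Editorial note (worked example of the shift arithmetic above): for GPIO 1
 * on a swapped port (gpio_port == 1), gpio_shift becomes
 * 1 + MISC_REGISTERS_GPIO_PORT_SHIFT, so gpio_mask selects the port-1 copy of
 * the pin in MISC_REG_GPIO, and the same mask shifted by the FLOAT/SET/CLR
 * positions picks the matching control field; the numeric shift value itself
 * is defined in the register header, not here.
 */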
1999
2000int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2001{
2002 /* The GPIO should be swapped if swap register is set and active */
2003 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2004 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2005 int gpio_shift = gpio_num +
2006 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2007 u32 gpio_mask = (1 << gpio_shift);
2008 u32 gpio_reg;
2009
2010 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2011 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2012 return -EINVAL;
2013 }
2014
2015 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2016 /* read GPIO int */
2017 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2018
2019 switch (mode) {
2020 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2021 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2022 "output low\n", gpio_num, gpio_shift);
2023 /* clear SET and set CLR */
2024 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2025 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2026 break;
2027
2028 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2029 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2030 "output high\n", gpio_num, gpio_shift);
2031 /* clear CLR and set SET */
2032 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2033 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2034 break;
2035
2036 default:
2037 break;
2038 }
2039
2040 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2041 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2042
2043 return 0;
2044}
2045
2046static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2047{
2048 u32 spio_mask = (1 << spio_num);
2049 u32 spio_reg;
2050
2051 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2052 (spio_num > MISC_REGISTERS_SPIO_7)) {
2053 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2054 return -EINVAL;
2055 }
2056
2057 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2058 /* read SPIO and mask except the float bits */
2059 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2060
2061 switch (mode) {
2062 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2063 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2064 /* clear FLOAT and set CLR */
2065 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2066 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2067 break;
2068
2069 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2070 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2071 /* clear FLOAT and set SET */
2072 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2073 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2074 break;
2075
2076 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2077 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2078 /* set FLOAT */
2079 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2080 break;
2081
2082 default:
2083 break;
2084 }
2085
2086 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2087 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2088
2089 return 0;
2090}
2091
2092static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2093{
2094 switch (bp->link_vars.ieee_fc &
2095 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2096 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2097 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2098 ADVERTISED_Pause);
2099 break;
2100
2101 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2102 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2103 ADVERTISED_Pause);
2104 break;
2105
2106 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2107 bp->port.advertising |= ADVERTISED_Asym_Pause;
2108 break;
2109
2110 default:
2111 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2112 ADVERTISED_Pause);
2113 break;
2114 }
2115}
2116
2117static void bnx2x_link_report(struct bnx2x *bp)
2118{
2119 if (bp->state == BNX2X_STATE_DISABLED) {
2120 netif_carrier_off(bp->dev);
2121 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2122 return;
2123 }
2124
2125 if (bp->link_vars.link_up) {
2126 if (bp->state == BNX2X_STATE_OPEN)
2127 netif_carrier_on(bp->dev);
2128 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2129
2130 printk("%d Mbps ", bp->link_vars.line_speed);
2131
2132 if (bp->link_vars.duplex == DUPLEX_FULL)
2133 printk("full duplex");
2134 else
2135 printk("half duplex");
2136
2137 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2138 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2139 printk(", receive ");
2140 if (bp->link_vars.flow_ctrl &
2141 BNX2X_FLOW_CTRL_TX)
2142 printk("& transmit ");
2143 } else {
2144 printk(", transmit ");
2145 }
2146 printk("flow control ON");
2147 }
2148 printk("\n");
2149
2150 } else { /* link_down */
2151 netif_carrier_off(bp->dev);
2152 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2153 }
2154}
2155
2156static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2157{
2158 if (!BP_NOMCP(bp)) {
2159 u8 rc;
2160
2161 /* Initialize link parameters structure variables */
2162 /* It is recommended to turn off RX FC for jumbo frames
2163 for better performance */
2164 if (bp->dev->mtu > 5000)
2165 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2166 else
2167 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2168
2169 bnx2x_acquire_phy_lock(bp);
2170
2171 if (load_mode == LOAD_DIAG)
2172 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2173
2174 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2175
2176 bnx2x_release_phy_lock(bp);
2177
2178 bnx2x_calc_fc_adv(bp);
2179
2180 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2181 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2182 bnx2x_link_report(bp);
2183 }
2184
2185 return rc;
2186 }
2187 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2188 return -EINVAL;
2189}
2190
2191static void bnx2x_link_set(struct bnx2x *bp)
2192{
2193 if (!BP_NOMCP(bp)) {
2194 bnx2x_acquire_phy_lock(bp);
2195 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2196 bnx2x_release_phy_lock(bp);
2197
2198 bnx2x_calc_fc_adv(bp);
2199 } else
2200 BNX2X_ERR("Bootcode is missing - can not set link\n");
2201}
2202
2203static void bnx2x__link_reset(struct bnx2x *bp)
2204{
2205 if (!BP_NOMCP(bp)) {
2206 bnx2x_acquire_phy_lock(bp);
2207 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2208 bnx2x_release_phy_lock(bp);
2209 } else
2210 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2211}
2212
2213static u8 bnx2x_link_test(struct bnx2x *bp)
2214{
2215 u8 rc;
2216
2217 bnx2x_acquire_phy_lock(bp);
2218 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2219 bnx2x_release_phy_lock(bp);
2220
2221 return rc;
2222}
2223
2224static void bnx2x_init_port_minmax(struct bnx2x *bp)
2225{
2226 u32 r_param = bp->link_vars.line_speed / 8;
2227 u32 fair_periodic_timeout_usec;
2228 u32 t_fair;
2229
2230 memset(&(bp->cmng.rs_vars), 0,
2231 sizeof(struct rate_shaping_vars_per_port));
2232 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2233
2234 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2235 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2236
2237 /* this is the threshold below which no timer arming will occur.
2238 The 1.25 coefficient makes the threshold a little bigger
2239 than the real time, to compensate for timer inaccuracy */
2240 bp->cmng.rs_vars.rs_threshold =
2241 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2242
2243 /* resolution of fairness timer */
2244 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2245 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2246 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2247
2248 /* this is the threshold below which we won't arm the timer anymore */
2249 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2250
2251 /* we multiply by 1e3/8 to get bytes/msec.
2252 We don't want the credits to exceed a credit
2253 of t_fair*FAIR_MEM (the algorithm resolution) */
2254 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2255 /* since each tick is 4 usec */
2256 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2257}
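/*
 * Editorial note (worked example, assuming a 10G link and
 * RS_PERIODIC_TIMEOUT_USEC == 100 as the "100 usec" comment above implies):
 *	r_param             = 10000 / 8           = 1250 bytes/usec
 *	rs_periodic_timeout = 100 / 4             = 25 SDM ticks
 *	rs_threshold        = 100 * 1250 * 5/4    = 156250 bytes
 *	t_fair              = T_FAIR_COEF / 10000 = 1000 usec (per the comment)
 * The actual QM_ARB_BYTES and T_FAIR_COEF values come from the driver headers.
 */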
2258
2259/* Calculates the sum of vn_min_rates.
2260 It's needed for further normalizing of the min_rates.
2261 Returns:
2262 sum of vn_min_rates.
2263 or
2264 0 - if all the min_rates are 0.
2265 In the latter case the fairness algorithm should be deactivated.
2266 If not all min_rates are zero then those that are zero will be set to 1.
2267 */
2268static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2269{
2270 int all_zero = 1;
2271 int port = BP_PORT(bp);
2272 int vn;
2273
2274 bp->vn_weight_sum = 0;
2275 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2276 int func = 2*vn + port;
2277 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2278 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2279 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2280
2281 /* Skip hidden vns */
2282 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2283 continue;
2284
2285 /* If min rate is zero - set it to 1 */
2286 if (!vn_min_rate)
2287 vn_min_rate = DEF_MIN_RATE;
2288 else
2289 all_zero = 0;
2290
2291 bp->vn_weight_sum += vn_min_rate;
2292 }
2293
2294 /* ... only if all min rates are zeros - disable fairness */
2295 if (all_zero)
2296 bp->vn_weight_sum = 0;
2297}
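/*
 * Editorial note (illustrative example): with four visible vns configured for
 * min BW of 10%, 20%, 30% and 40%, the values read from mf_cfg are 1000,
 * 2000, 3000 and 4000 (percent * 100), so vn_weight_sum ends up as 10000 -
 * the upper bound assumed by the credit calculation in bnx2x_init_vn_minmax()
 * below.
 */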
2298
2299static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2300{
2301 struct rate_shaping_vars_per_vn m_rs_vn;
2302 struct fairness_vars_per_vn m_fair_vn;
2303 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2304 u16 vn_min_rate, vn_max_rate;
2305 int i;
2306
2307 /* If function is hidden - set min and max to zeroes */
2308 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2309 vn_min_rate = 0;
2310 vn_max_rate = 0;
2311
2312 } else {
2313 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2314 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2315 /* If fairness is enabled (not all min rates are zero) and
2316 the current min rate is zero - set it to 1.
2317 This is a requirement of the algorithm. */
2318 if (bp->vn_weight_sum && (vn_min_rate == 0))
2319 vn_min_rate = DEF_MIN_RATE;
2320 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2321 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2322 }
2323
2324 DP(NETIF_MSG_IFUP,
2325 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2326 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2327
2328 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2329 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2330
2331 /* global vn counter - maximal Mbps for this vn */
2332 m_rs_vn.vn_counter.rate = vn_max_rate;
2333
2334 /* quota - number of bytes transmitted in this period */
2335 m_rs_vn.vn_counter.quota =
2336 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2337
2338 if (bp->vn_weight_sum) {
2339 /* credit for each period of the fairness algorithm:
2340 number of bytes in T_FAIR (the vns share the port rate).
2341 vn_weight_sum should not be larger than 10000, thus
2342 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2343 than zero */
2344 m_fair_vn.vn_credit_delta =
2345 max((u32)(vn_min_rate * (T_FAIR_COEF /
2346 (8 * bp->vn_weight_sum))),
2347 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2348 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2349 m_fair_vn.vn_credit_delta);
2350 }
2351
2352 /* Store it to internal memory */
2353 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2354 REG_WR(bp, BAR_XSTRORM_INTMEM +
2355 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2356 ((u32 *)(&m_rs_vn))[i]);
2357
2358 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2359 REG_WR(bp, BAR_XSTRORM_INTMEM +
2360 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2361 ((u32 *)(&m_fair_vn))[i]);
2362}
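/*
 * Editorial note (continuing the example above, vn_weight_sum == 10000): a vn
 * with vn_min_rate == 2000 gets
 *	vn_credit_delta = max(2000 * T_FAIR_COEF / (8 * 10000),
 *			      2 * fair_threshold)
 * i.e. its proportional share of the bytes that fit into one T_FAIR period,
 * but never less than twice the fairness threshold programmed in
 * bnx2x_init_port_minmax().
 */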
2363
2364
2365/* This function is called upon link interrupt */
2366static void bnx2x_link_attn(struct bnx2x *bp)
2367{
2368 /* Make sure that we are synced with the current statistics */
2369 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2370
2371 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2372
2373 if (bp->link_vars.link_up) {
2374
2375 /* dropless flow control */
2376 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2377 int port = BP_PORT(bp);
2378 u32 pause_enabled = 0;
2379
2380 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2381 pause_enabled = 1;
2382
2383 REG_WR(bp, BAR_USTRORM_INTMEM +
2384 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2385 pause_enabled);
2386 }
2387
2388 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2389 struct host_port_stats *pstats;
2390
2391 pstats = bnx2x_sp(bp, port_stats);
2392 /* reset old bmac stats */
2393 memset(&(pstats->mac_stx[0]), 0,
2394 sizeof(struct mac_stx));
2395 }
2396 if ((bp->state == BNX2X_STATE_OPEN) ||
2397 (bp->state == BNX2X_STATE_DISABLED))
2398 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2399 }
2400
2401 /* indicate link status */
2402 bnx2x_link_report(bp);
2403
2404 if (IS_E1HMF(bp)) {
2405 int port = BP_PORT(bp);
2406 int func;
2407 int vn;
2408
2409 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2410 if (vn == BP_E1HVN(bp))
2411 continue;
2412
2413 func = ((vn << 1) | port);
2414
2415 /* Set the attention towards other drivers
2416 on the same port */
2417 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2418 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2419 }
2420
2421 if (bp->link_vars.link_up) {
2422 int i;
2423
2424 /* Init rate shaping and fairness contexts */
2425 bnx2x_init_port_minmax(bp);
2426
2427 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2428 bnx2x_init_vn_minmax(bp, 2*vn + port);
2429
2430 /* Store it to internal memory */
2431 for (i = 0;
2432 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2433 REG_WR(bp, BAR_XSTRORM_INTMEM +
2434 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2435 ((u32 *)(&bp->cmng))[i]);
2436 }
2437 }
2438}
2439
2440static void bnx2x__link_status_update(struct bnx2x *bp)
2441{
2442 int func = BP_FUNC(bp);
2443
2444 if (bp->state != BNX2X_STATE_OPEN)
2445 return;
2446
2447 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2448
2449 if (bp->link_vars.link_up)
2450 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2451 else
2452 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2453
2454 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2455 bnx2x_calc_vn_weight_sum(bp);
2456
2457 /* indicate link status */
2458 bnx2x_link_report(bp);
2459}
2460
2461static void bnx2x_pmf_update(struct bnx2x *bp)
2462{
2463 int port = BP_PORT(bp);
2464 u32 val;
2465
2466 bp->port.pmf = 1;
2467 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2468
2469 /* enable nig attention */
2470 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2471 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2472 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2473
2474 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2475}
2476
2477/* end of Link */
2478
2479/* slow path */
2480
2481/*
2482 * General service functions
2483 */
2484
2485/* send the MCP a request, block until there is a reply */
2486u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2487{
2488 int func = BP_FUNC(bp);
2489 u32 seq = ++bp->fw_seq;
2490 u32 rc = 0;
2491 u32 cnt = 1;
2492 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2493
2494 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2495 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2496
2497 do {
2498 /* let the FW do its magic ... */
2499 msleep(delay);
2500
2501 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2502
2503 /* Give the FW up to 2 seconds (200*10ms) */
2504 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
2505
2506 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2507 cnt*delay, rc, seq);
2508
2509 /* is this a reply to our command? */
2510 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2511 rc &= FW_MSG_CODE_MASK;
2512 else {
2513 /* FW BUG! */
2514 BNX2X_ERR("FW failed to respond!\n");
2515 bnx2x_fw_dump(bp);
2516 rc = 0;
2517 }
2518
2519 return rc;
2520}
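/*
 * Editorial sketch (not driver code): callers pass one of the DRV_MSG_CODE_*
 * commands and check the masked reply, e.g.
 *
 *	u32 resp = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
 *	if (!resp)
 *		...MCP did not answer; the error was already logged above...
 *
 * as bnx2x_dcc_event() further down does for the DCC acknowledge codes.
 */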
2521
2522static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2523static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set);
2524static void bnx2x_set_rx_mode(struct net_device *dev);
2525
2526static void bnx2x_e1h_disable(struct bnx2x *bp)
2527{
2528 int port = BP_PORT(bp);
2529 int i;
2530
2531 bp->rx_mode = BNX2X_RX_MODE_NONE;
2532 bnx2x_set_storm_rx_mode(bp);
2533
2534 netif_tx_disable(bp->dev);
2535 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2536
2537 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2538
2539 bnx2x_set_mac_addr_e1h(bp, 0);
2540
2541 for (i = 0; i < MC_HASH_SIZE; i++)
2542 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2543
2544 netif_carrier_off(bp->dev);
2545}
2546
2547static void bnx2x_e1h_enable(struct bnx2x *bp)
2548{
2549 int port = BP_PORT(bp);
2550
2551 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2552
2553 bnx2x_set_mac_addr_e1h(bp, 1);
2554
2555 /* Tx queues should only be re-enabled */
2556 netif_tx_wake_all_queues(bp->dev);
2557
2558 /* Initialize the receive filter. */
2559 bnx2x_set_rx_mode(bp->dev);
2560}
2561
2562static void bnx2x_update_min_max(struct bnx2x *bp)
2563{
2564 int port = BP_PORT(bp);
2565 int vn, i;
2566
2567 /* Init rate shaping and fairness contexts */
2568 bnx2x_init_port_minmax(bp);
2569
2570 bnx2x_calc_vn_weight_sum(bp);
2571
2572 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2573 bnx2x_init_vn_minmax(bp, 2*vn + port);
2574
2575 if (bp->port.pmf) {
2576 int func;
2577
2578 /* Set the attention towards other drivers on the same port */
2579 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2580 if (vn == BP_E1HVN(bp))
2581 continue;
2582
2583 func = ((vn << 1) | port);
2584 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2585 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2586 }
2587
2588 /* Store it to internal memory */
2589 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2590 REG_WR(bp, BAR_XSTRORM_INTMEM +
2591 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2592 ((u32 *)(&bp->cmng))[i]);
2593 }
2594}
2595
2596static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2597{
2598 int func = BP_FUNC(bp);
2599
2600 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2601 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2602
2603 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2604
2605 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2606 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2607 bp->state = BNX2X_STATE_DISABLED;
2608
2609 bnx2x_e1h_disable(bp);
2610 } else {
2611 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2612 bp->state = BNX2X_STATE_OPEN;
2613
2614 bnx2x_e1h_enable(bp);
2615 }
2616 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2617 }
2618 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2619
2620 bnx2x_update_min_max(bp);
2621 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2622 }
2623
2624 /* Report results to MCP */
2625 if (dcc_event)
2626 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2627 else
2628 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2629}
2630
2631/* the slow path queue is odd since completions arrive on the fastpath ring */
2632static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2633 u32 data_hi, u32 data_lo, int common)
2634{
2635 int func = BP_FUNC(bp);
2636
2637 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2638 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2639 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2640 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2641 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2642
2643#ifdef BNX2X_STOP_ON_ERROR
2644 if (unlikely(bp->panic))
2645 return -EIO;
2646#endif
2647
2648 spin_lock_bh(&bp->spq_lock);
2649
2650 if (!bp->spq_left) {
2651 BNX2X_ERR("BUG! SPQ ring full!\n");
2652 spin_unlock_bh(&bp->spq_lock);
2653 bnx2x_panic();
2654 return -EBUSY;
2655 }
2656
2657 /* CID needs the port number to be encoded in it */
2658 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2659 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2660 HW_CID(bp, cid)));
2661 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2662 if (common)
2663 bp->spq_prod_bd->hdr.type |=
2664 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2665
2666 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2667 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2668
2669 bp->spq_left--;
2670
2671 if (bp->spq_prod_bd == bp->spq_last_bd) {
2672 bp->spq_prod_bd = bp->spq;
2673 bp->spq_prod_idx = 0;
2674 DP(NETIF_MSG_TIMER, "end of spq\n");
2675
2676 } else {
2677 bp->spq_prod_bd++;
2678 bp->spq_prod_idx++;
2679 }
2680
2681 /* Make sure that BD data is updated before writing the producer */
2682 wmb();
2683
2684 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2685 bp->spq_prod_idx);
2686
2687 mmiowb();
2688
2689 spin_unlock_bh(&bp->spq_lock);
2690 return 0;
2691}
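/*
 * Editorial note: bnx2x_sp_post() returns 0 once the single SPQ entry has
 * been filled and the XSTORM producer updated; a typical caller is
 * bnx2x_storm_stats_post() below, which posts RAMROD_CMD_ID_ETH_STAT_QUERY
 * with its ramrod data split into the data_hi/data_lo halves.
 */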
2692
2693/* acquire split MCP access lock register */
2694static int bnx2x_acquire_alr(struct bnx2x *bp)
2695{
2696 u32 i, j, val;
2697 int rc = 0;
2698
2699 might_sleep();
2700 i = 100;
2701 for (j = 0; j < i*10; j++) {
2702 val = (1UL << 31);
2703 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2704 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2705 if (val & (1L << 31))
2706 break;
2707
2708 msleep(5);
2709 }
2710 if (!(val & (1L << 31))) {
2711 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2712 rc = -EBUSY;
2713 }
2714
2715 return rc;
2716}
2717
2718/* release split MCP access lock register */
2719static void bnx2x_release_alr(struct bnx2x *bp)
2720{
2721 u32 val = 0;
2722
2723 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2724}
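/*
 * Editorial note: the ALR pair above is a simple handshake on bit 31 of the
 * GRCBASE_MCP + 0x9c register - the driver writes the bit and polls (5ms
 * steps, up to ~5 seconds) until the read-back shows it set, and releases by
 * writing 0 - so that the MCP firmware and the other port do not handle the
 * same attention event concurrently (see bnx2x_attn_int_deasserted()).
 */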
2725
2726static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2727{
2728 struct host_def_status_block *def_sb = bp->def_status_blk;
2729 u16 rc = 0;
2730
2731 barrier(); /* status block is written to by the chip */
2732 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2733 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2734 rc |= 1;
2735 }
2736 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2737 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2738 rc |= 2;
2739 }
2740 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2741 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2742 rc |= 4;
2743 }
2744 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2745 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2746 rc |= 8;
2747 }
2748 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2749 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2750 rc |= 16;
2751 }
2752 return rc;
2753}
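/*
 * Editorial note: the value returned above is a bitmask of which default
 * status block indices changed - bit 0 for the attention block and bits 1-4
 * for the cstorm/ustorm/xstorm/tstorm blocks - which is how bnx2x_sp_task()
 * tells an attention (bit 0) apart from ordinary slowpath completions.
 */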
2754
2755/*
2756 * slow path service functions
2757 */
2758
2759static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2760{
2761 int port = BP_PORT(bp);
2762 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2763 COMMAND_REG_ATTN_BITS_SET);
2764 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2765 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2766 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2767 NIG_REG_MASK_INTERRUPT_PORT0;
2768 u32 aeu_mask;
2769 u32 nig_mask = 0;
2770
2771 if (bp->attn_state & asserted)
2772 BNX2X_ERR("IGU ERROR\n");
2773
2774 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2775 aeu_mask = REG_RD(bp, aeu_addr);
2776
2777 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2778 aeu_mask, asserted);
2779 aeu_mask &= ~(asserted & 0xff);
2780 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2781
2782 REG_WR(bp, aeu_addr, aeu_mask);
2783 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2784
2785 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2786 bp->attn_state |= asserted;
2787 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2788
2789 if (asserted & ATTN_HARD_WIRED_MASK) {
2790 if (asserted & ATTN_NIG_FOR_FUNC) {
2791
2792 bnx2x_acquire_phy_lock(bp);
2793
2794 /* save nig interrupt mask */
2795 nig_mask = REG_RD(bp, nig_int_mask_addr);
2796 REG_WR(bp, nig_int_mask_addr, 0);
2797
2798 bnx2x_link_attn(bp);
2799
2800 /* handle unicore attn? */
2801 }
2802 if (asserted & ATTN_SW_TIMER_4_FUNC)
2803 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2804
2805 if (asserted & GPIO_2_FUNC)
2806 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2807
2808 if (asserted & GPIO_3_FUNC)
2809 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2810
2811 if (asserted & GPIO_4_FUNC)
2812 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2813
2814 if (port == 0) {
2815 if (asserted & ATTN_GENERAL_ATTN_1) {
2816 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2817 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2818 }
2819 if (asserted & ATTN_GENERAL_ATTN_2) {
2820 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2821 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2822 }
2823 if (asserted & ATTN_GENERAL_ATTN_3) {
2824 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2825 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2826 }
2827 } else {
2828 if (asserted & ATTN_GENERAL_ATTN_4) {
2829 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2830 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2831 }
2832 if (asserted & ATTN_GENERAL_ATTN_5) {
2833 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2834 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2835 }
2836 if (asserted & ATTN_GENERAL_ATTN_6) {
2837 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2838 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2839 }
2840 }
2841
2842 } /* if hardwired */
2843
2844 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2845 asserted, hc_addr);
2846 REG_WR(bp, hc_addr, asserted);
2847
2848 /* now set back the mask */
2849 if (asserted & ATTN_NIG_FOR_FUNC) {
2850 REG_WR(bp, nig_int_mask_addr, nig_mask);
2851 bnx2x_release_phy_lock(bp);
2852 }
2853}
2854
2855static inline void bnx2x_fan_failure(struct bnx2x *bp)
2856{
2857 int port = BP_PORT(bp);
2858
2859 /* mark the failure */
2860 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2861 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2862 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2863 bp->link_params.ext_phy_config);
2864
2865 /* log the failure */
2866 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2867 " the driver to shutdown the card to prevent permanent"
2868 " damage. Please contact Dell Support for assistance\n",
2869 bp->dev->name);
2870}
2871static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2872{
2873 int port = BP_PORT(bp);
2874 int reg_offset;
2875 u32 val, swap_val, swap_override;
2876
2877 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2878 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2879
2880 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2881
2882 val = REG_RD(bp, reg_offset);
2883 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2884 REG_WR(bp, reg_offset, val);
2885
2886 BNX2X_ERR("SPIO5 hw attention\n");
2887
2888 /* Fan failure attention */
2889 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2890 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2891 /* Low power mode is controlled by GPIO 2 */
2892 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2893 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2894 /* The PHY reset is controlled by GPIO 1 */
2895 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2896 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2897 break;
2898
2899 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2900 /* The PHY reset is controlled by GPIO 1 */
2901 /* fake the port number to cancel the swap done in
2902 set_gpio() */
2903 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2904 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2905 port = (swap_val && swap_override) ^ 1;
2906 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2907 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2908 break;
2909
2910 default:
2911 break;
2912 }
2913 bnx2x_fan_failure(bp);
2914 }
2915
2916 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2917 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2918 bnx2x_acquire_phy_lock(bp);
2919 bnx2x_handle_module_detect_int(&bp->link_params);
2920 bnx2x_release_phy_lock(bp);
2921 }
2922
2923 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2924
2925 val = REG_RD(bp, reg_offset);
2926 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2927 REG_WR(bp, reg_offset, val);
2928
2929 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2930 (attn & HW_INTERRUT_ASSERT_SET_0));
2931 bnx2x_panic();
2932 }
2933}
2934
2935static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2936{
2937 u32 val;
2938
2939 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2940
2941 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2942 BNX2X_ERR("DB hw attention 0x%x\n", val);
2943 /* DORQ discard attention */
2944 if (val & 0x2)
2945 BNX2X_ERR("FATAL error from DORQ\n");
2946 }
2947
2948 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2949
2950 int port = BP_PORT(bp);
2951 int reg_offset;
2952
2953 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2954 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2955
2956 val = REG_RD(bp, reg_offset);
2957 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2958 REG_WR(bp, reg_offset, val);
2959
2960 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2961 (attn & HW_INTERRUT_ASSERT_SET_1));
2962 bnx2x_panic();
2963 }
2964}
2965
2966static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2967{
2968 u32 val;
2969
2970 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2971
2972 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2973 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2974 /* CFC error attention */
2975 if (val & 0x2)
2976 BNX2X_ERR("FATAL error from CFC\n");
2977 }
2978
2979 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2980
2981 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2982 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2983 /* RQ_USDMDP_FIFO_OVERFLOW */
2984 if (val & 0x18000)
2985 BNX2X_ERR("FATAL error from PXP\n");
2986 }
2987
2988 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2989
2990 int port = BP_PORT(bp);
2991 int reg_offset;
2992
2993 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2994 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2995
2996 val = REG_RD(bp, reg_offset);
2997 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2998 REG_WR(bp, reg_offset, val);
2999
3000 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3001 (attn & HW_INTERRUT_ASSERT_SET_2));
3002 bnx2x_panic();
3003 }
3004}
3005
3006static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3007{
3008 u32 val;
3009
3010 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3011
3012 if (attn & BNX2X_PMF_LINK_ASSERT) {
3013 int func = BP_FUNC(bp);
3014
3015 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3016 val = SHMEM_RD(bp, func_mb[func].drv_status);
3017 if (val & DRV_STATUS_DCC_EVENT_MASK)
3018 bnx2x_dcc_event(bp,
3019 (val & DRV_STATUS_DCC_EVENT_MASK));
3020 bnx2x__link_status_update(bp);
3021 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3022 bnx2x_pmf_update(bp);
3023
3024 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3025
3026 BNX2X_ERR("MC assert!\n");
3027 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3028 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3029 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3030 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3031 bnx2x_panic();
3032
3033 } else if (attn & BNX2X_MCP_ASSERT) {
3034
3035 BNX2X_ERR("MCP assert!\n");
3036 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3037 bnx2x_fw_dump(bp);
3038
3039 } else
3040 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3041 }
3042
3043 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3044 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3045 if (attn & BNX2X_GRC_TIMEOUT) {
3046 val = CHIP_IS_E1H(bp) ?
3047 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3048 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3049 }
3050 if (attn & BNX2X_GRC_RSV) {
3051 val = CHIP_IS_E1H(bp) ?
3052 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3053 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3054 }
3055 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3056 }
3057}
3058
3059static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3060{
3061 struct attn_route attn;
3062 struct attn_route group_mask;
3063 int port = BP_PORT(bp);
3064 int index;
3065 u32 reg_addr;
3066 u32 val;
3067 u32 aeu_mask;
3068
3069 /* need to take HW lock because MCP or other port might also
3070 try to handle this event */
3071 bnx2x_acquire_alr(bp);
3072
3073 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3074 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3075 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3076 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3077 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3078 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3079
3080 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3081 if (deasserted & (1 << index)) {
3082 group_mask = bp->attn_group[index];
3083
3084 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3085 index, group_mask.sig[0], group_mask.sig[1],
3086 group_mask.sig[2], group_mask.sig[3]);
3087
3088 bnx2x_attn_int_deasserted3(bp,
3089 attn.sig[3] & group_mask.sig[3]);
3090 bnx2x_attn_int_deasserted1(bp,
3091 attn.sig[1] & group_mask.sig[1]);
3092 bnx2x_attn_int_deasserted2(bp,
3093 attn.sig[2] & group_mask.sig[2]);
3094 bnx2x_attn_int_deasserted0(bp,
3095 attn.sig[0] & group_mask.sig[0]);
3096
3097 if ((attn.sig[0] & group_mask.sig[0] &
3098 HW_PRTY_ASSERT_SET_0) ||
3099 (attn.sig[1] & group_mask.sig[1] &
3100 HW_PRTY_ASSERT_SET_1) ||
3101 (attn.sig[2] & group_mask.sig[2] &
3102 HW_PRTY_ASSERT_SET_2))
3103 BNX2X_ERR("FATAL HW block parity attention\n");
3104 }
3105 }
3106
3107 bnx2x_release_alr(bp);
3108
3109 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3110
3111 val = ~deasserted;
3112 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3113 val, reg_addr);
3114 REG_WR(bp, reg_addr, val);
3115
3116 if (~bp->attn_state & deasserted)
3117 BNX2X_ERR("IGU ERROR\n");
3118
3119 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3120 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3121
3122 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3123 aeu_mask = REG_RD(bp, reg_addr);
3124
3125 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3126 aeu_mask, deasserted);
3127 aeu_mask |= (deasserted & 0xff);
3128 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3129
3130 REG_WR(bp, reg_addr, aeu_mask);
3131 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3132
3133 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3134 bp->attn_state &= ~deasserted;
3135 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3136}
3137
3138static void bnx2x_attn_int(struct bnx2x *bp)
3139{
3140 /* read local copy of bits */
3141 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3142 attn_bits);
3143 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3144 attn_bits_ack);
3145 u32 attn_state = bp->attn_state;
3146
3147 /* look for changed bits */
3148 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3149 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3150
3151 DP(NETIF_MSG_HW,
3152 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3153 attn_bits, attn_ack, asserted, deasserted);
3154
3155 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3156 BNX2X_ERR("BAD attention state\n");
3157
3158 /* handle bits that were raised */
3159 if (asserted)
3160 bnx2x_attn_int_asserted(bp, asserted);
3161
3162 if (deasserted)
3163 bnx2x_attn_int_deasserted(bp, deasserted);
3164}
3165
3166static void bnx2x_sp_task(struct work_struct *work)
3167{
3168 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3169 u16 status;
3170
3171
3172 /* Return here if interrupt is disabled */
3173 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3174 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3175 return;
3176 }
3177
3178 status = bnx2x_update_dsb_idx(bp);
3179/* if (status == 0) */
3180/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3181
3182 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3183
3184 /* HW attentions */
3185 if (status & 0x1)
3186 bnx2x_attn_int(bp);
3187
3188 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3189 IGU_INT_NOP, 1);
3190 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3191 IGU_INT_NOP, 1);
3192 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3193 IGU_INT_NOP, 1);
3194 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3195 IGU_INT_NOP, 1);
3196 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3197 IGU_INT_ENABLE, 1);
3198
3199}
3200
3201static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3202{
3203 struct net_device *dev = dev_instance;
3204 struct bnx2x *bp = netdev_priv(dev);
3205
3206 /* Return here if interrupt is disabled */
3207 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3208 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3209 return IRQ_HANDLED;
3210 }
3211
3212 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3213
3214#ifdef BNX2X_STOP_ON_ERROR
3215 if (unlikely(bp->panic))
3216 return IRQ_HANDLED;
3217#endif
3218
3219 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3220
3221 return IRQ_HANDLED;
3222}
3223
3224/* end of slow path */
3225
3226/* Statistics */
3227
3228/****************************************************************************
3229* Macros
3230****************************************************************************/
3231
3232/* sum[hi:lo] += add[hi:lo] */
3233#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3234 do { \
3235 s_lo += a_lo; \
3236 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3237 } while (0)
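/*
 * Editorial note: ADD_64() adds two 64-bit values kept as hi/lo u32 pairs;
 * the (s_lo < a_lo) test detects the 32-bit wrap of the low word and
 * propagates the carry, e.g. adding 1 to hi=0x0/lo=0xffffffff yields
 * hi=0x1/lo=0x0.
 */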
3238
3239/* difference = minuend - subtrahend */
3240#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3241 do { \
3242 if (m_lo < s_lo) { \
3243 /* underflow */ \
3244 d_hi = m_hi - s_hi; \
3245 if (d_hi > 0) { \
3246 /* we can 'loan' 1 */ \
3247 d_hi--; \
3248 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3249 } else { \
3250 /* m_hi <= s_hi */ \
3251 d_hi = 0; \
3252 d_lo = 0; \
3253 } \
3254 } else { \
3255 /* m_lo >= s_lo */ \
3256 if (m_hi < s_hi) { \
3257 d_hi = 0; \
3258 d_lo = 0; \
3259 } else { \
3260 /* m_hi >= s_hi */ \
3261 d_hi = m_hi - s_hi; \
3262 d_lo = m_lo - s_lo; \
3263 } \
3264 } \
3265 } while (0)
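/*
 * Editorial note: DIFF_64() computes minuend - subtrahend on the same hi/lo
 * representation, borrowing from the high word when the low word underflows
 * and, per its internal comments, zeroing the result when the subtrahend is
 * the larger value - which lets UPDATE_STAT64() below take differences of
 * free-running hardware counters safely.
 */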
3266
3267#define UPDATE_STAT64(s, t) \
3268 do { \
3269 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3270 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3271 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3272 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3273 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3274 pstats->mac_stx[1].t##_lo, diff.lo); \
3275 } while (0)
3276
3277#define UPDATE_STAT64_NIG(s, t) \
3278 do { \
3279 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3280 diff.lo, new->s##_lo, old->s##_lo); \
3281 ADD_64(estats->t##_hi, diff.hi, \
3282 estats->t##_lo, diff.lo); \
3283 } while (0)
3284
3285/* sum[hi:lo] += add */
3286#define ADD_EXTEND_64(s_hi, s_lo, a) \
3287 do { \
3288 s_lo += a; \
3289 s_hi += (s_lo < a) ? 1 : 0; \
3290 } while (0)
3291
3292#define UPDATE_EXTEND_STAT(s) \
3293 do { \
3294 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3295 pstats->mac_stx[1].s##_lo, \
3296 new->s); \
3297 } while (0)
3298
3299#define UPDATE_EXTEND_TSTAT(s, t) \
3300 do { \
3301 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3302 old_tclient->s = tclient->s; \
3303 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3304 } while (0)
3305
3306#define UPDATE_EXTEND_USTAT(s, t) \
3307 do { \
3308 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3309 old_uclient->s = uclient->s; \
3310 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3311 } while (0)
3312
3313#define UPDATE_EXTEND_XSTAT(s, t) \
3314 do { \
3315 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3316 old_xclient->s = xclient->s; \
3317 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3318 } while (0)
3319
3320/* minuend -= subtrahend */
3321#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3322 do { \
3323 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3324 } while (0)
3325
3326/* minuend[hi:lo] -= subtrahend */
3327#define SUB_EXTEND_64(m_hi, m_lo, s) \
3328 do { \
3329 SUB_64(m_hi, 0, m_lo, s); \
3330 } while (0)
3331
3332#define SUB_EXTEND_USTAT(s, t) \
3333 do { \
3334 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3335 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3336 } while (0)
3337
3338/*
3339 * General service functions
3340 */
3341
3342static inline long bnx2x_hilo(u32 *hiref)
3343{
3344 u32 lo = *(hiref + 1);
3345#if (BITS_PER_LONG == 64)
3346 u32 hi = *hiref;
3347
3348 return HILO_U64(hi, lo);
3349#else
3350 return lo;
3351#endif
3352}
3353
3354/*
3355 * Init service functions
3356 */
3357
3358static void bnx2x_storm_stats_post(struct bnx2x *bp)
3359{
3360 if (!bp->stats_pending) {
3361 struct eth_query_ramrod_data ramrod_data = {0};
3362 int i, rc;
3363
3364 ramrod_data.drv_counter = bp->stats_counter++;
3365 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3366 for_each_queue(bp, i)
3367 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3368
3369 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3370 ((u32 *)&ramrod_data)[1],
3371 ((u32 *)&ramrod_data)[0], 0);
3372 if (rc == 0) {
3373 /* stats ramrod has its own slot on the spq */
3374 bp->spq_left++;
3375 bp->stats_pending = 1;
3376 }
3377 }
3378}
3379
3380static void bnx2x_hw_stats_post(struct bnx2x *bp)
3381{
3382 struct dmae_command *dmae = &bp->stats_dmae;
3383 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3384
3385 *stats_comp = DMAE_COMP_VAL;
3386 if (CHIP_REV_IS_SLOW(bp))
3387 return;
3388
3389 /* loader */
3390 if (bp->executer_idx) {
3391 int loader_idx = PMF_DMAE_C(bp);
3392
3393 memset(dmae, 0, sizeof(struct dmae_command));
3394
3395 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3396 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3397 DMAE_CMD_DST_RESET |
3398#ifdef __BIG_ENDIAN
3399 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3400#else
3401 DMAE_CMD_ENDIANITY_DW_SWAP |
3402#endif
3403 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3404 DMAE_CMD_PORT_0) |
3405 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3406 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3407 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3408 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3409 sizeof(struct dmae_command) *
3410 (loader_idx + 1)) >> 2;
3411 dmae->dst_addr_hi = 0;
3412 dmae->len = sizeof(struct dmae_command) >> 2;
3413 if (CHIP_IS_E1(bp))
3414 dmae->len--;
3415 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3416 dmae->comp_addr_hi = 0;
3417 dmae->comp_val = 1;
3418
3419 *stats_comp = 0;
3420 bnx2x_post_dmae(bp, dmae, loader_idx);
3421
3422 } else if (bp->func_stx) {
3423 *stats_comp = 0;
3424 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3425 }
3426}
3427
3428static int bnx2x_stats_comp(struct bnx2x *bp)
3429{
3430 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3431 int cnt = 10;
3432
3433 might_sleep();
3434 while (*stats_comp != DMAE_COMP_VAL) {
3435 if (!cnt) {
3436 BNX2X_ERR("timeout waiting for stats finished\n");
3437 break;
3438 }
3439 cnt--;
3440 msleep(1);
3441 }
3442 return 1;
3443}
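/*
 * Editorial note: bnx2x_stats_comp() polls the stats_comp word for up to
 * ~10ms (10 x 1ms) waiting for the DMAE engine to write DMAE_COMP_VAL; on
 * timeout it only logs an error and still returns 1 so the statistics state
 * machine keeps running.
 */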
3444
3445/*
3446 * Statistics service functions
3447 */
3448
3449static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3450{
3451 struct dmae_command *dmae;
3452 u32 opcode;
3453 int loader_idx = PMF_DMAE_C(bp);
3454 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3455
3456 /* sanity */
3457 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3458 BNX2X_ERR("BUG!\n");
3459 return;
3460 }
3461
3462 bp->executer_idx = 0;
3463
3464 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3465 DMAE_CMD_C_ENABLE |
3466 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3467#ifdef __BIG_ENDIAN
3468 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3469#else
3470 DMAE_CMD_ENDIANITY_DW_SWAP |
3471#endif
3472 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3473 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3474
3475 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3476 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3477 dmae->src_addr_lo = bp->port.port_stx >> 2;
3478 dmae->src_addr_hi = 0;
3479 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3480 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3481 dmae->len = DMAE_LEN32_RD_MAX;
3482 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3483 dmae->comp_addr_hi = 0;
3484 dmae->comp_val = 1;
3485
3486 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3487 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3488 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3489 dmae->src_addr_hi = 0;
3490 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3491 DMAE_LEN32_RD_MAX * 4);
3492 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3493 DMAE_LEN32_RD_MAX * 4);
3494 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3495 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3496 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3497 dmae->comp_val = DMAE_COMP_VAL;
3498
3499 *stats_comp = 0;
3500 bnx2x_hw_stats_post(bp);
3501 bnx2x_stats_comp(bp);
3502}
3503
3504static void bnx2x_port_stats_init(struct bnx2x *bp)
3505{
3506 struct dmae_command *dmae;
3507 int port = BP_PORT(bp);
3508 int vn = BP_E1HVN(bp);
3509 u32 opcode;
3510 int loader_idx = PMF_DMAE_C(bp);
3511 u32 mac_addr;
3512 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3513
3514 /* sanity */
3515 if (!bp->link_vars.link_up || !bp->port.pmf) {
3516 BNX2X_ERR("BUG!\n");
3517 return;
3518 }
3519
3520 bp->executer_idx = 0;
3521
3522 /* MCP */
3523 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3524 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3525 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3526#ifdef __BIG_ENDIAN
3527 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3528#else
3529 DMAE_CMD_ENDIANITY_DW_SWAP |
3530#endif
3531 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3532 (vn << DMAE_CMD_E1HVN_SHIFT));
3533
3534 if (bp->port.port_stx) {
3535
3536 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3537 dmae->opcode = opcode;
3538 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3539 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3540 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3541 dmae->dst_addr_hi = 0;
3542 dmae->len = sizeof(struct host_port_stats) >> 2;
3543 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3544 dmae->comp_addr_hi = 0;
3545 dmae->comp_val = 1;
3546 }
3547
3548 if (bp->func_stx) {
3549
3550 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3551 dmae->opcode = opcode;
3552 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3553 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3554 dmae->dst_addr_lo = bp->func_stx >> 2;
3555 dmae->dst_addr_hi = 0;
3556 dmae->len = sizeof(struct host_func_stats) >> 2;
3557 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3558 dmae->comp_addr_hi = 0;
3559 dmae->comp_val = 1;
3560 }
3561
3562 /* MAC */
3563 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3564 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3565 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3566#ifdef __BIG_ENDIAN
3567 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3568#else
3569 DMAE_CMD_ENDIANITY_DW_SWAP |
3570#endif
3571 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3572 (vn << DMAE_CMD_E1HVN_SHIFT));
3573
3574 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3575
3576 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3577 NIG_REG_INGRESS_BMAC0_MEM);
3578
3579 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3580 BIGMAC_REGISTER_TX_STAT_GTBYT */
3581 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3582 dmae->opcode = opcode;
3583 dmae->src_addr_lo = (mac_addr +
3584 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3585 dmae->src_addr_hi = 0;
3586 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3587 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3588 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3589 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3590 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3591 dmae->comp_addr_hi = 0;
3592 dmae->comp_val = 1;
3593
3594 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3595 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3596 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3597 dmae->opcode = opcode;
3598 dmae->src_addr_lo = (mac_addr +
3599 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3600 dmae->src_addr_hi = 0;
3601 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3602 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3603 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3604 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3605 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3606 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3607 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3608 dmae->comp_addr_hi = 0;
3609 dmae->comp_val = 1;
3610
3611 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3612
3613 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3614
3615 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3616 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3617 dmae->opcode = opcode;
3618 dmae->src_addr_lo = (mac_addr +
3619 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3620 dmae->src_addr_hi = 0;
3621 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3622 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3623 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3624 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3625 dmae->comp_addr_hi = 0;
3626 dmae->comp_val = 1;
3627
3628 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3629 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3630 dmae->opcode = opcode;
3631 dmae->src_addr_lo = (mac_addr +
3632 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3633 dmae->src_addr_hi = 0;
3634 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3635 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3636 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3637 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3638 dmae->len = 1;
3639 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3640 dmae->comp_addr_hi = 0;
3641 dmae->comp_val = 1;
3642
3643 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3644 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3645 dmae->opcode = opcode;
3646 dmae->src_addr_lo = (mac_addr +
3647 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3648 dmae->src_addr_hi = 0;
3649 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3650 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3651 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3652 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3653 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3654 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3655 dmae->comp_addr_hi = 0;
3656 dmae->comp_val = 1;
3657 }
3658
3659 /* NIG */
3660 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3661 dmae->opcode = opcode;
3662 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3663 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3664 dmae->src_addr_hi = 0;
3665 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3666 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3667 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3668 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3669 dmae->comp_addr_hi = 0;
3670 dmae->comp_val = 1;
3671
3672 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3673 dmae->opcode = opcode;
3674 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3675 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3676 dmae->src_addr_hi = 0;
3677 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3678 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3679 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3680 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3681 dmae->len = (2*sizeof(u32)) >> 2;
3682 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3683 dmae->comp_addr_hi = 0;
3684 dmae->comp_val = 1;
3685
3686 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3687 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3688 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3689 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3690#ifdef __BIG_ENDIAN
3691 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3692#else
3693 DMAE_CMD_ENDIANITY_DW_SWAP |
3694#endif
3695 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3696 (vn << DMAE_CMD_E1HVN_SHIFT));
3697 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3698 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3699 dmae->src_addr_hi = 0;
3700 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3701 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3702 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3703 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3704 dmae->len = (2*sizeof(u32)) >> 2;
3705 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3706 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3707 dmae->comp_val = DMAE_COMP_VAL;
3708
3709 *stats_comp = 0;
3710}
3711
3712static void bnx2x_func_stats_init(struct bnx2x *bp)
3713{
3714 struct dmae_command *dmae = &bp->stats_dmae;
3715 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3716
3717 /* sanity */
3718 if (!bp->func_stx) {
3719 BNX2X_ERR("BUG!\n");
3720 return;
3721 }
3722
3723 bp->executer_idx = 0;
3724 memset(dmae, 0, sizeof(struct dmae_command));
3725
3726 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3727 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3728 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3729#ifdef __BIG_ENDIAN
3730 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3731#else
3732 DMAE_CMD_ENDIANITY_DW_SWAP |
3733#endif
3734 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3735 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3736 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3737 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3738 dmae->dst_addr_lo = bp->func_stx >> 2;
3739 dmae->dst_addr_hi = 0;
3740 dmae->len = sizeof(struct host_func_stats) >> 2;
3741 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3742 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3743 dmae->comp_val = DMAE_COMP_VAL;
3744
3745 *stats_comp = 0;
3746}
3747
3748static void bnx2x_stats_start(struct bnx2x *bp)
3749{
3750 if (bp->port.pmf)
3751 bnx2x_port_stats_init(bp);
3752
3753 else if (bp->func_stx)
3754 bnx2x_func_stats_init(bp);
3755
3756 bnx2x_hw_stats_post(bp);
3757 bnx2x_storm_stats_post(bp);
3758}
3759
3760static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3761{
3762 bnx2x_stats_comp(bp);
3763 bnx2x_stats_pmf_update(bp);
3764 bnx2x_stats_start(bp);
3765}
3766
3767static void bnx2x_stats_restart(struct bnx2x *bp)
3768{
3769 bnx2x_stats_comp(bp);
3770 bnx2x_stats_start(bp);
3771}
3772
3773static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3774{
3775 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3776 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3777 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3778 struct {
3779 u32 lo;
3780 u32 hi;
3781 } diff;
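	/* hi/lo scratch pair consumed by the UPDATE_STAT64() macros below */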
3782
3783 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3784 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3785 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3786 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3787 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3788 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3789 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3790 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3791 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3792 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3793 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3794 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3795 UPDATE_STAT64(tx_stat_gt127,
3796 tx_stat_etherstatspkts65octetsto127octets);
3797 UPDATE_STAT64(tx_stat_gt255,
3798 tx_stat_etherstatspkts128octetsto255octets);
3799 UPDATE_STAT64(tx_stat_gt511,
3800 tx_stat_etherstatspkts256octetsto511octets);
3801 UPDATE_STAT64(tx_stat_gt1023,
3802 tx_stat_etherstatspkts512octetsto1023octets);
3803 UPDATE_STAT64(tx_stat_gt1518,
3804 tx_stat_etherstatspkts1024octetsto1522octets);
3805 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3806 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3807 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3808 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3809 UPDATE_STAT64(tx_stat_gterr,
3810 tx_stat_dot3statsinternalmactransmiterrors);
3811 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3812
3813 estats->pause_frames_received_hi =
3814 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3815 estats->pause_frames_received_lo =
3816 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3817
3818 estats->pause_frames_sent_hi =
3819 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3820 estats->pause_frames_sent_lo =
3821 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3822}
3823
3824static void bnx2x_emac_stats_update(struct bnx2x *bp)
3825{
3826 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3827 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3828 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3829
3830 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3831 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3832 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3833 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3834 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3835 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3836 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3837 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3838 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3839 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3840 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3841 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3842 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3843 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3844 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3845 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3846 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3847 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3848 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3849 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3850 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3851 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3852 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3853 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3854 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3855 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3856 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3857 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3858 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3859 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3860 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3861
3862 estats->pause_frames_received_hi =
3863 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3864 estats->pause_frames_received_lo =
3865 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3866 ADD_64(estats->pause_frames_received_hi,
3867 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3868 estats->pause_frames_received_lo,
3869 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3870
3871 estats->pause_frames_sent_hi =
3872 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3873 estats->pause_frames_sent_lo =
3874 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3875 ADD_64(estats->pause_frames_sent_hi,
3876 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3877 estats->pause_frames_sent_lo,
3878 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3879}
3880
3881static int bnx2x_hw_stats_update(struct bnx2x *bp)
3882{
3883 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3884 struct nig_stats *old = &(bp->port.old_nig_stats);
3885 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3886 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3887 struct {
3888 u32 lo;
3889 u32 hi;
3890 } diff;
3891 u32 nig_timer_max;
3892
3893 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3894 bnx2x_bmac_stats_update(bp);
3895
3896 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3897 bnx2x_emac_stats_update(bp);
3898
3899 else { /* unreached */
3900 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3901 return -1;
3902 }
3903
3904 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3905 new->brb_discard - old->brb_discard);
3906 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3907 new->brb_truncate - old->brb_truncate);
3908
3909 UPDATE_STAT64_NIG(egress_mac_pkt0,
3910 etherstatspkts1024octetsto1522octets);
3911 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3912
3913 memcpy(old, new, sizeof(struct nig_stats));
3914
3915 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3916 sizeof(struct mac_stx));
3917 estats->brb_drop_hi = pstats->brb_drop_hi;
3918 estats->brb_drop_lo = pstats->brb_drop_lo;
3919
3920 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3921
3922 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3923 if (nig_timer_max != estats->nig_timer_max) {
3924 estats->nig_timer_max = nig_timer_max;
3925 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3926 }
3927
3928 return 0;
3929}
3930
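/*
 * Fold the per-client counters posted by the storm FW into the per-queue
 * and per-function accumulators. Returns non-zero if any storm has not
 * yet posted results for the current stats_counter, so the caller can
 * retry on the next round.
 */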
3931static int bnx2x_storm_stats_update(struct bnx2x *bp)
3932{
3933 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3934 struct tstorm_per_port_stats *tport =
3935 &stats->tstorm_common.port_statistics;
3936 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3937 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3938 int i;
3939
3940 memcpy(&(fstats->total_bytes_received_hi),
3941 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
3942 sizeof(struct host_func_stats) - 2*sizeof(u32));
3943 estats->error_bytes_received_hi = 0;
3944 estats->error_bytes_received_lo = 0;
3945 estats->etherstatsoverrsizepkts_hi = 0;
3946 estats->etherstatsoverrsizepkts_lo = 0;
3947 estats->no_buff_discard_hi = 0;
3948 estats->no_buff_discard_lo = 0;
3949
3950 for_each_rx_queue(bp, i) {
3951 struct bnx2x_fastpath *fp = &bp->fp[i];
3952 int cl_id = fp->cl_id;
3953 struct tstorm_per_client_stats *tclient =
3954 &stats->tstorm_common.client_statistics[cl_id];
3955 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3956 struct ustorm_per_client_stats *uclient =
3957 &stats->ustorm_common.client_statistics[cl_id];
3958 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3959 struct xstorm_per_client_stats *xclient =
3960 &stats->xstorm_common.client_statistics[cl_id];
3961 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3962 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3963 u32 diff;
3964
3965 /* are storm stats valid? */
3966 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3967 bp->stats_counter) {
3968 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3969 " xstorm counter (%d) != stats_counter (%d)\n",
3970 i, xclient->stats_counter, bp->stats_counter);
3971 return -1;
3972 }
3973 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3974 bp->stats_counter) {
3975 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3976 " tstorm counter (%d) != stats_counter (%d)\n",
3977 i, tclient->stats_counter, bp->stats_counter);
3978 return -2;
3979 }
3980 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3981 bp->stats_counter) {
3982 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3983 " ustorm counter (%d) != stats_counter (%d)\n",
3984 i, uclient->stats_counter, bp->stats_counter);
3985 return -4;
3986 }
3987
3988 qstats->total_bytes_received_hi =
3989 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3990 qstats->total_bytes_received_lo =
3991 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3992
3993 ADD_64(qstats->total_bytes_received_hi,
3994 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
3995 qstats->total_bytes_received_lo,
3996 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
3997
3998 ADD_64(qstats->total_bytes_received_hi,
3999 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4000 qstats->total_bytes_received_lo,
4001 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4002
4003 qstats->valid_bytes_received_hi =
4004 qstats->total_bytes_received_hi;
4005 qstats->valid_bytes_received_lo =
4006 qstats->total_bytes_received_lo;
4007
4008 qstats->error_bytes_received_hi =
4009 le32_to_cpu(tclient->rcv_error_bytes.hi);
4010 qstats->error_bytes_received_lo =
4011 le32_to_cpu(tclient->rcv_error_bytes.lo);
4012
4013 ADD_64(qstats->total_bytes_received_hi,
4014 qstats->error_bytes_received_hi,
4015 qstats->total_bytes_received_lo,
4016 qstats->error_bytes_received_lo);
4017
4018 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4019 total_unicast_packets_received);
4020 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4021 total_multicast_packets_received);
4022 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4023 total_broadcast_packets_received);
4024 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4025 etherstatsoverrsizepkts);
4026 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4027
4028 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4029 total_unicast_packets_received);
4030 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4031 total_multicast_packets_received);
4032 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4033 total_broadcast_packets_received);
4034 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4035 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4036 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4037
4038 qstats->total_bytes_transmitted_hi =
4039 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4040 qstats->total_bytes_transmitted_lo =
4041 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4042
4043 ADD_64(qstats->total_bytes_transmitted_hi,
4044 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4045 qstats->total_bytes_transmitted_lo,
4046 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4047
4048 ADD_64(qstats->total_bytes_transmitted_hi,
4049 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4050 qstats->total_bytes_transmitted_lo,
4051 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4052
4053 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4054 total_unicast_packets_transmitted);
4055 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4056 total_multicast_packets_transmitted);
4057 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4058 total_broadcast_packets_transmitted);
4059
4060 old_tclient->checksum_discard = tclient->checksum_discard;
4061 old_tclient->ttl0_discard = tclient->ttl0_discard;
4062
4063 ADD_64(fstats->total_bytes_received_hi,
4064 qstats->total_bytes_received_hi,
4065 fstats->total_bytes_received_lo,
4066 qstats->total_bytes_received_lo);
4067 ADD_64(fstats->total_bytes_transmitted_hi,
4068 qstats->total_bytes_transmitted_hi,
4069 fstats->total_bytes_transmitted_lo,
4070 qstats->total_bytes_transmitted_lo);
4071 ADD_64(fstats->total_unicast_packets_received_hi,
4072 qstats->total_unicast_packets_received_hi,
4073 fstats->total_unicast_packets_received_lo,
4074 qstats->total_unicast_packets_received_lo);
4075 ADD_64(fstats->total_multicast_packets_received_hi,
4076 qstats->total_multicast_packets_received_hi,
4077 fstats->total_multicast_packets_received_lo,
4078 qstats->total_multicast_packets_received_lo);
4079 ADD_64(fstats->total_broadcast_packets_received_hi,
4080 qstats->total_broadcast_packets_received_hi,
4081 fstats->total_broadcast_packets_received_lo,
4082 qstats->total_broadcast_packets_received_lo);
4083 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4084 qstats->total_unicast_packets_transmitted_hi,
4085 fstats->total_unicast_packets_transmitted_lo,
4086 qstats->total_unicast_packets_transmitted_lo);
4087 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4088 qstats->total_multicast_packets_transmitted_hi,
4089 fstats->total_multicast_packets_transmitted_lo,
4090 qstats->total_multicast_packets_transmitted_lo);
4091 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4092 qstats->total_broadcast_packets_transmitted_hi,
4093 fstats->total_broadcast_packets_transmitted_lo,
4094 qstats->total_broadcast_packets_transmitted_lo);
4095 ADD_64(fstats->valid_bytes_received_hi,
4096 qstats->valid_bytes_received_hi,
4097 fstats->valid_bytes_received_lo,
4098 qstats->valid_bytes_received_lo);
4099
4100 ADD_64(estats->error_bytes_received_hi,
4101 qstats->error_bytes_received_hi,
4102 estats->error_bytes_received_lo,
4103 qstats->error_bytes_received_lo);
4104 ADD_64(estats->etherstatsoverrsizepkts_hi,
4105 qstats->etherstatsoverrsizepkts_hi,
4106 estats->etherstatsoverrsizepkts_lo,
4107 qstats->etherstatsoverrsizepkts_lo);
4108 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4109 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4110 }
4111
4112 ADD_64(fstats->total_bytes_received_hi,
4113 estats->rx_stat_ifhcinbadoctets_hi,
4114 fstats->total_bytes_received_lo,
4115 estats->rx_stat_ifhcinbadoctets_lo);
4116
4117 memcpy(estats, &(fstats->total_bytes_received_hi),
4118 sizeof(struct host_func_stats) - 2*sizeof(u32));
4119
4120 ADD_64(estats->etherstatsoverrsizepkts_hi,
4121 estats->rx_stat_dot3statsframestoolong_hi,
4122 estats->etherstatsoverrsizepkts_lo,
4123 estats->rx_stat_dot3statsframestoolong_lo);
4124 ADD_64(estats->error_bytes_received_hi,
4125 estats->rx_stat_ifhcinbadoctets_hi,
4126 estats->error_bytes_received_lo,
4127 estats->rx_stat_ifhcinbadoctets_lo);
4128
4129 if (bp->port.pmf) {
4130 estats->mac_filter_discard =
4131 le32_to_cpu(tport->mac_filter_discard);
4132 estats->xxoverflow_discard =
4133 le32_to_cpu(tport->xxoverflow_discard);
4134 estats->brb_truncate_discard =
4135 le32_to_cpu(tport->brb_truncate_discard);
4136 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4137 }
4138
4139 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4140
4141 bp->stats_pending = 0;
4142
4143 return 0;
4144}
4145
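/*
 * Fold the 64-bit hi/lo driver counters into the net_device_stats fields
 * reported to the stack, combining each pair with bnx2x_hilo().
 */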
4146static void bnx2x_net_stats_update(struct bnx2x *bp)
4147{
4148 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4149 struct net_device_stats *nstats = &bp->dev->stats;
4150 int i;
4151
4152 nstats->rx_packets =
4153 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4154 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4155 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4156
4157 nstats->tx_packets =
4158 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4159 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4160 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4161
4162 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4163
4164 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4165
4166 nstats->rx_dropped = estats->mac_discard;
4167 for_each_rx_queue(bp, i)
4168 nstats->rx_dropped +=
4169 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4170
4171 nstats->tx_dropped = 0;
4172
4173 nstats->multicast =
4174 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4175
4176 nstats->collisions =
4177 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4178
4179 nstats->rx_length_errors =
4180 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4181 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4182 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4183 bnx2x_hilo(&estats->brb_truncate_hi);
4184 nstats->rx_crc_errors =
4185 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4186 nstats->rx_frame_errors =
4187 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4188 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4189 nstats->rx_missed_errors = estats->xxoverflow_discard;
4190
4191 nstats->rx_errors = nstats->rx_length_errors +
4192 nstats->rx_over_errors +
4193 nstats->rx_crc_errors +
4194 nstats->rx_frame_errors +
4195 nstats->rx_fifo_errors +
4196 nstats->rx_missed_errors;
4197
4198 nstats->tx_aborted_errors =
4199 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4200 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4201 nstats->tx_carrier_errors =
4202 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4203 nstats->tx_fifo_errors = 0;
4204 nstats->tx_heartbeat_errors = 0;
4205 nstats->tx_window_errors = 0;
4206
4207 nstats->tx_errors = nstats->tx_aborted_errors +
4208 nstats->tx_carrier_errors +
4209 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4210}
4211
4212static void bnx2x_drv_stats_update(struct bnx2x *bp)
4213{
4214 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4215 int i;
4216
4217 estats->driver_xoff = 0;
4218 estats->rx_err_discard_pkt = 0;
4219 estats->rx_skb_alloc_failed = 0;
4220 estats->hw_csum_err = 0;
4221 for_each_rx_queue(bp, i) {
4222 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4223
4224 estats->driver_xoff += qstats->driver_xoff;
4225 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4226 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4227 estats->hw_csum_err += qstats->hw_csum_err;
4228 }
4229}
4230
4231static void bnx2x_stats_update(struct bnx2x *bp)
4232{
4233 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4234
4235 if (*stats_comp != DMAE_COMP_VAL)
4236 return;
4237
4238 if (bp->port.pmf)
4239 bnx2x_hw_stats_update(bp);
4240
4241 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4242 BNX2X_ERR("storm stats were not updated for 3 consecutive times\n");
4243 bnx2x_panic();
4244 return;
4245 }
4246
4247 bnx2x_net_stats_update(bp);
4248 bnx2x_drv_stats_update(bp);
4249
4250 if (bp->msglevel & NETIF_MSG_TIMER) {
4251 struct bnx2x_fastpath *fp0_rx = bp->fp;
4252 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4253 struct tstorm_per_client_stats *old_tclient =
4254 &bp->fp->old_tclient;
4255 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4256 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4257 struct net_device_stats *nstats = &bp->dev->stats;
4258 int i;
4259
4260 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4261 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4262 " tx pkt (%lx)\n",
4263 bnx2x_tx_avail(fp0_tx),
4264 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4265 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4266 " rx pkt (%lx)\n",
4267 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4268 fp0_rx->rx_comp_cons),
4269 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4270 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4271 "brb truncate %u\n",
4272 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4273 qstats->driver_xoff,
4274 estats->brb_drop_lo, estats->brb_truncate_lo);
4275 printk(KERN_DEBUG "tstats: checksum_discard %u "
4276 "packets_too_big_discard %lu no_buff_discard %lu "
4277 "mac_discard %u mac_filter_discard %u "
4278 "xxoverflow_discard %u brb_truncate_discard %u "
4279 "ttl0_discard %u\n",
4280 le32_to_cpu(old_tclient->checksum_discard),
4281 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4282 bnx2x_hilo(&qstats->no_buff_discard_hi),
4283 estats->mac_discard, estats->mac_filter_discard,
4284 estats->xxoverflow_discard, estats->brb_truncate_discard,
4285 le32_to_cpu(old_tclient->ttl0_discard));
4286
4287 for_each_queue(bp, i) {
4288 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4289 bnx2x_fp(bp, i, tx_pkt),
4290 bnx2x_fp(bp, i, rx_pkt),
4291 bnx2x_fp(bp, i, rx_calls));
4292 }
4293 }
4294
4295 bnx2x_hw_stats_post(bp);
4296 bnx2x_storm_stats_post(bp);
4297}
4298
4299static void bnx2x_port_stats_stop(struct bnx2x *bp)
4300{
4301 struct dmae_command *dmae;
4302 u32 opcode;
4303 int loader_idx = PMF_DMAE_C(bp);
4304 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4305
4306 bp->executer_idx = 0;
4307
4308 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4309 DMAE_CMD_C_ENABLE |
4310 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4311#ifdef __BIG_ENDIAN
4312 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4313#else
4314 DMAE_CMD_ENDIANITY_DW_SWAP |
4315#endif
4316 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4317 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4318
4319 if (bp->port.port_stx) {
4320
4321 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4322 if (bp->func_stx)
4323 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4324 else
4325 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4326 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4327 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4328 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4329 dmae->dst_addr_hi = 0;
4330 dmae->len = sizeof(struct host_port_stats) >> 2;
4331 if (bp->func_stx) {
4332 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4333 dmae->comp_addr_hi = 0;
4334 dmae->comp_val = 1;
4335 } else {
4336 dmae->comp_addr_lo =
4337 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4338 dmae->comp_addr_hi =
4339 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4340 dmae->comp_val = DMAE_COMP_VAL;
4341
4342 *stats_comp = 0;
4343 }
4344 }
4345
4346 if (bp->func_stx) {
4347
4348 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4349 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4350 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4351 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4352 dmae->dst_addr_lo = bp->func_stx >> 2;
4353 dmae->dst_addr_hi = 0;
4354 dmae->len = sizeof(struct host_func_stats) >> 2;
4355 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4356 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4357 dmae->comp_val = DMAE_COMP_VAL;
4358
4359 *stats_comp = 0;
4360 }
4361}
4362
4363static void bnx2x_stats_stop(struct bnx2x *bp)
4364{
4365 int update = 0;
4366
4367 bnx2x_stats_comp(bp);
4368
4369 if (bp->port.pmf)
4370 update = (bnx2x_hw_stats_update(bp) == 0);
4371
4372 update |= (bnx2x_storm_stats_update(bp) == 0);
4373
4374 if (update) {
4375 bnx2x_net_stats_update(bp);
4376
4377 if (bp->port.pmf)
4378 bnx2x_port_stats_stop(bp);
4379
4380 bnx2x_hw_stats_post(bp);
4381 bnx2x_stats_comp(bp);
4382 }
4383}
4384
4385static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4386{
4387}
4388
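/*
 * Statistics state machine: rows are the current state (DISABLED/ENABLED),
 * columns are the events (PMF, LINK_UP, UPDATE, STOP). Each entry names
 * the action to run and the next state; bnx2x_stats_handle() below does
 * the dispatch.
 */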
4389static const struct {
4390 void (*action)(struct bnx2x *bp);
4391 enum bnx2x_stats_state next_state;
4392} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4393/* state event */
4394{
4395/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4396/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4397/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4398/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4399},
4400{
4401/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4402/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4403/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4404/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4405}
4406};
4407
4408static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4409{
4410 enum bnx2x_stats_state state = bp->stats_state;
4411
4412 bnx2x_stats_stm[state][event].action(bp);
4413 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4414
4415 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4416 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4417 state, event, bp->stats_state);
4418}
4419
4420static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4421{
4422 struct dmae_command *dmae;
4423 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4424
4425 /* sanity */
4426 if (!bp->port.pmf || !bp->port.port_stx) {
4427 BNX2X_ERR("BUG!\n");
4428 return;
4429 }
4430
4431 bp->executer_idx = 0;
4432
4433 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4434 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4435 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4436 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4437#ifdef __BIG_ENDIAN
4438 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4439#else
4440 DMAE_CMD_ENDIANITY_DW_SWAP |
4441#endif
4442 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4443 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4444 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4445 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4446 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4447 dmae->dst_addr_hi = 0;
4448 dmae->len = sizeof(struct host_port_stats) >> 2;
4449 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4450 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4451 dmae->comp_val = DMAE_COMP_VAL;
4452
4453 *stats_comp = 0;
4454 bnx2x_hw_stats_post(bp);
4455 bnx2x_stats_comp(bp);
4456}
4457
4458static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4459{
4460 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4461 int port = BP_PORT(bp);
4462 int func;
4463 u32 func_stx;
4464
4465 /* sanity */
4466 if (!bp->port.pmf || !bp->func_stx) {
4467 BNX2X_ERR("BUG!\n");
4468 return;
4469 }
4470
4471 /* save our func_stx */
4472 func_stx = bp->func_stx;
4473
4474 for (vn = VN_0; vn < vn_max; vn++) {
4475 func = 2*vn + port;
4476
4477 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4478 bnx2x_func_stats_init(bp);
4479 bnx2x_hw_stats_post(bp);
4480 bnx2x_stats_comp(bp);
4481 }
4482
4483 /* restore our func_stx */
4484 bp->func_stx = func_stx;
4485}
4486
4487static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4488{
4489 struct dmae_command *dmae = &bp->stats_dmae;
4490 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4491
4492 /* sanity */
4493 if (!bp->func_stx) {
4494 BNX2X_ERR("BUG!\n");
4495 return;
4496 }
4497
4498 bp->executer_idx = 0;
4499 memset(dmae, 0, sizeof(struct dmae_command));
4500
4501 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4502 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4503 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4504#ifdef __BIG_ENDIAN
4505 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4506#else
4507 DMAE_CMD_ENDIANITY_DW_SWAP |
4508#endif
4509 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4510 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4511 dmae->src_addr_lo = bp->func_stx >> 2;
4512 dmae->src_addr_hi = 0;
4513 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4514 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4515 dmae->len = sizeof(struct host_func_stats) >> 2;
4516 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4517 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4518 dmae->comp_val = DMAE_COMP_VAL;
4519
4520 *stats_comp = 0;
4521 bnx2x_hw_stats_post(bp);
4522 bnx2x_stats_comp(bp);
4523}
4524
4525static void bnx2x_stats_init(struct bnx2x *bp)
4526{
4527 int port = BP_PORT(bp);
4528 int func = BP_FUNC(bp);
4529 int i;
4530
4531 bp->stats_pending = 0;
4532 bp->executer_idx = 0;
4533 bp->stats_counter = 0;
4534
4535 /* port and func stats for management */
4536 if (!BP_NOMCP(bp)) {
4537 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4538 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4539
4540 } else {
4541 bp->port.port_stx = 0;
4542 bp->func_stx = 0;
4543 }
4544 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4545 bp->port.port_stx, bp->func_stx);
4546
4547 /* port stats */
4548 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4549 bp->port.old_nig_stats.brb_discard =
4550 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4551 bp->port.old_nig_stats.brb_truncate =
4552 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4553 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4554 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4555 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4556 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4557
4558 /* function stats */
4559 for_each_queue(bp, i) {
4560 struct bnx2x_fastpath *fp = &bp->fp[i];
4561
4562 memset(&fp->old_tclient, 0,
4563 sizeof(struct tstorm_per_client_stats));
4564 memset(&fp->old_uclient, 0,
4565 sizeof(struct ustorm_per_client_stats));
4566 memset(&fp->old_xclient, 0,
4567 sizeof(struct xstorm_per_client_stats));
4568 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4569 }
4570
4571 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4572 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4573
4574 bp->stats_state = STATS_STATE_DISABLED;
4575
4576 if (bp->port.pmf) {
4577 if (bp->port.port_stx)
4578 bnx2x_port_stats_base_init(bp);
4579
4580 if (bp->func_stx)
4581 bnx2x_func_stats_base_init(bp);
4582
4583 } else if (bp->func_stx)
4584 bnx2x_func_stats_base_update(bp);
4585}
4586
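/*
 * Periodic driver timer: optionally services queue 0 in poll mode,
 * maintains the driver/MCP heartbeat through the pulse mailboxes, kicks a
 * statistics update while the device is up and then re-arms itself.
 */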
4587static void bnx2x_timer(unsigned long data)
4588{
4589 struct bnx2x *bp = (struct bnx2x *) data;
4590
4591 if (!netif_running(bp->dev))
4592 return;
4593
4594 if (atomic_read(&bp->intr_sem) != 0)
4595 goto timer_restart;
4596
4597 if (poll) {
4598 struct bnx2x_fastpath *fp = &bp->fp[0];
4599 int rc;
4600
4601 bnx2x_tx_int(fp);
4602 rc = bnx2x_rx_int(fp, 1000);
4603 }
4604
4605 if (!BP_NOMCP(bp)) {
4606 int func = BP_FUNC(bp);
4607 u32 drv_pulse;
4608 u32 mcp_pulse;
4609
4610 ++bp->fw_drv_pulse_wr_seq;
4611 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4612 /* TBD - add SYSTEM_TIME */
4613 drv_pulse = bp->fw_drv_pulse_wr_seq;
4614 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4615
4616 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4617 MCP_PULSE_SEQ_MASK);
4618 /* The delta between driver pulse and mcp response
4619 * should be 1 (before mcp response) or 0 (after mcp response)
4620 */
4621 if ((drv_pulse != mcp_pulse) &&
4622 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4623 /* someone lost a heartbeat... */
4624 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4625 drv_pulse, mcp_pulse);
4626 }
4627 }
4628
4629 if ((bp->state == BNX2X_STATE_OPEN) ||
4630 (bp->state == BNX2X_STATE_DISABLED))
4631 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4632
4633timer_restart:
4634 mod_timer(&bp->timer, jiffies + bp->current_interval);
4635}
4636
4637/* end of Statistics */
4638
4639/* nic init */
4640
4641/*
4642 * nic init service functions
4643 */
4644
4645static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4646{
4647 int port = BP_PORT(bp);
4648
4649 /* "CSTORM" */
4650 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4651 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4652 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4653 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4654 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4655 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4656}
4657
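/*
 * Program a per-queue status block: write the host address and owning
 * function of the USTORM and CSTORM sections, start with host coalescing
 * disabled on every index, and enable the IGU interrupt for this sb_id.
 */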
4658static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4659 dma_addr_t mapping, int sb_id)
4660{
4661 int port = BP_PORT(bp);
4662 int func = BP_FUNC(bp);
4663 int index;
4664 u64 section;
4665
4666 /* USTORM */
4667 section = ((u64)mapping) + offsetof(struct host_status_block,
4668 u_status_block);
4669 sb->u_status_block.status_block_id = sb_id;
4670
4671 REG_WR(bp, BAR_CSTRORM_INTMEM +
4672 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4673 REG_WR(bp, BAR_CSTRORM_INTMEM +
4674 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4675 U64_HI(section));
4676 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4677 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4678
4679 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4680 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4681 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4682
4683 /* CSTORM */
4684 section = ((u64)mapping) + offsetof(struct host_status_block,
4685 c_status_block);
4686 sb->c_status_block.status_block_id = sb_id;
4687
4688 REG_WR(bp, BAR_CSTRORM_INTMEM +
4689 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4690 REG_WR(bp, BAR_CSTRORM_INTMEM +
4691 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4692 U64_HI(section));
4693 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4694 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4695
4696 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4697 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4698 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4699
4700 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4701}
4702
4703static void bnx2x_zero_def_sb(struct bnx2x *bp)
4704{
4705 int func = BP_FUNC(bp);
4706
4707 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4708 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4709 sizeof(struct tstorm_def_status_block)/4);
4710 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4711 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4712 sizeof(struct cstorm_def_status_block_u)/4);
4713 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4714 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4715 sizeof(struct cstorm_def_status_block_c)/4);
4716 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4717 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4718 sizeof(struct xstorm_def_status_block)/4);
4719}
4720
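/*
 * Program the default status block: capture the attention group signal
 * registers, then set up the ATTN, USTORM, CSTORM, TSTORM and XSTORM
 * sections (host address, owning function, host coalescing disabled on
 * all indices) and enable the IGU interrupt.
 */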
4721static void bnx2x_init_def_sb(struct bnx2x *bp,
4722 struct host_def_status_block *def_sb,
4723 dma_addr_t mapping, int sb_id)
4724{
4725 int port = BP_PORT(bp);
4726 int func = BP_FUNC(bp);
4727 int index, val, reg_offset;
4728 u64 section;
4729
4730 /* ATTN */
4731 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4732 atten_status_block);
4733 def_sb->atten_status_block.status_block_id = sb_id;
4734
4735 bp->attn_state = 0;
4736
4737 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4738 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4739
4740 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4741 bp->attn_group[index].sig[0] = REG_RD(bp,
4742 reg_offset + 0x10*index);
4743 bp->attn_group[index].sig[1] = REG_RD(bp,
4744 reg_offset + 0x4 + 0x10*index);
4745 bp->attn_group[index].sig[2] = REG_RD(bp,
4746 reg_offset + 0x8 + 0x10*index);
4747 bp->attn_group[index].sig[3] = REG_RD(bp,
4748 reg_offset + 0xc + 0x10*index);
4749 }
4750
4751 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4752 HC_REG_ATTN_MSG0_ADDR_L);
4753
4754 REG_WR(bp, reg_offset, U64_LO(section));
4755 REG_WR(bp, reg_offset + 4, U64_HI(section));
4756
4757 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4758
4759 val = REG_RD(bp, reg_offset);
4760 val |= sb_id;
4761 REG_WR(bp, reg_offset, val);
4762
4763 /* USTORM */
4764 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4765 u_def_status_block);
4766 def_sb->u_def_status_block.status_block_id = sb_id;
4767
4768 REG_WR(bp, BAR_CSTRORM_INTMEM +
4769 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4770 REG_WR(bp, BAR_CSTRORM_INTMEM +
4771 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4772 U64_HI(section));
4773 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4774 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4775
4776 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4777 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4778 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4779
4780 /* CSTORM */
4781 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4782 c_def_status_block);
4783 def_sb->c_def_status_block.status_block_id = sb_id;
4784
4785 REG_WR(bp, BAR_CSTRORM_INTMEM +
4786 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4787 REG_WR(bp, BAR_CSTRORM_INTMEM +
4788 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4789 U64_HI(section));
4790 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4791 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4792
4793 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4794 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4795 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4796
4797 /* TSTORM */
4798 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4799 t_def_status_block);
4800 def_sb->t_def_status_block.status_block_id = sb_id;
4801
4802 REG_WR(bp, BAR_TSTRORM_INTMEM +
4803 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4804 REG_WR(bp, BAR_TSTRORM_INTMEM +
4805 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4806 U64_HI(section));
4807 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4808 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4809
4810 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4811 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4812 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4813
4814 /* XSTORM */
4815 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4816 x_def_status_block);
4817 def_sb->x_def_status_block.status_block_id = sb_id;
4818
4819 REG_WR(bp, BAR_XSTRORM_INTMEM +
4820 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4821 REG_WR(bp, BAR_XSTRORM_INTMEM +
4822 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4823 U64_HI(section));
4824 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4825 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4826
4827 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4828 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4829 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4830
4831 bp->stats_pending = 0;
4832 bp->set_mac_pending = 0;
4833
4834 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4835}
4836
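/*
 * Write the Rx/Tx coalescing timeouts (rx_ticks/tx_ticks scaled down by
 * 12) into the status-block HC timeout fields, and disable host
 * coalescing on an index whenever its timeout works out to zero.
 */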
4837static void bnx2x_update_coalesce(struct bnx2x *bp)
4838{
4839 int port = BP_PORT(bp);
4840 int i;
4841
4842 for_each_queue(bp, i) {
4843 int sb_id = bp->fp[i].sb_id;
4844
4845 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4846 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4847 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4848 U_SB_ETH_RX_CQ_INDEX),
4849 bp->rx_ticks/12);
4850 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4851 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4852 U_SB_ETH_RX_CQ_INDEX),
4853 (bp->rx_ticks/12) ? 0 : 1);
4854
4855 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4856 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4857 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4858 C_SB_ETH_TX_CQ_INDEX),
4859 bp->tx_ticks/12);
4860 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4861 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4862 C_SB_ETH_TX_CQ_INDEX),
4863 (bp->tx_ticks/12) ? 0 : 1);
4864 }
4865}
4866
4867static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4868 struct bnx2x_fastpath *fp, int last)
4869{
4870 int i;
4871
4872 for (i = 0; i < last; i++) {
4873 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4874 struct sk_buff *skb = rx_buf->skb;
4875
4876 if (skb == NULL) {
4877 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4878 continue;
4879 }
4880
4881 if (fp->tpa_state[i] == BNX2X_TPA_START)
4882 pci_unmap_single(bp->pdev,
4883 pci_unmap_addr(rx_buf, mapping),
4884 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4885
4886 dev_kfree_skb(skb);
4887 rx_buf->skb = NULL;
4888 }
4889}
4890
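/*
 * Build the Rx rings for every Rx queue: pre-allocate the TPA skb pool
 * (disabling TPA on a queue if allocation fails), chain the "next page"
 * entries of the SGE, BD and CQ rings, fill the rings with SGEs and skbs,
 * and publish the initial producers to the chip.
 */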
4891static void bnx2x_init_rx_rings(struct bnx2x *bp)
4892{
4893 int func = BP_FUNC(bp);
4894 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4895 ETH_MAX_AGGREGATION_QUEUES_E1H;
4896 u16 ring_prod, cqe_ring_prod;
4897 int i, j;
4898
4899 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4900 DP(NETIF_MSG_IFUP,
4901 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4902
4903 if (bp->flags & TPA_ENABLE_FLAG) {
4904
4905 for_each_rx_queue(bp, j) {
4906 struct bnx2x_fastpath *fp = &bp->fp[j];
4907
4908 for (i = 0; i < max_agg_queues; i++) {
4909 fp->tpa_pool[i].skb =
4910 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4911 if (!fp->tpa_pool[i].skb) {
4912 BNX2X_ERR("Failed to allocate TPA "
4913 "skb pool for queue[%d] - "
4914 "disabling TPA on this "
4915 "queue!\n", j);
4916 bnx2x_free_tpa_pool(bp, fp, i);
4917 fp->disable_tpa = 1;
4918 break;
4919 }
4920 pci_unmap_addr_set((struct sw_rx_bd *)
4921 &fp->tpa_pool[i],
4922 mapping, 0);
4923 fp->tpa_state[i] = BNX2X_TPA_STOP;
4924 }
4925 }
4926 }
4927
4928 for_each_rx_queue(bp, j) {
4929 struct bnx2x_fastpath *fp = &bp->fp[j];
4930
4931 fp->rx_bd_cons = 0;
4932 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4933 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4934
4935 /* Mark queue as Rx */
4936 fp->is_rx_queue = 1;
4937
4938 /* "next page" elements initialization */
4939 /* SGE ring */
4940 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4941 struct eth_rx_sge *sge;
4942
4943 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4944 sge->addr_hi =
4945 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4946 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4947 sge->addr_lo =
4948 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4949 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4950 }
4951
4952 bnx2x_init_sge_ring_bit_mask(fp);
4953
4954 /* RX BD ring */
4955 for (i = 1; i <= NUM_RX_RINGS; i++) {
4956 struct eth_rx_bd *rx_bd;
4957
4958 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4959 rx_bd->addr_hi =
4960 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4961 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4962 rx_bd->addr_lo =
4963 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4964 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4965 }
4966
4967 /* CQ ring */
4968 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4969 struct eth_rx_cqe_next_page *nextpg;
4970
4971 nextpg = (struct eth_rx_cqe_next_page *)
4972 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4973 nextpg->addr_hi =
4974 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4975 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4976 nextpg->addr_lo =
4977 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4978 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4979 }
4980
4981 /* Allocate SGEs and initialize the ring elements */
4982 for (i = 0, ring_prod = 0;
4983 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4984
4985 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4986 BNX2X_ERR("was only able to allocate "
4987 "%d rx sges\n", i);
4988 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4989 /* Cleanup already allocated elements */
4990 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4991 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4992 fp->disable_tpa = 1;
4993 ring_prod = 0;
4994 break;
4995 }
4996 ring_prod = NEXT_SGE_IDX(ring_prod);
4997 }
4998 fp->rx_sge_prod = ring_prod;
4999
5000 /* Allocate BDs and initialize BD ring */
5001 fp->rx_comp_cons = 0;
5002 cqe_ring_prod = ring_prod = 0;
5003 for (i = 0; i < bp->rx_ring_size; i++) {
5004 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5005 BNX2X_ERR("was only able to allocate "
5006 "%d rx skbs on queue[%d]\n", i, j);
5007 fp->eth_q_stats.rx_skb_alloc_failed++;
5008 break;
5009 }
5010 ring_prod = NEXT_RX_IDX(ring_prod);
5011 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5012 WARN_ON(ring_prod <= i);
5013 }
5014
5015 fp->rx_bd_prod = ring_prod;
5016 /* must not have more available CQEs than BDs */
5017 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5018 cqe_ring_prod);
5019 fp->rx_pkt = fp->rx_calls = 0;
5020
5021 /* Warning!
5022 * this will generate an interrupt (to the TSTORM);
5023 * it must only be done after the chip is initialized
5024 */
5025 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5026 fp->rx_sge_prod);
5027 if (j != 0)
5028 continue;
5029
5030 REG_WR(bp, BAR_USTRORM_INTMEM +
5031 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5032 U64_LO(fp->rx_comp_mapping));
5033 REG_WR(bp, BAR_USTRORM_INTMEM +
5034 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5035 U64_HI(fp->rx_comp_mapping));
5036 }
5037}
5038
5039static void bnx2x_init_tx_ring(struct bnx2x *bp)
5040{
5041 int i, j;
5042
5043 for_each_tx_queue(bp, j) {
5044 struct bnx2x_fastpath *fp = &bp->fp[j];
5045
5046 for (i = 1; i <= NUM_TX_RINGS; i++) {
5047 struct eth_tx_next_bd *tx_next_bd =
5048 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5049
5050 tx_next_bd->addr_hi =
5051 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5052 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5053 tx_next_bd->addr_lo =
5054 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5055 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5056 }
5057
5058 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5059 fp->tx_db.data.zero_fill1 = 0;
5060 fp->tx_db.data.prod = 0;
5061
5062 fp->tx_pkt_prod = 0;
5063 fp->tx_pkt_cons = 0;
5064 fp->tx_bd_prod = 0;
5065 fp->tx_bd_cons = 0;
5066 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5067 fp->tx_pkt = 0;
5068 }
5069
5070 /* clean tx statistics */
5071 for_each_rx_queue(bp, i)
5072 bnx2x_fp(bp, i, tx_pkt) = 0;
5073}
5074
5075static void bnx2x_init_sp_ring(struct bnx2x *bp)
5076{
5077 int func = BP_FUNC(bp);
5078
5079 spin_lock_init(&bp->spq_lock);
5080
5081 bp->spq_left = MAX_SPQ_PENDING;
5082 bp->spq_prod_idx = 0;
5083 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5084 bp->spq_prod_bd = bp->spq;
5085 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5086
5087 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5088 U64_LO(bp->spq_mapping));
5089 REG_WR(bp,
5090 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5091 U64_HI(bp->spq_mapping));
5092
5093 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5094 bp->spq_prod_idx);
5095}
5096
5097static void bnx2x_init_context(struct bnx2x *bp)
5098{
5099 int i;
5100
5101 for_each_rx_queue(bp, i) {
5102 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5103 struct bnx2x_fastpath *fp = &bp->fp[i];
5104 u8 cl_id = fp->cl_id;
5105
5106 context->ustorm_st_context.common.sb_index_numbers =
5107 BNX2X_RX_SB_INDEX_NUM;
5108 context->ustorm_st_context.common.clientId = cl_id;
5109 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5110 context->ustorm_st_context.common.flags =
5111 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5112 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5113 context->ustorm_st_context.common.statistics_counter_id =
5114 cl_id;
5115 context->ustorm_st_context.common.mc_alignment_log_size =
5116 BNX2X_RX_ALIGN_SHIFT;
5117 context->ustorm_st_context.common.bd_buff_size =
5118 bp->rx_buf_size;
5119 context->ustorm_st_context.common.bd_page_base_hi =
5120 U64_HI(fp->rx_desc_mapping);
5121 context->ustorm_st_context.common.bd_page_base_lo =
5122 U64_LO(fp->rx_desc_mapping);
5123 if (!fp->disable_tpa) {
5124 context->ustorm_st_context.common.flags |=
5125 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5126 context->ustorm_st_context.common.sge_buff_size =
5127 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5128 (u32)0xffff);
5129 context->ustorm_st_context.common.sge_page_base_hi =
5130 U64_HI(fp->rx_sge_mapping);
5131 context->ustorm_st_context.common.sge_page_base_lo =
5132 U64_LO(fp->rx_sge_mapping);
5133
5134 context->ustorm_st_context.common.max_sges_for_packet =
5135 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5136 context->ustorm_st_context.common.max_sges_for_packet =
5137 ((context->ustorm_st_context.common.
5138 max_sges_for_packet + PAGES_PER_SGE - 1) &
5139 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5140 }
5141
5142 context->ustorm_ag_context.cdu_usage =
5143 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5144 CDU_REGION_NUMBER_UCM_AG,
5145 ETH_CONNECTION_TYPE);
5146
5147 context->xstorm_ag_context.cdu_reserved =
5148 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5149 CDU_REGION_NUMBER_XCM_AG,
5150 ETH_CONNECTION_TYPE);
5151 }
5152
5153 for_each_tx_queue(bp, i) {
5154 struct bnx2x_fastpath *fp = &bp->fp[i];
5155 struct eth_context *context =
5156 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5157
5158 context->cstorm_st_context.sb_index_number =
5159 C_SB_ETH_TX_CQ_INDEX;
5160 context->cstorm_st_context.status_block_id = fp->sb_id;
5161
5162 context->xstorm_st_context.tx_bd_page_base_hi =
5163 U64_HI(fp->tx_desc_mapping);
5164 context->xstorm_st_context.tx_bd_page_base_lo =
5165 U64_LO(fp->tx_desc_mapping);
5166 context->xstorm_st_context.statistics_data = (fp->cl_id |
5167 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5168 }
5169}
5170
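/*
 * Fill the TSTORM RSS indirection table by striping the leading client id
 * plus queue index across all Rx queues; nothing to do when RSS is
 * disabled.
 */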
5171static void bnx2x_init_ind_table(struct bnx2x *bp)
5172{
5173 int func = BP_FUNC(bp);
5174 int i;
5175
5176 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5177 return;
5178
5179 DP(NETIF_MSG_IFUP,
5180 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
5181 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5182 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5183 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5184 bp->fp->cl_id + (i % bp->num_rx_queues));
5185}
5186
5187static void bnx2x_set_client_config(struct bnx2x *bp)
5188{
5189 struct tstorm_eth_client_config tstorm_client = {0};
5190 int port = BP_PORT(bp);
5191 int i;
5192
5193 tstorm_client.mtu = bp->dev->mtu;
5194 tstorm_client.config_flags =
5195 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5196 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5197#ifdef BCM_VLAN
5198 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5199 tstorm_client.config_flags |=
5200 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5201 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5202 }
5203#endif
5204
5205 for_each_queue(bp, i) {
5206 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5207
5208 REG_WR(bp, BAR_TSTRORM_INTMEM +
5209 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5210 ((u32 *)&tstorm_client)[0]);
5211 REG_WR(bp, BAR_TSTRORM_INTMEM +
5212 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5213 ((u32 *)&tstorm_client)[1]);
5214 }
5215
5216 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5217 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5218}
5219
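/*
 * Translate the current Rx mode into TSTORM drop/accept masks for
 * unicast, multicast and broadcast traffic, program the matching NIG LLH
 * mask, and push the per-client config unless Rx is completely disabled.
 */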
5220static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5221{
5222 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5223 int mode = bp->rx_mode;
5224 int mask = (1 << BP_L_ID(bp));
5225 int func = BP_FUNC(bp);
5226 int port = BP_PORT(bp);
5227 int i;
5228 /* All but management unicast packets should pass to the host as well */
5229 u32 llh_mask =
5230 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5231 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5232 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5233 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5234
5235 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
5236
5237 switch (mode) {
5238 case BNX2X_RX_MODE_NONE: /* no Rx */
5239 tstorm_mac_filter.ucast_drop_all = mask;
5240 tstorm_mac_filter.mcast_drop_all = mask;
5241 tstorm_mac_filter.bcast_drop_all = mask;
5242 break;
5243
5244 case BNX2X_RX_MODE_NORMAL:
5245 tstorm_mac_filter.bcast_accept_all = mask;
5246 break;
5247
5248 case BNX2X_RX_MODE_ALLMULTI:
5249 tstorm_mac_filter.mcast_accept_all = mask;
5250 tstorm_mac_filter.bcast_accept_all = mask;
5251 break;
5252
5253 case BNX2X_RX_MODE_PROMISC:
5254 tstorm_mac_filter.ucast_accept_all = mask;
5255 tstorm_mac_filter.mcast_accept_all = mask;
5256 tstorm_mac_filter.bcast_accept_all = mask;
5257 /* pass management unicast packets as well */
5258 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5259 break;
5260
5261 default:
5262 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5263 break;
5264 }
5265
5266 REG_WR(bp,
5267 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5268 llh_mask);
5269
5270 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5271 REG_WR(bp, BAR_TSTRORM_INTMEM +
5272 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5273 ((u32 *)&tstorm_mac_filter)[i]);
5274
5275/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5276 ((u32 *)&tstorm_mac_filter)[i]); */
5277 }
5278
5279 if (mode != BNX2X_RX_MODE_NONE)
5280 bnx2x_set_client_config(bp);
5281}
5282
5283static void bnx2x_init_internal_common(struct bnx2x *bp)
5284{
5285 int i;
5286
5287 /* Zero this manually as its initialization is
5288 currently missing in the initTool */
5289 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5290 REG_WR(bp, BAR_USTRORM_INTMEM +
5291 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5292}
5293
5294static void bnx2x_init_internal_port(struct bnx2x *bp)
5295{
5296 int port = BP_PORT(bp);
5297
5298 REG_WR(bp,
5299 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5300 REG_WR(bp,
5301 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5302 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5303 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5304}
5305
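/*
 * Per-function internal memory init: TSTORM RSS/TPA/E1HOV configuration,
 * per-client storm statistics reset, statistics query addresses, CQ page
 * mappings and aggregation size per client, dropless flow control
 * thresholds (E1H) and the rate shaping/fairness (cmng) context.
 */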
5306static void bnx2x_init_internal_func(struct bnx2x *bp)
5307{
5308 struct tstorm_eth_function_common_config tstorm_config = {0};
5309 struct stats_indication_flags stats_flags = {0};
5310 int port = BP_PORT(bp);
5311 int func = BP_FUNC(bp);
5312 int i, j;
5313 u32 offset;
5314 u16 max_agg_size;
5315
5316 if (is_multi(bp)) {
5317 tstorm_config.config_flags = MULTI_FLAGS(bp);
5318 tstorm_config.rss_result_mask = MULTI_MASK;
5319 }
5320
5321 /* Enable TPA if needed */
5322 if (bp->flags & TPA_ENABLE_FLAG)
5323 tstorm_config.config_flags |=
5324 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5325
5326 if (IS_E1HMF(bp))
5327 tstorm_config.config_flags |=
5328 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5329
5330 tstorm_config.leading_client_id = BP_L_ID(bp);
5331
5332 REG_WR(bp, BAR_TSTRORM_INTMEM +
5333 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5334 (*(u32 *)&tstorm_config));
5335
5336 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5337 bnx2x_set_storm_rx_mode(bp);
5338
5339 for_each_queue(bp, i) {
5340 u8 cl_id = bp->fp[i].cl_id;
5341
5342 /* reset xstorm per client statistics */
5343 offset = BAR_XSTRORM_INTMEM +
5344 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5345 for (j = 0;
5346 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5347 REG_WR(bp, offset + j*4, 0);
5348
5349 /* reset tstorm per client statistics */
5350 offset = BAR_TSTRORM_INTMEM +
5351 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5352 for (j = 0;
5353 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5354 REG_WR(bp, offset + j*4, 0);
5355
5356 /* reset ustorm per client statistics */
5357 offset = BAR_USTRORM_INTMEM +
5358 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5359 for (j = 0;
5360 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5361 REG_WR(bp, offset + j*4, 0);
5362 }
5363
5364 /* Init statistics related context */
5365 stats_flags.collect_eth = 1;
5366
5367 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5368 ((u32 *)&stats_flags)[0]);
5369 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5370 ((u32 *)&stats_flags)[1]);
5371
5372 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5373 ((u32 *)&stats_flags)[0]);
5374 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5375 ((u32 *)&stats_flags)[1]);
5376
5377 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5378 ((u32 *)&stats_flags)[0]);
5379 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5380 ((u32 *)&stats_flags)[1]);
5381
5382 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5383 ((u32 *)&stats_flags)[0]);
5384 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5385 ((u32 *)&stats_flags)[1]);
5386
5387 REG_WR(bp, BAR_XSTRORM_INTMEM +
5388 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5389 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5390 REG_WR(bp, BAR_XSTRORM_INTMEM +
5391 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5392 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5393
5394 REG_WR(bp, BAR_TSTRORM_INTMEM +
5395 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5396 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5397 REG_WR(bp, BAR_TSTRORM_INTMEM +
5398 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5399 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5400
5401 REG_WR(bp, BAR_USTRORM_INTMEM +
5402 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5403 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5404 REG_WR(bp, BAR_USTRORM_INTMEM +
5405 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5406 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5407
5408 if (CHIP_IS_E1H(bp)) {
5409 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5410 IS_E1HMF(bp));
5411 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5412 IS_E1HMF(bp));
5413 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5414 IS_E1HMF(bp));
5415 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5416 IS_E1HMF(bp));
5417
5418 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5419 bp->e1hov);
5420 }
5421
5422 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5423 max_agg_size =
5424 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5425 SGE_PAGE_SIZE * PAGES_PER_SGE),
5426 (u32)0xffff);
5427 for_each_rx_queue(bp, i) {
5428 struct bnx2x_fastpath *fp = &bp->fp[i];
5429
5430 REG_WR(bp, BAR_USTRORM_INTMEM +
5431 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5432 U64_LO(fp->rx_comp_mapping));
5433 REG_WR(bp, BAR_USTRORM_INTMEM +
5434 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5435 U64_HI(fp->rx_comp_mapping));
5436
5437 /* Next page */
5438 REG_WR(bp, BAR_USTRORM_INTMEM +
5439 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5440 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5441 REG_WR(bp, BAR_USTRORM_INTMEM +
5442 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5443 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5444
5445 REG_WR16(bp, BAR_USTRORM_INTMEM +
5446 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5447 max_agg_size);
5448 }
5449
5450 /* dropless flow control */
5451 if (CHIP_IS_E1H(bp)) {
5452 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5453
5454 rx_pause.bd_thr_low = 250;
5455 rx_pause.cqe_thr_low = 250;
5456 rx_pause.cos = 1;
5457 rx_pause.sge_thr_low = 0;
5458 rx_pause.bd_thr_high = 350;
5459 rx_pause.cqe_thr_high = 350;
5460 rx_pause.sge_thr_high = 0;
5461
5462 for_each_rx_queue(bp, i) {
5463 struct bnx2x_fastpath *fp = &bp->fp[i];
5464
5465 if (!fp->disable_tpa) {
5466 rx_pause.sge_thr_low = 150;
5467 rx_pause.sge_thr_high = 250;
5468 }
5469
5470
5471 offset = BAR_USTRORM_INTMEM +
5472 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5473 fp->cl_id);
5474 for (j = 0;
5475 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5476 j++)
5477 REG_WR(bp, offset + j*4,
5478 ((u32 *)&rx_pause)[j]);
5479 }
5480 }
5481
5482 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5483
5484 /* Init rate shaping and fairness contexts */
5485 if (IS_E1HMF(bp)) {
5486 int vn;
5487
5488  /* During init there is no active link.
5489     Until the link is up, set the link rate to 10Gbps */
5490 bp->link_vars.line_speed = SPEED_10000;
5491 bnx2x_init_port_minmax(bp);
5492
5493 bnx2x_calc_vn_weight_sum(bp);
5494
5495 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5496 bnx2x_init_vn_minmax(bp, 2*vn + port);
5497
5498 /* Enable rate shaping and fairness */
5499 bp->cmng.flags.cmng_enables =
5500 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5501 if (bp->vn_weight_sum)
5502 bp->cmng.flags.cmng_enables |=
5503 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5504 else
5505 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5506 " fairness will be disabled\n");
5507 } else {
5508 /* rate shaping and fairness are disabled */
5509 DP(NETIF_MSG_IFUP,
5510 "single function mode minmax will be disabled\n");
5511 }
5512
5513
5514 /* Store it to internal memory */
5515 if (bp->port.pmf)
5516 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5517 REG_WR(bp, BAR_XSTRORM_INTMEM +
5518 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5519 ((u32 *)(&bp->cmng))[i]);
5520}
5521
5522static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5523{
5524 switch (load_code) {
5525 case FW_MSG_CODE_DRV_LOAD_COMMON:
5526 bnx2x_init_internal_common(bp);
5527 /* no break */
5528
5529 case FW_MSG_CODE_DRV_LOAD_PORT:
5530 bnx2x_init_internal_port(bp);
5531 /* no break */
5532
5533 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5534 bnx2x_init_internal_func(bp);
5535 break;
5536
5537 default:
5538 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5539 break;
5540 }
5541}
5542
5543static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5544{
5545 int i;
5546
5547 for_each_queue(bp, i) {
5548 struct bnx2x_fastpath *fp = &bp->fp[i];
5549
5550 fp->bp = bp;
5551 fp->state = BNX2X_FP_STATE_CLOSED;
5552 fp->index = i;
5553 fp->cl_id = BP_L_ID(bp) + i;
5554 fp->sb_id = fp->cl_id;
5555 /* Suitable Rx and Tx SBs are served by the same client */
5556 if (i >= bp->num_rx_queues)
5557 fp->cl_id -= bp->num_rx_queues;
5558 DP(NETIF_MSG_IFUP,
5559 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5560 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5561 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5562 fp->sb_id);
5563 bnx2x_update_fpsb_idx(fp);
5564 }
5565
5566 /* ensure status block indices were read */
5567 rmb();
5568
5569
5570 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5571 DEF_SB_ID);
5572 bnx2x_update_dsb_idx(bp);
5573 bnx2x_update_coalesce(bp);
5574 bnx2x_init_rx_rings(bp);
5575 bnx2x_init_tx_ring(bp);
5576 bnx2x_init_sp_ring(bp);
5577 bnx2x_init_context(bp);
5578 bnx2x_init_internal(bp, load_code);
5579 bnx2x_init_ind_table(bp);
5580 bnx2x_stats_init(bp);
5581
5582 /* At this point, we are ready for interrupts */
5583 atomic_set(&bp->intr_sem, 0);
5584
5585 /* flush all before enabling interrupts */
5586 mb();
5587 mmiowb();
5588
5589 bnx2x_int_enable(bp);
5590
5591 /* Check for SPIO5 */
5592 bnx2x_attn_int_deasserted0(bp,
5593 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5594 AEU_INPUTS_ATTN_BITS_SPIO5);
5595}
5596
5597/* end of nic init */
5598
5599/*
5600 * gzip service functions
5601 */
5602
5603static int bnx2x_gunzip_init(struct bnx2x *bp)
5604{
5605 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5606 &bp->gunzip_mapping);
5607 if (bp->gunzip_buf == NULL)
5608 goto gunzip_nomem1;
5609
5610 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5611 if (bp->strm == NULL)
5612 goto gunzip_nomem2;
5613
5614 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5615 GFP_KERNEL);
5616 if (bp->strm->workspace == NULL)
5617 goto gunzip_nomem3;
5618
5619 return 0;
5620
5621gunzip_nomem3:
5622 kfree(bp->strm);
5623 bp->strm = NULL;
5624
5625gunzip_nomem2:
5626 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5627 bp->gunzip_mapping);
5628 bp->gunzip_buf = NULL;
5629
5630gunzip_nomem1:
5631 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5632 " un-compression\n", bp->dev->name);
5633 return -ENOMEM;
5634}
5635
5636static void bnx2x_gunzip_end(struct bnx2x *bp)
5637{
5638 kfree(bp->strm->workspace);
5639
5640 kfree(bp->strm);
5641 bp->strm = NULL;
5642
5643 if (bp->gunzip_buf) {
5644 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5645 bp->gunzip_mapping);
5646 bp->gunzip_buf = NULL;
5647 }
5648}
5649
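/* Inflate a gzip-wrapped firmware image into bp->gunzip_buf.
 * The blob must start with the standard 10-byte gzip header
 * (magic 0x1f 0x8b, deflate method); if the FNAME flag is set,
 * the zero-terminated original file name is skipped as well.
 */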
5650static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5651{
5652 int n, rc;
5653
5654 /* check gzip header */
5655 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5656 BNX2X_ERR("Bad gzip header\n");
5657 return -EINVAL;
5658 }
5659
5660 n = 10;
5661
5662#define FNAME 0x8
5663
5664 if (zbuf[3] & FNAME)
5665 while ((zbuf[n++] != 0) && (n < len));
5666
5667 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5668 bp->strm->avail_in = len - n;
5669 bp->strm->next_out = bp->gunzip_buf;
5670 bp->strm->avail_out = FW_BUF_SIZE;
5671
5672 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5673 if (rc != Z_OK)
5674 return rc;
5675
5676 rc = zlib_inflate(bp->strm, Z_FINISH);
5677 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5678 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5679 bp->dev->name, bp->strm->msg);
5680
5681 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5682 if (bp->gunzip_outlen & 0x3)
5683 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5684 " gunzip_outlen (%d) not aligned\n",
5685 bp->dev->name, bp->gunzip_outlen);
5686 bp->gunzip_outlen >>= 2;
5687
5688 zlib_inflateEnd(bp->strm);
5689
5690 if (rc == Z_STREAM_END)
5691 return 0;
5692
5693 return rc;
5694}
5695
5696/* nic load/unload */
5697
5698/*
5699 * General service functions
5700 */
5701
5702/* send a NIG loopback debug packet */
5703static void bnx2x_lb_pckt(struct bnx2x *bp)
5704{
5705 u32 wb_write[3];
5706
5707 /* Ethernet source and destination addresses */
5708 wb_write[0] = 0x55555555;
5709 wb_write[1] = 0x55555555;
5710 wb_write[2] = 0x20; /* SOP */
5711 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5712
5713 /* NON-IP protocol */
5714 wb_write[0] = 0x09000000;
5715 wb_write[1] = 0x55555555;
5716 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5717 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5718}
5719
5720/* Some of the internal memories
5721 * are not directly readable from the driver;
5722 * to test them we send debug packets.
5723 */
5724static int bnx2x_int_mem_test(struct bnx2x *bp)
5725{
5726 int factor;
5727 int count, i;
5728 u32 val = 0;
5729
5730 if (CHIP_REV_IS_FPGA(bp))
5731 factor = 120;
5732 else if (CHIP_REV_IS_EMUL(bp))
5733 factor = 200;
5734 else
5735 factor = 1;
5736
5737 DP(NETIF_MSG_HW, "start part1\n");
5738
5739 /* Disable inputs of parser neighbor blocks */
5740 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5741 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5742 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5743 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5744
5745 /* Write 0 to parser credits for CFC search request */
5746 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5747
5748 /* send Ethernet packet */
5749 bnx2x_lb_pckt(bp);
5750
5751 /* TODO do i reset NIG statistic? */
5752 /* Wait until NIG register shows 1 packet of size 0x10 */
5753 count = 1000 * factor;
5754 while (count) {
5755
5756 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5757 val = *bnx2x_sp(bp, wb_data[0]);
5758 if (val == 0x10)
5759 break;
5760
5761 msleep(10);
5762 count--;
5763 }
5764 if (val != 0x10) {
5765 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5766 return -1;
5767 }
5768
5769 /* Wait until PRS register shows 1 packet */
5770 count = 1000 * factor;
5771 while (count) {
5772 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5773 if (val == 1)
5774 break;
5775
5776 msleep(10);
5777 count--;
5778 }
5779 if (val != 0x1) {
5780 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5781 return -2;
5782 }
5783
5784 /* Reset and init BRB, PRS */
5785 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5786 msleep(50);
5787 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5788 msleep(50);
5789 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5790 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5791
5792 DP(NETIF_MSG_HW, "part2\n");
5793
5794 /* Disable inputs of parser neighbor blocks */
5795 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5796 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5797 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5798 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5799
5800 /* Write 0 to parser credits for CFC search request */
5801 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5802
5803 /* send 10 Ethernet packets */
5804 for (i = 0; i < 10; i++)
5805 bnx2x_lb_pckt(bp);
5806
5807 /* Wait until NIG register shows 10 + 1
5808 packets of size 11*0x10 = 0xb0 */
5809 count = 1000 * factor;
5810 while (count) {
5811
5812 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5813 val = *bnx2x_sp(bp, wb_data[0]);
5814 if (val == 0xb0)
5815 break;
5816
5817 msleep(10);
5818 count--;
5819 }
5820 if (val != 0xb0) {
5821 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5822 return -3;
5823 }
5824
5825 /* Wait until PRS register shows 2 packets */
5826 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5827 if (val != 2)
5828 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5829
5830 /* Write 1 to parser credits for CFC search request */
5831 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5832
5833 /* Wait until PRS register shows 3 packets */
5834 msleep(10 * factor);
5836 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5837 if (val != 3)
5838 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5839
5840 /* clear NIG EOP FIFO */
5841 for (i = 0; i < 11; i++)
5842 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5843 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5844 if (val != 1) {
5845 BNX2X_ERR("clear of NIG failed\n");
5846 return -4;
5847 }
5848
5849 /* Reset and init BRB, PRS, NIG */
5850 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5851 msleep(50);
5852 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5853 msleep(50);
5854 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5855 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5856#ifndef BCM_ISCSI
5857 /* set NIC mode */
5858 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5859#endif
5860
5861 /* Enable inputs of parser neighbor blocks */
5862 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5863 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5864 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5865 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5866
5867 DP(NETIF_MSG_HW, "done\n");
5868
5869 return 0; /* OK */
5870}
5871
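/* Unmask the attention sources of the HW blocks: writing 0 to a
 * *_INT_MASK register enables all of that block's attention bits,
 * while the non-zero PXP2 and PBF writes keep selected bits masked.
 */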
5872static void enable_blocks_attention(struct bnx2x *bp)
5873{
5874 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5875 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5876 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5877 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5878 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5879 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5880 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5881 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5882 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5883/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5884/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5885 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5886 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5887 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5888/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5889/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5890 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5891 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5892 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5893 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5894/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5895/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5896 if (CHIP_REV_IS_FPGA(bp))
5897 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5898 else
5899 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5900 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5901 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5902 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5903/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5904/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5905 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5906 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5907/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5908 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3 and 4 masked */
5909}
5910
5911
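/* Put the common (chip-wide) blocks into reset; bnx2x_init_common()
 * releases them again via the corresponding RESET_REG_*_SET writes.
 */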
5912static void bnx2x_reset_common(struct bnx2x *bp)
5913{
5914 /* reset_common */
5915 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5916 0xd3ffff7f);
5917 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5918}
5919
5920
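/* Enable fan failure detection (reported through SPIO5) when the
 * shared HW configuration, or the external PHY type, calls for it.
 */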
5921static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5922{
5923 u32 val;
5924 u8 port;
5925 u8 is_required = 0;
5926
5927 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5928 SHARED_HW_CFG_FAN_FAILURE_MASK;
5929
5930 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5931 is_required = 1;
5932
5933 /*
5934 * The fan failure mechanism is usually related to the PHY type since
5935 * the power consumption of the board is affected by the PHY. Currently,
5936 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5937 */
5938 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5939 for (port = PORT_0; port < PORT_MAX; port++) {
5940 u32 phy_type =
5941 SHMEM_RD(bp, dev_info.port_hw_config[port].
5942 external_phy_config) &
5943 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5944 is_required |=
5945 ((phy_type ==
5946 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
5947 (phy_type ==
5948 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
5949 (phy_type ==
5950 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5951 }
5952
5953 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5954
5955 if (is_required == 0)
5956 return;
5957
5958 /* Fan failure is indicated by SPIO 5 */
5959 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5960 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5961
5962 /* set to active low mode */
5963 val = REG_RD(bp, MISC_REG_SPIO_INT);
5964 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5965 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5966 REG_WR(bp, MISC_REG_SPIO_INT, val);
5967
5968 /* enable interrupt to signal the IGU */
5969 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5970 val |= (1 << MISC_REGISTERS_SPIO_5);
5971 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5972}
5973
5974static int bnx2x_init_common(struct bnx2x *bp)
5975{
5976 u32 val, i;
5977
5978 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5979
5980 bnx2x_reset_common(bp);
5981 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5982 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5983
5984 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
5985 if (CHIP_IS_E1H(bp))
5986 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5987
5988 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5989 msleep(30);
5990 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5991
5992 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5993 if (CHIP_IS_E1(bp)) {
5994 /* enable HW interrupt from PXP on USDM overflow
5995 bit 16 on INT_MASK_0 */
5996 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5997 }
5998
5999 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6000 bnx2x_init_pxp(bp);
6001
6002#ifdef __BIG_ENDIAN
6003 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6004 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6005 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6006 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6007 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6008 /* make sure this value is 0 */
6009 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6010
6011/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6012 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6013 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6014 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6015 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6016#endif
6017
6018 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6019#ifdef BCM_ISCSI
6020 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6021 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6022 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6023#endif
6024
6025 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6026 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6027
6028 /* let the HW do its magic ... */
6029 msleep(100);
6030 /* finish PXP init */
6031 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6032 if (val != 1) {
6033 BNX2X_ERR("PXP2 CFG failed\n");
6034 return -EBUSY;
6035 }
6036 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6037 if (val != 1) {
6038 BNX2X_ERR("PXP2 RD_INIT failed\n");
6039 return -EBUSY;
6040 }
6041
6042 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6043 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6044
6045 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6046
6047 /* clean the DMAE memory */
6048 bp->dmae_ready = 1;
6049 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6050
6051 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6052 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6053 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6054 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6055
6056 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6057 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6058 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6059 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6060
6061 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6062 /* soft reset pulse */
6063 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6064 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6065
6066#ifdef BCM_ISCSI
6067 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6068#endif
6069
6070 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6071 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6072 if (!CHIP_REV_IS_SLOW(bp)) {
6073 /* enable hw interrupt from doorbell Q */
6074 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6075 }
6076
6077 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6078 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6079 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6080 /* set NIC mode */
6081 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6082 if (CHIP_IS_E1H(bp))
6083 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6084
6085 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6086 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6087 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6088 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6089
6090 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6091 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6092 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6093 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6094
6095 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6096 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6097 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6098 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6099
6100 /* sync semi rtc */
6101 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6102 0x80000000);
6103 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6104 0x80000000);
6105
6106 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6107 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6108 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6109
6110 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6111 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6112 REG_WR(bp, i, 0xc0cac01a);
6113 /* TODO: replace with something meaningful */
6114 }
6115 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6116 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6117
6118 if (sizeof(union cdu_context) != 1024)
6119 /* we currently assume that a context is 1024 bytes */
6120 printk(KERN_ALERT PFX "please adjust the size of"
6121 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
6122
6123 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6124 val = (4 << 24) + (0 << 12) + 1024;
6125 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6126
6127 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6128 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6129 /* enable context validation interrupt from CFC */
6130 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6131
6132 /* set the thresholds to prevent CFC/CDU race */
6133 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6134
6135 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6136 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6137
6138 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6139 /* Reset PCIE errors for debug */
6140 REG_WR(bp, 0x2814, 0xffffffff);
6141 REG_WR(bp, 0x3820, 0xffffffff);
6142
6143 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6144 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6145 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6146 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6147
6148 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6149 if (CHIP_IS_E1H(bp)) {
6150 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6151 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6152 }
6153
6154 if (CHIP_REV_IS_SLOW(bp))
6155 msleep(200);
6156
6157 /* finish CFC init */
6158 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6159 if (val != 1) {
6160 BNX2X_ERR("CFC LL_INIT failed\n");
6161 return -EBUSY;
6162 }
6163 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6164 if (val != 1) {
6165 BNX2X_ERR("CFC AC_INIT failed\n");
6166 return -EBUSY;
6167 }
6168 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6169 if (val != 1) {
6170 BNX2X_ERR("CFC CAM_INIT failed\n");
6171 return -EBUSY;
6172 }
6173 REG_WR(bp, CFC_REG_DEBUG0, 0);
6174
6175 /* read NIG statistic
6176    to see if this is our first bring-up since power-up */
6177 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6178 val = *bnx2x_sp(bp, wb_data[0]);
6179
6180 /* do internal memory self test */
6181 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6182 BNX2X_ERR("internal mem self test failed\n");
6183 return -EBUSY;
6184 }
6185
6186 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6187 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6188 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6189 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6190 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6191 bp->port.need_hw_lock = 1;
6192 break;
6193
6194 default:
6195 break;
6196 }
6197
6198 bnx2x_setup_fan_failure_detection(bp);
6199
6200 /* clear PXP2 attentions */
6201 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6202
6203 enable_blocks_attention(bp);
6204
6205 if (!BP_NOMCP(bp)) {
6206 bnx2x_acquire_phy_lock(bp);
6207 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6208 bnx2x_release_phy_lock(bp);
6209 } else
6210 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6211
6212 return 0;
6213}
6214
6215static int bnx2x_init_port(struct bnx2x *bp)
6216{
6217 int port = BP_PORT(bp);
6218 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6219 u32 low, high;
6220 u32 val;
6221
6222 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6223
6224 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6225
6226 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6227 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6228
6229 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6230 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6231 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6232#ifdef BCM_ISCSI
6233 /* Port0 1
6234 * Port1 385 */
6235 i++;
6236 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
6237 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
6238 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6239 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6240
6241 /* Port0 2
6242 * Port1 386 */
6243 i++;
6244 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
6245 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
6246 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6247 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6248
6249 /* Port0 3
6250 * Port1 387 */
6251 i++;
6252 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
6253 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
6254 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6255 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6256#endif
6257 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6258
6259#ifdef BCM_ISCSI
6260 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
6261 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
6262
6263 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6264#endif
6265 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6266
6267 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6268 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6269 /* no pause for emulation and FPGA */
6270 low = 0;
6271 high = 513;
6272 } else {
6273 if (IS_E1HMF(bp))
6274 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6275 else if (bp->dev->mtu > 4096) {
6276 if (bp->flags & ONE_PORT_FLAG)
6277 low = 160;
6278 else {
6279 val = bp->dev->mtu;
6280 /* (24*1024 + val*4)/256 */
6281 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6282 }
6283 } else
6284 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6285 high = low + 56; /* 14*1024/256 */
6286 }
6287 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6288 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6289
6290
6291 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6292
6293 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6294 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6295 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6296 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6297
6298 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6299 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6300 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6301 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6302
6303 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6304 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6305
6306 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6307
6308 /* configure PBF to work without PAUSE, mtu 9000 */
6309 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6310
6311 /* update threshold */
6312 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6313 /* update init credit */
6314 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6315
6316 /* probe changes */
6317 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6318 msleep(5);
6319 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6320
6321#ifdef BCM_ISCSI
6322 /* tell the searcher where the T2 table is */
6323 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6324
6325 wb_write[0] = U64_LO(bp->t2_mapping);
6326 wb_write[1] = U64_HI(bp->t2_mapping);
6327 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6328 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6329 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6330 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6331
6332 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
6333#endif
6334 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6335 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6336
6337 if (CHIP_IS_E1(bp)) {
6338 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6339 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6340 }
6341 bnx2x_init_block(bp, HC_BLOCK, init_stage);
6342
6343 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6344 /* init aeu_mask_attn_func_0/1:
6345 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6346 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6347 * bits 4-7 are used for "per vn group attention" */
6348 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6349 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6350
6351 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6352 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6353 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6354 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6355 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6356
6357 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6358
6359 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6360
6361 if (CHIP_IS_E1H(bp)) {
6362 /* 0x2 disable e1hov, 0x1 enable */
6363 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6364 (IS_E1HMF(bp) ? 0x1 : 0x2));
6365
6366 {
6367 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6368 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6369 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6370 }
6371 }
6372
6373 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6374 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6375
6376 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6377 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6378 {
6379 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6380
6381 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6382 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6383
6384 /* The GPIO should be swapped if the swap register is
6385 set and active */
6386 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6387 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6388
6389 /* Select function upon port-swap configuration */
6390 if (port == 0) {
6391 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6392 aeu_gpio_mask = (swap_val && swap_override) ?
6393 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6394 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6395 } else {
6396 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6397 aeu_gpio_mask = (swap_val && swap_override) ?
6398 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6399 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6400 }
6401 val = REG_RD(bp, offset);
6402 /* add GPIO3 to group */
6403 val |= aeu_gpio_mask;
6404 REG_WR(bp, offset, val);
6405 }
6406 break;
6407
6408 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6409 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6410 /* add SPIO 5 to group 0 */
6411 {
6412 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6413 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6414 val = REG_RD(bp, reg_addr);
6415 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6416 REG_WR(bp, reg_addr, val);
6417 }
6418 break;
6419
6420 default:
6421 break;
6422 }
6423
6424 bnx2x__link_reset(bp);
6425
6426 return 0;
6427}
6428
6429#define ILT_PER_FUNC (768/2)
6430#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6431/* the phys address is shifted right 12 bits and has a
6432 1 (valid bit) added to the 53rd bit;
6433 then, since this is a wide register(TM),
6434 we split it into two 32-bit writes
6435 */
6436#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6437#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6438#define PXP_ONE_ILT(x) (((x) << 10) | x)
6439#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6440
6441#define CNIC_ILT_LINES 0
6442
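/* Write one ILT line: the address is split into the two 32-bit halves
 * produced by ONCHIP_ADDR1/ONCHIP_ADDR2 and written to the ONCHIP_AT
 * table (the B0 variant of the register on E1H).
 */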
6443static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6444{
6445 int reg;
6446
6447 if (CHIP_IS_E1H(bp))
6448 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6449 else /* E1 */
6450 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6451
6452 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6453}
6454
6455static int bnx2x_init_func(struct bnx2x *bp)
6456{
6457 int port = BP_PORT(bp);
6458 int func = BP_FUNC(bp);
6459 u32 addr, val;
6460 int i;
6461
6462 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6463
6464 /* set MSI reconfigure capability */
6465 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6466 val = REG_RD(bp, addr);
6467 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6468 REG_WR(bp, addr, val);
6469
6470 i = FUNC_ILT_BASE(func);
6471
6472 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6473 if (CHIP_IS_E1H(bp)) {
6474 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6475 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6476 } else /* E1 */
6477 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6478 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6479
6480
6481 if (CHIP_IS_E1H(bp)) {
6482 for (i = 0; i < 9; i++)
6483 bnx2x_init_block(bp,
6484 cm_blocks[i], FUNC0_STAGE + func);
6485
6486 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6487 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6488 }
6489
6490 /* HC init per function */
6491 if (CHIP_IS_E1H(bp)) {
6492 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6493
6494 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6495 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6496 }
6497 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6498
6499 /* Reset PCIE errors for debug */
6500 REG_WR(bp, 0x2114, 0xffffffff);
6501 REG_WR(bp, 0x2120, 0xffffffff);
6502
6503 return 0;
6504}
6505
6506static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6507{
6508 int i, rc = 0;
6509
6510 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6511 BP_FUNC(bp), load_code);
6512
6513 bp->dmae_ready = 0;
6514 mutex_init(&bp->dmae_mutex);
6515 bnx2x_gunzip_init(bp);
6516
6517 switch (load_code) {
6518 case FW_MSG_CODE_DRV_LOAD_COMMON:
6519 rc = bnx2x_init_common(bp);
6520 if (rc)
6521 goto init_hw_err;
6522 /* no break */
6523
6524 case FW_MSG_CODE_DRV_LOAD_PORT:
6525 bp->dmae_ready = 1;
6526 rc = bnx2x_init_port(bp);
6527 if (rc)
6528 goto init_hw_err;
6529 /* no break */
6530
6531 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6532 bp->dmae_ready = 1;
6533 rc = bnx2x_init_func(bp);
6534 if (rc)
6535 goto init_hw_err;
6536 break;
6537
6538 default:
6539 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6540 break;
6541 }
6542
6543 if (!BP_NOMCP(bp)) {
6544 int func = BP_FUNC(bp);
6545
6546 bp->fw_drv_pulse_wr_seq =
6547 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6548 DRV_PULSE_SEQ_MASK);
6549 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6550 }
6551
6552 /* this needs to be done before gunzip end */
6553 bnx2x_zero_def_sb(bp);
6554 for_each_queue(bp, i)
6555 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6556
6557init_hw_err:
6558 bnx2x_gunzip_end(bp);
6559
6560 return rc;
6561}
6562
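/* Free all host memory allocated by bnx2x_alloc_mem(): the per-queue
 * status blocks and rings, the default status block, the slowpath
 * area and the SPQ ring.
 */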
6563static void bnx2x_free_mem(struct bnx2x *bp)
6564{
6565
6566#define BNX2X_PCI_FREE(x, y, size) \
6567 do { \
6568 if (x) { \
6569 pci_free_consistent(bp->pdev, size, x, y); \
6570 x = NULL; \
6571 y = 0; \
6572 } \
6573 } while (0)
6574
6575#define BNX2X_FREE(x) \
6576 do { \
6577 if (x) { \
6578 vfree(x); \
6579 x = NULL; \
6580 } \
6581 } while (0)
6582
6583 int i;
6584
6585 /* fastpath */
6586 /* Common */
6587 for_each_queue(bp, i) {
6588
6589 /* status blocks */
6590 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6591 bnx2x_fp(bp, i, status_blk_mapping),
6592 sizeof(struct host_status_block));
6593 }
6594 /* Rx */
6595 for_each_rx_queue(bp, i) {
6596
6597 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6598 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6599 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6600 bnx2x_fp(bp, i, rx_desc_mapping),
6601 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6602
6603 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6604 bnx2x_fp(bp, i, rx_comp_mapping),
6605 sizeof(struct eth_fast_path_rx_cqe) *
6606 NUM_RCQ_BD);
6607
6608 /* SGE ring */
6609 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6610 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6611 bnx2x_fp(bp, i, rx_sge_mapping),
6612 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6613 }
6614 /* Tx */
6615 for_each_tx_queue(bp, i) {
6616
6617 /* fastpath tx rings: tx_buf tx_desc */
6618 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6619 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6620 bnx2x_fp(bp, i, tx_desc_mapping),
6621 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6622 }
6623 /* end of fastpath */
6624
6625 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6626 sizeof(struct host_def_status_block));
6627
6628 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6629 sizeof(struct bnx2x_slowpath));
6630
6631#ifdef BCM_ISCSI
6632 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6633 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6634 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6635 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6636#endif
6637 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6638
6639#undef BNX2X_PCI_FREE
6640#undef BNX2X_FREE
6641}
6642
6643static int bnx2x_alloc_mem(struct bnx2x *bp)
6644{
6645
6646#define BNX2X_PCI_ALLOC(x, y, size) \
6647 do { \
6648 x = pci_alloc_consistent(bp->pdev, size, y); \
6649 if (x == NULL) \
6650 goto alloc_mem_err; \
6651 memset(x, 0, size); \
6652 } while (0)
6653
6654#define BNX2X_ALLOC(x, size) \
6655 do { \
6656 x = vmalloc(size); \
6657 if (x == NULL) \
6658 goto alloc_mem_err; \
6659 memset(x, 0, size); \
6660 } while (0)
6661
6662 int i;
6663
6664 /* fastpath */
6665 /* Common */
6666 for_each_queue(bp, i) {
6667 bnx2x_fp(bp, i, bp) = bp;
6668
6669 /* status blocks */
6670 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6671 &bnx2x_fp(bp, i, status_blk_mapping),
6672 sizeof(struct host_status_block));
6673 }
6674 /* Rx */
6675 for_each_rx_queue(bp, i) {
6676
6677 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6678 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6679 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6680 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6681 &bnx2x_fp(bp, i, rx_desc_mapping),
6682 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6683
6684 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6685 &bnx2x_fp(bp, i, rx_comp_mapping),
6686 sizeof(struct eth_fast_path_rx_cqe) *
6687 NUM_RCQ_BD);
6688
6689 /* SGE ring */
6690 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6691 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6692 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6693 &bnx2x_fp(bp, i, rx_sge_mapping),
6694 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6695 }
6696 /* Tx */
6697 for_each_tx_queue(bp, i) {
6698
6699 /* fastpath tx rings: tx_buf tx_desc */
6700 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6701 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6702 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6703 &bnx2x_fp(bp, i, tx_desc_mapping),
6704 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6705 }
6706 /* end of fastpath */
6707
6708 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6709 sizeof(struct host_def_status_block));
6710
6711 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6712 sizeof(struct bnx2x_slowpath));
6713
6714#ifdef BCM_ISCSI
6715 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6716
6717 /* Initialize T1 */
6718 for (i = 0; i < 64*1024; i += 64) {
6719 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6720 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6721 }
6722
6723 /* allocate searcher T2 table
6724 we allocate 1/4 of alloc num for T2
6725 (which is not entered into the ILT) */
6726 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6727
6728 /* Initialize T2 */
6729 for (i = 0; i < 16*1024; i += 64)
6730 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6731
6732 /* now fixup the last line in the block to point to the next block */
6733 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6734
6735 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6736 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6737
6738 /* QM queues (128*MAX_CONN) */
6739 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6740#endif
6741
6742 /* Slow path ring */
6743 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6744
6745 return 0;
6746
6747alloc_mem_err:
6748 bnx2x_free_mem(bp);
6749 return -ENOMEM;
6750
6751#undef BNX2X_PCI_ALLOC
6752#undef BNX2X_ALLOC
6753}
6754
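/* Free any Tx packets still queued on the rings (everything between
 * the SW consumer and producer indices).
 */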
6755static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6756{
6757 int i;
6758
6759 for_each_tx_queue(bp, i) {
6760 struct bnx2x_fastpath *fp = &bp->fp[i];
6761
6762 u16 bd_cons = fp->tx_bd_cons;
6763 u16 sw_prod = fp->tx_pkt_prod;
6764 u16 sw_cons = fp->tx_pkt_cons;
6765
6766 while (sw_cons != sw_prod) {
6767 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6768 sw_cons++;
6769 }
6770 }
6771}
6772
6773static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6774{
6775 int i, j;
6776
6777 for_each_rx_queue(bp, j) {
6778 struct bnx2x_fastpath *fp = &bp->fp[j];
6779
6780 for (i = 0; i < NUM_RX_BD; i++) {
6781 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6782 struct sk_buff *skb = rx_buf->skb;
6783
6784 if (skb == NULL)
6785 continue;
6786
6787 pci_unmap_single(bp->pdev,
6788 pci_unmap_addr(rx_buf, mapping),
6789 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6790
6791 rx_buf->skb = NULL;
6792 dev_kfree_skb(skb);
6793 }
6794 if (!fp->disable_tpa)
6795 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6796 ETH_MAX_AGGREGATION_QUEUES_E1 :
6797 ETH_MAX_AGGREGATION_QUEUES_E1H);
6798 }
6799}
6800
6801static void bnx2x_free_skbs(struct bnx2x *bp)
6802{
6803 bnx2x_free_tx_skbs(bp);
6804 bnx2x_free_rx_skbs(bp);
6805}
6806
6807static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6808{
6809 int i, offset = 1;
6810
6811 free_irq(bp->msix_table[0].vector, bp->dev);
6812 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6813 bp->msix_table[0].vector);
6814
6815 for_each_queue(bp, i) {
6816 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6817 "state %x\n", i, bp->msix_table[i + offset].vector,
6818 bnx2x_fp(bp, i, state));
6819
6820 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6821 }
6822}
6823
6824static void bnx2x_free_irq(struct bnx2x *bp)
6825{
6826 if (bp->flags & USING_MSIX_FLAG) {
6827 bnx2x_free_msix_irqs(bp);
6828 pci_disable_msix(bp->pdev);
6829 bp->flags &= ~USING_MSIX_FLAG;
6830
6831 } else if (bp->flags & USING_MSI_FLAG) {
6832 free_irq(bp->pdev->irq, bp->dev);
6833 pci_disable_msi(bp->pdev);
6834 bp->flags &= ~USING_MSI_FLAG;
6835
6836 } else
6837 free_irq(bp->pdev->irq, bp->dev);
6838}
6839
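/* Fill the MSI-X table (entry 0 for the slowpath, one entry per
 * fastpath queue) and ask the PCI layer to enable MSI-X.
 */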
6840static int bnx2x_enable_msix(struct bnx2x *bp)
6841{
6842 int i, rc, offset = 1;
6843 int igu_vec = 0;
6844
6845 bp->msix_table[0].entry = igu_vec;
6846 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6847
6848 for_each_queue(bp, i) {
6849 igu_vec = BP_L_ID(bp) + offset + i;
6850 bp->msix_table[i + offset].entry = igu_vec;
6851 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6852 "(fastpath #%u)\n", i + offset, igu_vec, i);
6853 }
6854
6855 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6856 BNX2X_NUM_QUEUES(bp) + offset);
6857 if (rc) {
6858 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6859 return rc;
6860 }
6861
6862 bp->flags |= USING_MSIX_FLAG;
6863
6864 return 0;
6865}
6866
6867static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6868{
6869 int i, rc, offset = 1;
6870
6871 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6872 bp->dev->name, bp->dev);
6873 if (rc) {
6874 BNX2X_ERR("request sp irq failed\n");
6875 return -EBUSY;
6876 }
6877
6878 for_each_queue(bp, i) {
6879 struct bnx2x_fastpath *fp = &bp->fp[i];
6880
6881 if (i < bp->num_rx_queues)
6882 sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
6883 else
6884 sprintf(fp->name, "%s-tx-%d",
6885 bp->dev->name, i - bp->num_rx_queues);
6886
6887 rc = request_irq(bp->msix_table[i + offset].vector,
6888 bnx2x_msix_fp_int, 0, fp->name, fp);
6889 if (rc) {
6890 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6891 bnx2x_free_msix_irqs(bp);
6892 return -EBUSY;
6893 }
6894
6895 fp->state = BNX2X_FP_STATE_IRQ;
6896 }
6897
6898 i = BNX2X_NUM_QUEUES(bp);
6899 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
6900 " ... fp[%d] %d\n",
6901 bp->dev->name, bp->msix_table[0].vector,
6902 0, bp->msix_table[offset].vector,
6903 i - 1, bp->msix_table[offset + i - 1].vector);
6904
6905 return 0;
6906}
6907
6908static int bnx2x_enable_msi(struct bnx2x *bp)
6909{
6910 int rc;
6911
6912 rc = pci_enable_msi(bp->pdev);
6913 if (rc) {
6914 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6915 return -1;
6916 }
6917 bp->flags |= USING_MSI_FLAG;
6918
6919 return 0;
6920}
6921
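/* Request the single INTx/MSI interrupt line; IRQF_SHARED is used
 * only in legacy INTx mode.
 */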
6922static int bnx2x_req_irq(struct bnx2x *bp)
6923{
6924 unsigned long flags;
6925 int rc;
6926
6927 if (bp->flags & USING_MSI_FLAG)
6928 flags = 0;
6929 else
6930 flags = IRQF_SHARED;
6931
6932 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6933 bp->dev->name, bp->dev);
6934 if (!rc)
6935 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6936
6937 return rc;
6938}
6939
6940static void bnx2x_napi_enable(struct bnx2x *bp)
6941{
6942 int i;
6943
6944 for_each_rx_queue(bp, i)
6945 napi_enable(&bnx2x_fp(bp, i, napi));
6946}
6947
6948static void bnx2x_napi_disable(struct bnx2x *bp)
6949{
6950 int i;
6951
6952 for_each_rx_queue(bp, i)
6953 napi_disable(&bnx2x_fp(bp, i, napi));
6954}
6955
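/* Counterpart of bnx2x_netif_stop(): when bp->intr_sem drops back to
 * zero, re-enable NAPI, HW interrupts and the Tx queues.
 */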
6956static void bnx2x_netif_start(struct bnx2x *bp)
6957{
6958 int intr_sem;
6959
6960 intr_sem = atomic_dec_and_test(&bp->intr_sem);
6961 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6962
6963 if (intr_sem) {
6964 if (netif_running(bp->dev)) {
6965 bnx2x_napi_enable(bp);
6966 bnx2x_int_enable(bp);
6967 if (bp->state == BNX2X_STATE_OPEN)
6968 netif_tx_wake_all_queues(bp->dev);
6969 }
6970 }
6971}
6972
6973static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6974{
6975 bnx2x_int_disable_sync(bp, disable_hw);
6976 bnx2x_napi_disable(bp);
6977 netif_tx_disable(bp->dev);
6978 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6979}
6980
6981/*
6982 * Init service functions
6983 */
6984
6985static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6986{
6987 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6988 int port = BP_PORT(bp);
6989
6990 /* CAM allocation
6991 * unicasts 0-31:port0 32-63:port1
6992 * multicast 64-127:port0 128-191:port1
6993 */
6994 config->hdr.length = 2;
6995 config->hdr.offset = port ? 32 : 0;
6996 config->hdr.client_id = bp->fp->cl_id;
6997 config->hdr.reserved1 = 0;
6998
6999 /* primary MAC */
7000 config->config_table[0].cam_entry.msb_mac_addr =
7001 swab16(*(u16 *)&bp->dev->dev_addr[0]);
7002 config->config_table[0].cam_entry.middle_mac_addr =
7003 swab16(*(u16 *)&bp->dev->dev_addr[2]);
7004 config->config_table[0].cam_entry.lsb_mac_addr =
7005 swab16(*(u16 *)&bp->dev->dev_addr[4]);
7006 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7007 if (set)
7008 config->config_table[0].target_table_entry.flags = 0;
7009 else
7010 CAM_INVALIDATE(config->config_table[0]);
7011 config->config_table[0].target_table_entry.clients_bit_vector =
7012 cpu_to_le32(1 << BP_L_ID(bp));
7013 config->config_table[0].target_table_entry.vlan_id = 0;
7014
7015 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7016 (set ? "setting" : "clearing"),
7017 config->config_table[0].cam_entry.msb_mac_addr,
7018 config->config_table[0].cam_entry.middle_mac_addr,
7019 config->config_table[0].cam_entry.lsb_mac_addr);
7020
7021 /* broadcast */
7022 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
7023 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
7024 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
7025 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7026 if (set)
7027 config->config_table[1].target_table_entry.flags =
7028 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7029 else
7030 CAM_INVALIDATE(config->config_table[1]);
7031 config->config_table[1].target_table_entry.clients_bit_vector =
7032 cpu_to_le32(1 << BP_L_ID(bp));
7033 config->config_table[1].target_table_entry.vlan_id = 0;
7034
7035 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7036 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7037 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7038}
7039
7040static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
7041{
7042 struct mac_configuration_cmd_e1h *config =
7043 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7044
7045 /* CAM allocation for E1H
7046 * unicasts: by func number
7047 * multicast: 20+FUNC*20, 20 each
7048 */
7049 config->hdr.length = 1;
7050 config->hdr.offset = BP_FUNC(bp);
7051 config->hdr.client_id = bp->fp->cl_id;
7052 config->hdr.reserved1 = 0;
7053
7054 /* primary MAC */
7055 config->config_table[0].msb_mac_addr =
7056 swab16(*(u16 *)&bp->dev->dev_addr[0]);
7057 config->config_table[0].middle_mac_addr =
7058 swab16(*(u16 *)&bp->dev->dev_addr[2]);
7059 config->config_table[0].lsb_mac_addr =
7060 swab16(*(u16 *)&bp->dev->dev_addr[4]);
7061 config->config_table[0].clients_bit_vector =
7062 cpu_to_le32(1 << BP_L_ID(bp));
7063 config->config_table[0].vlan_id = 0;
7064 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7065 if (set)
7066 config->config_table[0].flags = BP_PORT(bp);
7067 else
7068 config->config_table[0].flags =
7069 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7070
7071 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
7072 (set ? "setting" : "clearing"),
7073 config->config_table[0].msb_mac_addr,
7074 config->config_table[0].middle_mac_addr,
7075 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
7076
7077 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7078 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7079 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7080}
7081
7082static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7083 int *state_p, int poll)
7084{
7085 /* can take a while if any port is running */
7086 int cnt = 5000;
7087
7088 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7089 poll ? "polling" : "waiting", state, idx);
7090
7091 might_sleep();
7092 while (cnt--) {
7093 if (poll) {
7094 bnx2x_rx_int(bp->fp, 10);
7095   /* if the index is different from 0,
7096    * the reply for some commands will
7097    * be on the non-default queue
7098    */
7099 if (idx)
7100 bnx2x_rx_int(&bp->fp[idx], 10);
7101 }
7102
7103 mb(); /* state is changed by bnx2x_sp_event() */
7104 if (*state_p == state) {
7105#ifdef BNX2X_STOP_ON_ERROR
7106 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7107#endif
7108 return 0;
7109 }
7110
7111 msleep(1);
7112 }
7113
7114 /* timeout! */
7115 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7116 poll ? "polling" : "waiting", state, idx);
7117#ifdef BNX2X_STOP_ON_ERROR
7118 bnx2x_panic();
7119#endif
7120
7121 return -EBUSY;
7122}
7123
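/* Bring up the leading connection: reset the IGU state, post the
 * PORT_SETUP ramrod and wait for bp->state to become OPEN.
 */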
7124static int bnx2x_setup_leading(struct bnx2x *bp)
7125{
7126 int rc;
7127
7128 /* reset IGU state */
7129 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7130
7131 /* SETUP ramrod */
7132 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7133
7134 /* Wait for completion */
7135 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7136
7137 return rc;
7138}
7139
7140static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7141{
7142 struct bnx2x_fastpath *fp = &bp->fp[index];
7143
7144 /* reset IGU state */
7145 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7146
7147 /* SETUP ramrod */
7148 fp->state = BNX2X_FP_STATE_OPENING;
7149 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7150 fp->cl_id, 0);
7151
7152 /* Wait for completion */
7153 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7154 &(fp->state), 0);
7155}
7156
7157static int bnx2x_poll(struct napi_struct *napi, int budget);
7158
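/* Derive the number of Rx/Tx queues for MSI-X operation from
 * bp->multi_mode and the num_rx_queues/num_tx_queues module
 * parameters, clamped to the HW maximum (and to the number of
 * online CPUs when the parameters are left at 0).
 */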
7159static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
7160 int *num_tx_queues_out)
7161{
7162 int _num_rx_queues = 0, _num_tx_queues = 0;
7163
7164 switch (bp->multi_mode) {
7165 case ETH_RSS_MODE_DISABLED:
7166 _num_rx_queues = 1;
7167 _num_tx_queues = 1;
7168 break;
7169
7170 case ETH_RSS_MODE_REGULAR:
7171 if (num_rx_queues)
7172 _num_rx_queues = min_t(u32, num_rx_queues,
7173 BNX2X_MAX_QUEUES(bp));
7174 else
7175 _num_rx_queues = min_t(u32, num_online_cpus(),
7176 BNX2X_MAX_QUEUES(bp));
7177
7178 if (num_tx_queues)
7179 _num_tx_queues = min_t(u32, num_tx_queues,
7180 BNX2X_MAX_QUEUES(bp));
7181 else
7182 _num_tx_queues = min_t(u32, num_online_cpus(),
7183 BNX2X_MAX_QUEUES(bp));
7184
7185  /* There must not be more Tx queues than Rx queues */
7186 if (_num_tx_queues > _num_rx_queues) {
7187 BNX2X_ERR("number of tx queues (%d) > "
7188 "number of rx queues (%d)"
7189 " defaulting to %d\n",
7190 _num_tx_queues, _num_rx_queues,
7191 _num_rx_queues);
7192 _num_tx_queues = _num_rx_queues;
7193 }
7194 break;
7195
7196
7197 default:
7198 _num_rx_queues = 1;
7199 _num_tx_queues = 1;
7200 break;
7201 }
7202
7203 *num_rx_queues_out = _num_rx_queues;
7204 *num_tx_queues_out = _num_tx_queues;
7205}
7206
7207static int bnx2x_set_int_mode(struct bnx2x *bp)
7208{
7209 int rc = 0;
7210
7211 switch (int_mode) {
7212 case INT_MODE_INTx:
7213 case INT_MODE_MSI:
7214 bp->num_rx_queues = 1;
7215 bp->num_tx_queues = 1;
7216 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7217 break;
7218
7219 case INT_MODE_MSIX:
7220 default:
7221 /* Set interrupt mode according to bp->multi_mode value */
7222 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
7223 &bp->num_tx_queues);
7224
7225 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
7226 bp->num_rx_queues, bp->num_tx_queues);
7227
7228 /* if we can't use MSI-X we only need one fp,
7229 * so try to enable MSI-X with the requested number of fp's
7230   * and fall back to MSI or legacy INTx with one fp
7231 */
7232 rc = bnx2x_enable_msix(bp);
7233 if (rc) {
7234 /* failed to enable MSI-X */
7235 if (bp->multi_mode)
7236 BNX2X_ERR("Multi requested but failed to "
7237 "enable MSI-X (rx %d tx %d), "
7238 "set number of queues to 1\n",
7239 bp->num_rx_queues, bp->num_tx_queues);
7240 bp->num_rx_queues = 1;
7241 bp->num_tx_queues = 1;
7242 }
7243 break;
7244 }
7245 bp->dev->real_num_tx_queues = bp->num_tx_queues;
7246 return rc;
7247}
7248
7249
7250/* must be called with rtnl_lock */
7251static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7252{
7253 u32 load_code;
7254 int i, rc;
7255
7256#ifdef BNX2X_STOP_ON_ERROR
7257 if (unlikely(bp->panic))
7258 return -EPERM;
7259#endif
7260
7261 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7262
7263 rc = bnx2x_set_int_mode(bp);
7264
7265 if (bnx2x_alloc_mem(bp))
7266 return -ENOMEM;
7267
7268 for_each_rx_queue(bp, i)
7269 bnx2x_fp(bp, i, disable_tpa) =
7270 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7271
7272 for_each_rx_queue(bp, i)
7273 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7274 bnx2x_poll, 128);
7275
7276 bnx2x_napi_enable(bp);
7277
7278 if (bp->flags & USING_MSIX_FLAG) {
7279 rc = bnx2x_req_msix_irqs(bp);
7280 if (rc) {
7281 pci_disable_msix(bp->pdev);
7282 goto load_error1;
7283 }
7284 } else {
7285  /* Fall back to INTx if we failed to enable MSI-X due to a lack
7286     of memory (in bnx2x_set_int_mode()) */
7287 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7288 bnx2x_enable_msi(bp);
7289 bnx2x_ack_int(bp);
7290 rc = bnx2x_req_irq(bp);
7291 if (rc) {
7292 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
7293 if (bp->flags & USING_MSI_FLAG)
7294 pci_disable_msi(bp->pdev);
7295 goto load_error1;
7296 }
7297 if (bp->flags & USING_MSI_FLAG) {
7298 bp->dev->irq = bp->pdev->irq;
7299 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
7300 bp->dev->name, bp->pdev->irq);
7301 }
7302 }
7303
7304 /* Send LOAD_REQUEST command to MCP.
7305    Returns the type of LOAD command:
7306    if this is the first port to be initialized,
7307    the common blocks should be initialized as well, otherwise not
7308 */
7309 if (!BP_NOMCP(bp)) {
7310 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7311 if (!load_code) {
7312 BNX2X_ERR("MCP response failure, aborting\n");
7313 rc = -EBUSY;
7314 goto load_error2;
7315 }
7316 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7317 rc = -EBUSY; /* other port in diagnostic mode */
7318 goto load_error2;
7319 }
7320
7321 } else {
7322 int port = BP_PORT(bp);
7323
7324 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
7325 load_count[0], load_count[1], load_count[2]);
7326 load_count[0]++;
7327 load_count[1 + port]++;
7328 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
7329 load_count[0], load_count[1], load_count[2]);
7330 if (load_count[0] == 1)
7331 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7332 else if (load_count[1 + port] == 1)
7333 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7334 else
7335 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7336 }
7337
7338 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7339 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7340 bp->port.pmf = 1;
7341 else
7342 bp->port.pmf = 0;
7343 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7344
7345 /* Initialize HW */
7346 rc = bnx2x_init_hw(bp, load_code);
7347 if (rc) {
7348 BNX2X_ERR("HW init failed, aborting\n");
7349 goto load_error2;
7350 }
7351
7352 /* Setup NIC internals and enable interrupts */
7353 bnx2x_nic_init(bp, load_code);
7354
7355 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7356 (bp->common.shmem2_base))
7357 SHMEM2_WR(bp, dcc_support,
7358 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7359 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7360
7361 /* Send LOAD_DONE command to MCP */
7362 if (!BP_NOMCP(bp)) {
7363 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7364 if (!load_code) {
7365 BNX2X_ERR("MCP response failure, aborting\n");
7366 rc = -EBUSY;
7367 goto load_error3;
7368 }
7369 }
7370
7371 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7372
7373 rc = bnx2x_setup_leading(bp);
7374 if (rc) {
7375 BNX2X_ERR("Setup leading failed!\n");
7376 goto load_error3;
7377 }
7378
7379 if (CHIP_IS_E1H(bp))
7380 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7381 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7382 bp->state = BNX2X_STATE_DISABLED;
7383 }
7384
7385 if (bp->state == BNX2X_STATE_OPEN) {
7386 for_each_nondefault_queue(bp, i) {
7387 rc = bnx2x_setup_multi(bp, i);
7388 if (rc)
7389 goto load_error3;
7390 }
7391
7392 if (CHIP_IS_E1(bp))
7393 bnx2x_set_mac_addr_e1(bp, 1);
7394 else
7395 bnx2x_set_mac_addr_e1h(bp, 1);
7396 }
7397
7398 if (bp->port.pmf)
7399 bnx2x_initial_phy_init(bp, load_mode);
7400
7401 /* Start fast path */
7402 switch (load_mode) {
7403 case LOAD_NORMAL:
7404 if (bp->state == BNX2X_STATE_OPEN) {
7405			/* Tx queues should only be re-enabled */
7406 netif_tx_wake_all_queues(bp->dev);
7407 }
7408 /* Initialize the receive filter. */
7409 bnx2x_set_rx_mode(bp->dev);
7410 break;
7411
7412 case LOAD_OPEN:
7413 netif_tx_start_all_queues(bp->dev);
7414 if (bp->state != BNX2X_STATE_OPEN)
7415 netif_tx_disable(bp->dev);
7416 /* Initialize the receive filter. */
7417 bnx2x_set_rx_mode(bp->dev);
7418 break;
7419
7420 case LOAD_DIAG:
7421 /* Initialize the receive filter. */
7422 bnx2x_set_rx_mode(bp->dev);
7423 bp->state = BNX2X_STATE_DIAG;
7424 break;
7425
7426 default:
7427 break;
7428 }
7429
7430 if (!bp->port.pmf)
7431 bnx2x__link_status_update(bp);
7432
7433 /* start the timer */
7434 mod_timer(&bp->timer, jiffies + bp->current_interval);
7435
7436
7437 return 0;
7438
7439load_error3:
7440 bnx2x_int_disable_sync(bp, 1);
7441 if (!BP_NOMCP(bp)) {
7442 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7443 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7444 }
7445 bp->port.pmf = 0;
7446 /* Free SKBs, SGEs, TPA pool and driver internals */
7447 bnx2x_free_skbs(bp);
7448 for_each_rx_queue(bp, i)
7449 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7450load_error2:
7451 /* Release IRQs */
7452 bnx2x_free_irq(bp);
7453load_error1:
7454 bnx2x_napi_disable(bp);
7455 for_each_rx_queue(bp, i)
7456 netif_napi_del(&bnx2x_fp(bp, i, napi));
7457 bnx2x_free_mem(bp);
7458
7459 return rc;
7460}
7461
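/* Stop a non-leading queue: send a HALT ramrod and wait for the fastpath
 * state to reach HALTED, then delete the CFC entry and wait for CLOSED.
 */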
7462static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7463{
7464 struct bnx2x_fastpath *fp = &bp->fp[index];
7465 int rc;
7466
7467 /* halt the connection */
7468 fp->state = BNX2X_FP_STATE_HALTING;
7469 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7470
7471 /* Wait for completion */
7472 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7473 &(fp->state), 1);
7474 if (rc) /* timeout */
7475 return rc;
7476
7477 /* delete cfc entry */
7478 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7479
7480 /* Wait for completion */
7481 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7482 &(fp->state), 1);
7483 return rc;
7484}
7485
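/* Stop the leading (default) connection: send a HALT ramrod for queue 0,
 * then a PORT_DELETE ramrod, polling the default status block for its
 * completion.
 */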
7486static int bnx2x_stop_leading(struct bnx2x *bp)
7487{
7488 __le16 dsb_sp_prod_idx;
7489 /* if the other port is handling traffic,
7490 this can take a lot of time */
7491 int cnt = 500;
7492 int rc;
7493
7494 might_sleep();
7495
7496 /* Send HALT ramrod */
7497 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7498 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7499
7500 /* Wait for completion */
7501 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7502 &(bp->fp[0].state), 1);
7503 if (rc) /* timeout */
7504 return rc;
7505
7506 dsb_sp_prod_idx = *bp->dsb_sp_prod;
7507
7508 /* Send PORT_DELETE ramrod */
7509 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7510
7511	/* Wait for the completion to arrive on the default status block;
7512	   we are going to reset the chip anyway,
7513	   so there is not much to do if this times out
7514	 */
7515 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7516 if (!cnt) {
7517 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7518 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7519 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7520#ifdef BNX2X_STOP_ON_ERROR
7521 bnx2x_panic();
7522#endif
7523 rc = -EBUSY;
7524 break;
7525 }
7526 cnt--;
7527 msleep(1);
7528 rmb(); /* Refresh the dsb_sp_prod */
7529 }
7530 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7531 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7532
7533 return rc;
7534}
7535
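/* Function-level reset: mask the HC leading/trailing edge registers for
 * this port and clear this function's ILT entries.
 */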
7536static void bnx2x_reset_func(struct bnx2x *bp)
7537{
7538 int port = BP_PORT(bp);
7539 int func = BP_FUNC(bp);
7540 int base, i;
7541
7542 /* Configure IGU */
7543 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7544 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7545
7546 /* Clear ILT */
7547 base = FUNC_ILT_BASE(func);
7548 for (i = base; i < base + ILT_PER_FUNC; i++)
7549 bnx2x_ilt_wr(bp, i, 0);
7550}
7551
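/* Port-level reset: mask NIG and AEU attentions, stop forwarding Rx
 * traffic to the BRB and report if the BRB still holds blocks for this port.
 */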
7552static void bnx2x_reset_port(struct bnx2x *bp)
7553{
7554 int port = BP_PORT(bp);
7555 u32 val;
7556
7557 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7558
7559 /* Do not rcv packets to BRB */
7560 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7561 /* Do not direct rcv packets that are not for MCP to the BRB */
7562 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7563 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7564
7565 /* Configure AEU */
7566 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7567
7568 msleep(100);
7569 /* Check for BRB port occupancy */
7570 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7571 if (val)
7572 DP(NETIF_MSG_IFDOWN,
7573 "BRB1 is not empty %d blocks are occupied\n", val);
7574
7575 /* TODO: Close Doorbell port? */
7576}
7577
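/* Reset HW blocks according to the scope reported by the MCP:
 * common (whole chip), a single port or only this function.
 */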
7578static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7579{
7580 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7581 BP_FUNC(bp), reset_code);
7582
7583 switch (reset_code) {
7584 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7585 bnx2x_reset_port(bp);
7586 bnx2x_reset_func(bp);
7587 bnx2x_reset_common(bp);
7588 break;
7589
7590 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7591 bnx2x_reset_port(bp);
7592 bnx2x_reset_func(bp);
7593 break;
7594
7595 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7596 bnx2x_reset_func(bp);
7597 break;
7598
7599 default:
7600 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7601 break;
7602 }
7603}
7604
7605/* must be called with rtnl_lock */
7606static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7607{
7608 int port = BP_PORT(bp);
7609 u32 reset_code = 0;
7610 int i, cnt, rc;
7611
7612 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7613
7614 bp->rx_mode = BNX2X_RX_MODE_NONE;
7615 bnx2x_set_storm_rx_mode(bp);
7616
7617 bnx2x_netif_stop(bp, 1);
7618
7619 del_timer_sync(&bp->timer);
7620 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7621 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7622 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7623
7624 /* Release IRQs */
7625 bnx2x_free_irq(bp);
7626
7627 /* Wait until tx fastpath tasks complete */
7628 for_each_tx_queue(bp, i) {
7629 struct bnx2x_fastpath *fp = &bp->fp[i];
7630
7631 cnt = 1000;
7632 while (bnx2x_has_tx_work_unload(fp)) {
7633
7634 bnx2x_tx_int(fp);
7635 if (!cnt) {
7636 BNX2X_ERR("timeout waiting for queue[%d]\n",
7637 i);
7638#ifdef BNX2X_STOP_ON_ERROR
7639 bnx2x_panic();
7640 return -EBUSY;
7641#else
7642 break;
7643#endif
7644 }
7645 cnt--;
7646 msleep(1);
7647 }
7648 }
7649 /* Give HW time to discard old tx messages */
7650 msleep(1);
7651
7652 if (CHIP_IS_E1(bp)) {
7653 struct mac_configuration_cmd *config =
7654 bnx2x_sp(bp, mcast_config);
7655
7656 bnx2x_set_mac_addr_e1(bp, 0);
7657
7658 for (i = 0; i < config->hdr.length; i++)
7659 CAM_INVALIDATE(config->config_table[i]);
7660
7661 config->hdr.length = i;
7662 if (CHIP_REV_IS_SLOW(bp))
7663 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7664 else
7665 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7666 config->hdr.client_id = bp->fp->cl_id;
7667 config->hdr.reserved1 = 0;
7668
7669 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7670 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7671 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7672
7673 } else { /* E1H */
7674 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7675
7676 bnx2x_set_mac_addr_e1h(bp, 0);
7677
7678 for (i = 0; i < MC_HASH_SIZE; i++)
7679 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7680
7681 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7682 }
7683
7684 if (unload_mode == UNLOAD_NORMAL)
7685 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7686
7687 else if (bp->flags & NO_WOL_FLAG)
7688 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7689
7690 else if (bp->wol) {
7691 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7692 u8 *mac_addr = bp->dev->dev_addr;
7693 u32 val;
7694 /* The mac address is written to entries 1-4 to
7695 preserve entry 0 which is used by the PMF */
7696 u8 entry = (BP_E1HVN(bp) + 1)*8;
7697
7698 val = (mac_addr[0] << 8) | mac_addr[1];
7699 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7700
7701 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7702 (mac_addr[4] << 8) | mac_addr[5];
7703 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7704
7705 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7706
7707 } else
7708 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7709
7710	/* Close multi and leading connections;
7711	   completions for the ramrods are collected synchronously */
7712 for_each_nondefault_queue(bp, i)
7713 if (bnx2x_stop_multi(bp, i))
7714 goto unload_error;
7715
7716 rc = bnx2x_stop_leading(bp);
7717 if (rc) {
7718 BNX2X_ERR("Stop leading failed!\n");
7719#ifdef BNX2X_STOP_ON_ERROR
7720 return -EBUSY;
7721#else
7722 goto unload_error;
7723#endif
7724 }
7725
7726unload_error:
7727 if (!BP_NOMCP(bp))
7728 reset_code = bnx2x_fw_command(bp, reset_code);
7729 else {
7730 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
7731 load_count[0], load_count[1], load_count[2]);
7732 load_count[0]--;
7733 load_count[1 + port]--;
7734 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
7735 load_count[0], load_count[1], load_count[2]);
7736 if (load_count[0] == 0)
7737 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7738 else if (load_count[1 + port] == 0)
7739 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7740 else
7741 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7742 }
7743
7744 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7745 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7746 bnx2x__link_reset(bp);
7747
7748 /* Reset the chip */
7749 bnx2x_reset_chip(bp, reset_code);
7750
7751 /* Report UNLOAD_DONE to MCP */
7752 if (!BP_NOMCP(bp))
7753 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7754
7755 bp->port.pmf = 0;
7756
7757 /* Free SKBs, SGEs, TPA pool and driver internals */
7758 bnx2x_free_skbs(bp);
7759 for_each_rx_queue(bp, i)
7760 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7761 for_each_rx_queue(bp, i)
7762 netif_napi_del(&bnx2x_fp(bp, i, napi));
7763 bnx2x_free_mem(bp);
7764
7765 bp->state = BNX2X_STATE_CLOSED;
7766
7767 netif_carrier_off(bp->dev);
7768
7769 return 0;
7770}
7771
7772static void bnx2x_reset_task(struct work_struct *work)
7773{
7774 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7775
7776#ifdef BNX2X_STOP_ON_ERROR
7777	BNX2X_ERR("reset task called but STOP_ON_ERROR is defined,"
7778		  " so the reset is skipped to allow a debug dump;\n"
7779		  " you will need to reboot when done\n");
7780 return;
7781#endif
7782
7783 rtnl_lock();
7784
7785 if (!netif_running(bp->dev))
7786 goto reset_task_exit;
7787
7788 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7789 bnx2x_nic_load(bp, LOAD_NORMAL);
7790
7791reset_task_exit:
7792 rtnl_unlock();
7793}
7794
7795/* end of nic load/unload */
7796
7797/* ethtool_ops */
7798
7799/*
7800 * Init service functions
7801 */
7802
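/* Map a function index to its PXP2 pretend register. */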
7803static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7804{
7805 switch (func) {
7806 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7807 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7808 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7809 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7810 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7811 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7812 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7813 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7814 default:
7815 BNX2X_ERR("Unsupported function index: %d\n", func);
7816 return (u32)(-1);
7817 }
7818}
7819
7820static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7821{
7822 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7823
7824 /* Flush all outstanding writes */
7825 mmiowb();
7826
7827 /* Pretend to be function 0 */
7828 REG_WR(bp, reg, 0);
7829 /* Flush the GRC transaction (in the chip) */
7830 new_val = REG_RD(bp, reg);
7831 if (new_val != 0) {
7832 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7833 new_val);
7834 BUG();
7835 }
7836
7837	/* From now on we are in the "like-E1" mode */
7838 bnx2x_int_disable(bp);
7839
7840 /* Flush all outstanding writes */
7841 mmiowb();
7842
7843	/* Restore the original function settings */
7844 REG_WR(bp, reg, orig_func);
7845 new_val = REG_RD(bp, reg);
7846 if (new_val != orig_func) {
7847 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7848 orig_func, new_val);
7849 BUG();
7850 }
7851}
7852
7853static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7854{
7855 if (CHIP_IS_E1H(bp))
7856 bnx2x_undi_int_disable_e1h(bp, func);
7857 else
7858 bnx2x_int_disable(bp);
7859}
7860
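/* If a pre-boot (UNDI) driver left the device initialized, take it down:
 * request an unload from the MCP (for both ports if needed), stop input
 * traffic, reset the chip while preserving the NIG port-swap straps, and
 * report UNLOAD_DONE before restoring our function and fw_seq.
 */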
7861static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7862{
7863 u32 val;
7864
7865 /* Check if there is any driver already loaded */
7866 val = REG_RD(bp, MISC_REG_UNPREPARED);
7867 if (val == 0x1) {
7868 /* Check if it is the UNDI driver
7869 * UNDI driver initializes CID offset for normal bell to 0x7
7870 */
7871 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7872 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7873 if (val == 0x7) {
7874 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7875 /* save our func */
7876 int func = BP_FUNC(bp);
7877 u32 swap_en;
7878 u32 swap_val;
7879
7880 /* clear the UNDI indication */
7881 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7882
7883 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7884
7885 /* try unload UNDI on port 0 */
7886 bp->func = 0;
7887 bp->fw_seq =
7888 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7889 DRV_MSG_SEQ_NUMBER_MASK);
7890 reset_code = bnx2x_fw_command(bp, reset_code);
7891
7892 /* if UNDI is loaded on the other port */
7893 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7894
7895 /* send "DONE" for previous unload */
7896 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7897
7898 /* unload UNDI on port 1 */
7899 bp->func = 1;
7900 bp->fw_seq =
7901 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7902 DRV_MSG_SEQ_NUMBER_MASK);
7903 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7904
7905 bnx2x_fw_command(bp, reset_code);
7906 }
7907
7908 /* now it's safe to release the lock */
7909 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7910
7911 bnx2x_undi_int_disable(bp, func);
7912
7913 /* close input traffic and wait for it */
7914 /* Do not rcv packets to BRB */
7915 REG_WR(bp,
7916 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7917 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7918 /* Do not direct rcv packets that are not for MCP to
7919 * the BRB */
7920 REG_WR(bp,
7921 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7922 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7923 /* clear AEU */
7924 REG_WR(bp,
7925 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7926 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7927 msleep(10);
7928
7929 /* save NIG port swap info */
7930 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7931 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7932 /* reset device */
7933 REG_WR(bp,
7934 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7935 0xd3ffffff);
7936 REG_WR(bp,
7937 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7938 0x1403);
7939 /* take the NIG out of reset and restore swap values */
7940 REG_WR(bp,
7941 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7942 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7943 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7944 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7945
7946 /* send unload done to the MCP */
7947 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7948
7949 /* restore our func and fw_seq */
7950 bp->func = func;
7951 bp->fw_seq =
7952 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7953 DRV_MSG_SEQ_NUMBER_MASK);
7954
7955 } else
7956 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7957 }
7958}
7959
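/* Read the chip identification, flash size, shared memory bases, bootcode
 * version and other configuration common to the whole device.
 */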
7960static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7961{
7962 u32 val, val2, val3, val4, id;
7963 u16 pmc;
7964
7965 /* Get the chip revision id and number. */
7966 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7967 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7968 id = ((val & 0xffff) << 16);
7969 val = REG_RD(bp, MISC_REG_CHIP_REV);
7970 id |= ((val & 0xf) << 12);
7971 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7972 id |= ((val & 0xff) << 4);
7973 val = REG_RD(bp, MISC_REG_BOND_ID);
7974 id |= (val & 0xf);
7975 bp->common.chip_id = id;
7976 bp->link_params.chip_id = bp->common.chip_id;
7977 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7978
7979 val = (REG_RD(bp, 0x2874) & 0x55);
7980 if ((bp->common.chip_id & 0x1) ||
7981 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7982 bp->flags |= ONE_PORT_FLAG;
7983 BNX2X_DEV_INFO("single port device\n");
7984 }
7985
7986 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7987 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7988 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7989 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7990 bp->common.flash_size, bp->common.flash_size);
7991
7992 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7993 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
7994 bp->link_params.shmem_base = bp->common.shmem_base;
7995 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
7996 bp->common.shmem_base, bp->common.shmem2_base);
7997
7998 if (!bp->common.shmem_base ||
7999 (bp->common.shmem_base < 0xA0000) ||
8000 (bp->common.shmem_base >= 0xC0000)) {
8001 BNX2X_DEV_INFO("MCP not active\n");
8002 bp->flags |= NO_MCP_FLAG;
8003 return;
8004 }
8005
8006 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8007 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8008 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8009 BNX2X_ERR("BAD MCP validity signature\n");
8010
8011 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8012 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8013
8014 bp->link_params.hw_led_mode = ((bp->common.hw_config &
8015 SHARED_HW_CFG_LED_MODE_MASK) >>
8016 SHARED_HW_CFG_LED_MODE_SHIFT);
8017
8018 bp->link_params.feature_config_flags = 0;
8019 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8020 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8021 bp->link_params.feature_config_flags |=
8022 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8023 else
8024 bp->link_params.feature_config_flags &=
8025 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8026
8027 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8028 bp->common.bc_ver = val;
8029 BNX2X_DEV_INFO("bc_ver %X\n", val);
8030 if (val < BNX2X_BC_VER) {
8031 /* for now only warn
8032 * later we might need to enforce this */
8033 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8034 " please upgrade BC\n", BNX2X_BC_VER, val);
8035 }
8036 bp->link_params.feature_config_flags |=
8037 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8038 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8039
8040 if (BP_E1HVN(bp) == 0) {
8041 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8042 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8043 } else {
8044 /* no WOL capability for E1HVN != 0 */
8045 bp->flags |= NO_WOL_FLAG;
8046 }
8047 BNX2X_DEV_INFO("%sWoL capable\n",
8048 (bp->flags & NO_WOL_FLAG) ? "not " : "");
8049
8050 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8051 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8052 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8053 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8054
8055 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8056 val, val2, val3, val4);
8057}
8058
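/* Derive the supported link modes from the switch configuration and the
 * external PHY type, then mask them with the NVRAM speed capability mask.
 */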
8059static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8060 u32 switch_cfg)
8061{
8062 int port = BP_PORT(bp);
8063 u32 ext_phy_type;
8064
8065 switch (switch_cfg) {
8066 case SWITCH_CFG_1G:
8067 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8068
8069 ext_phy_type =
8070 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8071 switch (ext_phy_type) {
8072 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8073 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8074 ext_phy_type);
8075
8076 bp->port.supported |= (SUPPORTED_10baseT_Half |
8077 SUPPORTED_10baseT_Full |
8078 SUPPORTED_100baseT_Half |
8079 SUPPORTED_100baseT_Full |
8080 SUPPORTED_1000baseT_Full |
8081 SUPPORTED_2500baseX_Full |
8082 SUPPORTED_TP |
8083 SUPPORTED_FIBRE |
8084 SUPPORTED_Autoneg |
8085 SUPPORTED_Pause |
8086 SUPPORTED_Asym_Pause);
8087 break;
8088
8089 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8090 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8091 ext_phy_type);
8092
8093 bp->port.supported |= (SUPPORTED_10baseT_Half |
8094 SUPPORTED_10baseT_Full |
8095 SUPPORTED_100baseT_Half |
8096 SUPPORTED_100baseT_Full |
8097 SUPPORTED_1000baseT_Full |
8098 SUPPORTED_TP |
8099 SUPPORTED_FIBRE |
8100 SUPPORTED_Autoneg |
8101 SUPPORTED_Pause |
8102 SUPPORTED_Asym_Pause);
8103 break;
8104
8105 default:
8106 BNX2X_ERR("NVRAM config error. "
8107 "BAD SerDes ext_phy_config 0x%x\n",
8108 bp->link_params.ext_phy_config);
8109 return;
8110 }
8111
8112 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8113 port*0x10);
8114 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8115 break;
8116
8117 case SWITCH_CFG_10G:
8118 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8119
8120 ext_phy_type =
8121 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8122 switch (ext_phy_type) {
8123 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8124 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8125 ext_phy_type);
8126
8127 bp->port.supported |= (SUPPORTED_10baseT_Half |
8128 SUPPORTED_10baseT_Full |
8129 SUPPORTED_100baseT_Half |
8130 SUPPORTED_100baseT_Full |
8131 SUPPORTED_1000baseT_Full |
8132 SUPPORTED_2500baseX_Full |
8133 SUPPORTED_10000baseT_Full |
8134 SUPPORTED_TP |
8135 SUPPORTED_FIBRE |
8136 SUPPORTED_Autoneg |
8137 SUPPORTED_Pause |
8138 SUPPORTED_Asym_Pause);
8139 break;
8140
8141 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8142 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8143 ext_phy_type);
8144
8145 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8146 SUPPORTED_1000baseT_Full |
8147 SUPPORTED_FIBRE |
8148 SUPPORTED_Autoneg |
8149 SUPPORTED_Pause |
8150 SUPPORTED_Asym_Pause);
8151 break;
8152
8153 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8154 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8155 ext_phy_type);
8156
8157 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8158 SUPPORTED_2500baseX_Full |
8159 SUPPORTED_1000baseT_Full |
8160 SUPPORTED_FIBRE |
8161 SUPPORTED_Autoneg |
8162 SUPPORTED_Pause |
8163 SUPPORTED_Asym_Pause);
8164 break;
8165
8166 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8167 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8168 ext_phy_type);
8169
8170 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8171 SUPPORTED_FIBRE |
8172 SUPPORTED_Pause |
8173 SUPPORTED_Asym_Pause);
8174 break;
8175
8176 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8177 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8178 ext_phy_type);
8179
8180 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8181 SUPPORTED_1000baseT_Full |
8182 SUPPORTED_FIBRE |
8183 SUPPORTED_Pause |
8184 SUPPORTED_Asym_Pause);
8185 break;
8186
8187 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8188 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8189 ext_phy_type);
8190
8191 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8192 SUPPORTED_1000baseT_Full |
8193 SUPPORTED_Autoneg |
8194 SUPPORTED_FIBRE |
8195 SUPPORTED_Pause |
8196 SUPPORTED_Asym_Pause);
8197 break;
8198
8199 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8200 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8201 ext_phy_type);
8202
8203 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8204 SUPPORTED_1000baseT_Full |
8205 SUPPORTED_Autoneg |
8206 SUPPORTED_FIBRE |
8207 SUPPORTED_Pause |
8208 SUPPORTED_Asym_Pause);
8209 break;
8210
8211 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8212 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8213 ext_phy_type);
8214
8215 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8216 SUPPORTED_TP |
8217 SUPPORTED_Autoneg |
8218 SUPPORTED_Pause |
8219 SUPPORTED_Asym_Pause);
8220 break;
8221
8222 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8223 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8224 ext_phy_type);
8225
8226 bp->port.supported |= (SUPPORTED_10baseT_Half |
8227 SUPPORTED_10baseT_Full |
8228 SUPPORTED_100baseT_Half |
8229 SUPPORTED_100baseT_Full |
8230 SUPPORTED_1000baseT_Full |
8231 SUPPORTED_10000baseT_Full |
8232 SUPPORTED_TP |
8233 SUPPORTED_Autoneg |
8234 SUPPORTED_Pause |
8235 SUPPORTED_Asym_Pause);
8236 break;
8237
8238 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8239 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8240 bp->link_params.ext_phy_config);
8241 break;
8242
8243 default:
8244 BNX2X_ERR("NVRAM config error. "
8245 "BAD XGXS ext_phy_config 0x%x\n",
8246 bp->link_params.ext_phy_config);
8247 return;
8248 }
8249
8250 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8251 port*0x18);
8252 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8253
8254 break;
8255
8256 default:
8257 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8258 bp->port.link_config);
8259 return;
8260 }
8261 bp->link_params.phy_addr = bp->port.phy_addr;
8262
8263 /* mask what we support according to speed_cap_mask */
8264 if (!(bp->link_params.speed_cap_mask &
8265 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8266 bp->port.supported &= ~SUPPORTED_10baseT_Half;
8267
8268 if (!(bp->link_params.speed_cap_mask &
8269 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8270 bp->port.supported &= ~SUPPORTED_10baseT_Full;
8271
8272 if (!(bp->link_params.speed_cap_mask &
8273 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8274 bp->port.supported &= ~SUPPORTED_100baseT_Half;
8275
8276 if (!(bp->link_params.speed_cap_mask &
8277 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8278 bp->port.supported &= ~SUPPORTED_100baseT_Full;
8279
8280 if (!(bp->link_params.speed_cap_mask &
8281 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8282 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8283 SUPPORTED_1000baseT_Full);
8284
8285 if (!(bp->link_params.speed_cap_mask &
8286 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8287 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
8288
8289 if (!(bp->link_params.speed_cap_mask &
8290 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8291 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
8292
8293 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8294}
8295
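/* Translate the NVRAM link_config into the requested line speed, duplex,
 * flow control and advertised modes.
 */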
8296static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8297{
8298 bp->link_params.req_duplex = DUPLEX_FULL;
8299
8300 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8301 case PORT_FEATURE_LINK_SPEED_AUTO:
8302 if (bp->port.supported & SUPPORTED_Autoneg) {
8303 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8304 bp->port.advertising = bp->port.supported;
8305 } else {
8306 u32 ext_phy_type =
8307 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8308
8309 if ((ext_phy_type ==
8310 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8311 (ext_phy_type ==
8312 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
8313 /* force 10G, no AN */
8314 bp->link_params.req_line_speed = SPEED_10000;
8315 bp->port.advertising =
8316 (ADVERTISED_10000baseT_Full |
8317 ADVERTISED_FIBRE);
8318 break;
8319 }
8320 BNX2X_ERR("NVRAM config error. "
8321 "Invalid link_config 0x%x"
8322 " Autoneg not supported\n",
8323 bp->port.link_config);
8324 return;
8325 }
8326 break;
8327
8328 case PORT_FEATURE_LINK_SPEED_10M_FULL:
8329 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8330 bp->link_params.req_line_speed = SPEED_10;
8331 bp->port.advertising = (ADVERTISED_10baseT_Full |
8332 ADVERTISED_TP);
8333 } else {
8334 BNX2X_ERR("NVRAM config error. "
8335 "Invalid link_config 0x%x"
8336 " speed_cap_mask 0x%x\n",
8337 bp->port.link_config,
8338 bp->link_params.speed_cap_mask);
8339 return;
8340 }
8341 break;
8342
8343 case PORT_FEATURE_LINK_SPEED_10M_HALF:
8344 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8345 bp->link_params.req_line_speed = SPEED_10;
8346 bp->link_params.req_duplex = DUPLEX_HALF;
8347 bp->port.advertising = (ADVERTISED_10baseT_Half |
8348 ADVERTISED_TP);
8349 } else {
8350 BNX2X_ERR("NVRAM config error. "
8351 "Invalid link_config 0x%x"
8352 " speed_cap_mask 0x%x\n",
8353 bp->port.link_config,
8354 bp->link_params.speed_cap_mask);
8355 return;
8356 }
8357 break;
8358
8359 case PORT_FEATURE_LINK_SPEED_100M_FULL:
8360 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8361 bp->link_params.req_line_speed = SPEED_100;
8362 bp->port.advertising = (ADVERTISED_100baseT_Full |
8363 ADVERTISED_TP);
8364 } else {
8365 BNX2X_ERR("NVRAM config error. "
8366 "Invalid link_config 0x%x"
8367 " speed_cap_mask 0x%x\n",
8368 bp->port.link_config,
8369 bp->link_params.speed_cap_mask);
8370 return;
8371 }
8372 break;
8373
8374 case PORT_FEATURE_LINK_SPEED_100M_HALF:
8375 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8376 bp->link_params.req_line_speed = SPEED_100;
8377 bp->link_params.req_duplex = DUPLEX_HALF;
8378 bp->port.advertising = (ADVERTISED_100baseT_Half |
8379 ADVERTISED_TP);
8380 } else {
8381 BNX2X_ERR("NVRAM config error. "
8382 "Invalid link_config 0x%x"
8383 " speed_cap_mask 0x%x\n",
8384 bp->port.link_config,
8385 bp->link_params.speed_cap_mask);
8386 return;
8387 }
8388 break;
8389
8390 case PORT_FEATURE_LINK_SPEED_1G:
8391 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8392 bp->link_params.req_line_speed = SPEED_1000;
8393 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8394 ADVERTISED_TP);
8395 } else {
8396 BNX2X_ERR("NVRAM config error. "
8397 "Invalid link_config 0x%x"
8398 " speed_cap_mask 0x%x\n",
8399 bp->port.link_config,
8400 bp->link_params.speed_cap_mask);
8401 return;
8402 }
8403 break;
8404
8405 case PORT_FEATURE_LINK_SPEED_2_5G:
8406 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8407 bp->link_params.req_line_speed = SPEED_2500;
8408 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8409 ADVERTISED_TP);
8410 } else {
8411 BNX2X_ERR("NVRAM config error. "
8412 "Invalid link_config 0x%x"
8413 " speed_cap_mask 0x%x\n",
8414 bp->port.link_config,
8415 bp->link_params.speed_cap_mask);
8416 return;
8417 }
8418 break;
8419
8420 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8421 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8422 case PORT_FEATURE_LINK_SPEED_10G_KR:
8423 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8424 bp->link_params.req_line_speed = SPEED_10000;
8425 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8426 ADVERTISED_FIBRE);
8427 } else {
8428 BNX2X_ERR("NVRAM config error. "
8429 "Invalid link_config 0x%x"
8430 " speed_cap_mask 0x%x\n",
8431 bp->port.link_config,
8432 bp->link_params.speed_cap_mask);
8433 return;
8434 }
8435 break;
8436
8437 default:
8438 BNX2X_ERR("NVRAM config error. "
8439 "BAD link speed link_config 0x%x\n",
8440 bp->port.link_config);
8441 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8442 bp->port.advertising = bp->port.supported;
8443 break;
8444 }
8445
8446 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8447 PORT_FEATURE_FLOW_CONTROL_MASK);
8448 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8449 !(bp->port.supported & SUPPORTED_Autoneg))
8450 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8451
8452 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
8453 " advertising 0x%x\n",
8454 bp->link_params.req_line_speed,
8455 bp->link_params.req_duplex,
8456 bp->link_params.req_flow_ctrl, bp->port.advertising);
8457}
8458
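/* Read the per-port NVRAM configuration: lane config, external PHY,
 * speed capabilities, WoL default, MDIO address and the MAC address.
 */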
8459static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8460{
8461 int port = BP_PORT(bp);
8462 u32 val, val2;
8463 u32 config;
8464 u16 i;
8465 u32 ext_phy_type;
8466
8467 bp->link_params.bp = bp;
8468 bp->link_params.port = port;
8469
8470 bp->link_params.lane_config =
8471 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8472 bp->link_params.ext_phy_config =
8473 SHMEM_RD(bp,
8474 dev_info.port_hw_config[port].external_phy_config);
8475 /* BCM8727_NOC => BCM8727 no over current */
8476 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8477 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8478 bp->link_params.ext_phy_config &=
8479 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8480 bp->link_params.ext_phy_config |=
8481 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8482 bp->link_params.feature_config_flags |=
8483 FEATURE_CONFIG_BCM8727_NOC;
8484 }
8485
8486 bp->link_params.speed_cap_mask =
8487 SHMEM_RD(bp,
8488 dev_info.port_hw_config[port].speed_capability_mask);
8489
8490 bp->port.link_config =
8491 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8492
8493 /* Get the 4 lanes xgxs config rx and tx */
8494 for (i = 0; i < 2; i++) {
8495 val = SHMEM_RD(bp,
8496 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8497 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8498 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8499
8500 val = SHMEM_RD(bp,
8501 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8502 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8503 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8504 }
8505
8506 /* If the device is capable of WoL, set the default state according
8507 * to the HW
8508 */
8509 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8510 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8511 (config & PORT_FEATURE_WOL_ENABLED));
8512
8513 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8514 " speed_cap_mask 0x%08x link_config 0x%08x\n",
8515 bp->link_params.lane_config,
8516 bp->link_params.ext_phy_config,
8517 bp->link_params.speed_cap_mask, bp->port.link_config);
8518
8519 bp->link_params.switch_cfg |= (bp->port.link_config &
8520 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8521 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8522
8523 bnx2x_link_settings_requested(bp);
8524
8525 /*
8526 * If connected directly, work with the internal PHY, otherwise, work
8527 * with the external PHY
8528 */
8529 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8530 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8531 bp->mdio.prtad = bp->link_params.phy_addr;
8532
8533 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8534 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8535 bp->mdio.prtad =
8536 (bp->link_params.ext_phy_config &
8537 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
8538 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT;
8539
8540 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8541 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8542 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8543 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8544 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8545 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8546 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8547 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8548 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8549 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8550}
8551
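/* Gather the common, function (E1H multi-function) and port HW
 * configuration.
 */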
8552static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8553{
8554 int func = BP_FUNC(bp);
8555 u32 val, val2;
8556 int rc = 0;
8557
8558 bnx2x_get_common_hwinfo(bp);
8559
8560 bp->e1hov = 0;
8561 bp->e1hmf = 0;
8562 if (CHIP_IS_E1H(bp)) {
8563 bp->mf_config =
8564 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8565
8566 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
8567 FUNC_MF_CFG_E1HOV_TAG_MASK);
8568 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8569 bp->e1hmf = 1;
8570 BNX2X_DEV_INFO("%s function mode\n",
8571 IS_E1HMF(bp) ? "multi" : "single");
8572
8573 if (IS_E1HMF(bp)) {
8574 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8575 e1hov_tag) &
8576 FUNC_MF_CFG_E1HOV_TAG_MASK);
8577 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8578 bp->e1hov = val;
8579 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8580 "(0x%04x)\n",
8581 func, bp->e1hov, bp->e1hov);
8582 } else {
8583 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8584 " aborting\n", func);
8585 rc = -EPERM;
8586 }
8587 } else {
8588 if (BP_E1HVN(bp)) {
8589 BNX2X_ERR("!!! VN %d in single function mode,"
8590 " aborting\n", BP_E1HVN(bp));
8591 rc = -EPERM;
8592 }
8593 }
8594 }
8595
8596 if (!BP_NOMCP(bp)) {
8597 bnx2x_get_port_hwinfo(bp);
8598
8599 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8600 DRV_MSG_SEQ_NUMBER_MASK);
8601 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8602 }
8603
8604 if (IS_E1HMF(bp)) {
8605 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8606 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8607 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8608 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8609 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8610 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8611 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8612 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8613 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8614 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8615 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8616 ETH_ALEN);
8617 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8618 ETH_ALEN);
8619 }
8620
8621 return rc;
8622 }
8623
8624 if (BP_NOMCP(bp)) {
8625 /* only supposed to happen on emulation/FPGA */
8626		BNX2X_ERR("warning: random MAC workaround active\n");
8627 random_ether_addr(bp->dev->dev_addr);
8628 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8629 }
8630
8631 return rc;
8632}
8633
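/* One-time driver private initialization: work items, settings derived
 * from module parameters, ring sizes, coalescing defaults and the
 * periodic timer.
 */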
8634static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8635{
8636 int func = BP_FUNC(bp);
8637 int timer_interval;
8638 int rc;
8639
8640 /* Disable interrupt handling until HW is initialized */
8641 atomic_set(&bp->intr_sem, 1);
8642 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8643
8644 mutex_init(&bp->port.phy_mutex);
8645
8646 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8647 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8648
8649 rc = bnx2x_get_hwinfo(bp);
8650
8651 /* need to reset chip if undi was active */
8652 if (!BP_NOMCP(bp))
8653 bnx2x_undi_unload(bp);
8654
8655 if (CHIP_REV_IS_FPGA(bp))
8656 printk(KERN_ERR PFX "FPGA detected\n");
8657
8658 if (BP_NOMCP(bp) && (func == 0))
8659 printk(KERN_ERR PFX
8660 "MCP disabled, must load devices in order!\n");
8661
8662 /* Set multi queue mode */
8663 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8664 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8665 printk(KERN_ERR PFX
8666 "Multi disabled since int_mode requested is not MSI-X\n");
8667 multi_mode = ETH_RSS_MODE_DISABLED;
8668 }
8669 bp->multi_mode = multi_mode;
8670
8671
8672 /* Set TPA flags */
8673 if (disable_tpa) {
8674 bp->flags &= ~TPA_ENABLE_FLAG;
8675 bp->dev->features &= ~NETIF_F_LRO;
8676 } else {
8677 bp->flags |= TPA_ENABLE_FLAG;
8678 bp->dev->features |= NETIF_F_LRO;
8679 }
8680
8681 if (CHIP_IS_E1(bp))
8682 bp->dropless_fc = 0;
8683 else
8684 bp->dropless_fc = dropless_fc;
8685
8686 bp->mrrs = mrrs;
8687
8688 bp->tx_ring_size = MAX_TX_AVAIL;
8689 bp->rx_ring_size = MAX_RX_AVAIL;
8690
8691 bp->rx_csum = 1;
8692
8693 bp->tx_ticks = 50;
8694 bp->rx_ticks = 25;
8695
8696 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8697 bp->current_interval = (poll ? poll : timer_interval);
8698
8699 init_timer(&bp->timer);
8700 bp->timer.expires = jiffies + bp->current_interval;
8701 bp->timer.data = (unsigned long) bp;
8702 bp->timer.function = bnx2x_timer;
8703
8704 return rc;
8705}
8706
8707/*
8708 * ethtool service functions
8709 */
8710
8711/* All ethtool functions called with rtnl_lock */
8712
8713static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8714{
8715 struct bnx2x *bp = netdev_priv(dev);
8716
8717 cmd->supported = bp->port.supported;
8718 cmd->advertising = bp->port.advertising;
8719
8720 if (netif_carrier_ok(dev)) {
8721 cmd->speed = bp->link_vars.line_speed;
8722 cmd->duplex = bp->link_vars.duplex;
8723 } else {
8724 cmd->speed = bp->link_params.req_line_speed;
8725 cmd->duplex = bp->link_params.req_duplex;
8726 }
8727 if (IS_E1HMF(bp)) {
8728 u16 vn_max_rate;
8729
8730 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8731 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8732 if (vn_max_rate < cmd->speed)
8733 cmd->speed = vn_max_rate;
8734 }
8735
8736 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8737 u32 ext_phy_type =
8738 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8739
8740 switch (ext_phy_type) {
8741 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8742 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8743 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8744 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8745 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8746 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8747 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8748 cmd->port = PORT_FIBRE;
8749 break;
8750
8751 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8752 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8753 cmd->port = PORT_TP;
8754 break;
8755
8756 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8757 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8758 bp->link_params.ext_phy_config);
8759 break;
8760
8761 default:
8762 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8763 bp->link_params.ext_phy_config);
8764 break;
8765 }
8766 } else
8767 cmd->port = PORT_TP;
8768
8769 cmd->phy_address = bp->mdio.prtad;
8770 cmd->transceiver = XCVR_INTERNAL;
8771
8772 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8773 cmd->autoneg = AUTONEG_ENABLE;
8774 else
8775 cmd->autoneg = AUTONEG_DISABLE;
8776
8777 cmd->maxtxpkt = 0;
8778 cmd->maxrxpkt = 0;
8779
8780 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8781 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8782 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8783 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8784 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8785 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8786 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8787
8788 return 0;
8789}
8790
8791static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8792{
8793 struct bnx2x *bp = netdev_priv(dev);
8794 u32 advertising;
8795
8796 if (IS_E1HMF(bp))
8797 return 0;
8798
8799 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8800 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8801 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8802 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8803 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8804 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8805 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8806
8807 if (cmd->autoneg == AUTONEG_ENABLE) {
8808 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8809 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8810 return -EINVAL;
8811 }
8812
8813 /* advertise the requested speed and duplex if supported */
8814 cmd->advertising &= bp->port.supported;
8815
8816 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8817 bp->link_params.req_duplex = DUPLEX_FULL;
8818 bp->port.advertising |= (ADVERTISED_Autoneg |
8819 cmd->advertising);
8820
8821 } else { /* forced speed */
8822 /* advertise the requested speed and duplex if supported */
8823 switch (cmd->speed) {
8824 case SPEED_10:
8825 if (cmd->duplex == DUPLEX_FULL) {
8826 if (!(bp->port.supported &
8827 SUPPORTED_10baseT_Full)) {
8828 DP(NETIF_MSG_LINK,
8829 "10M full not supported\n");
8830 return -EINVAL;
8831 }
8832
8833 advertising = (ADVERTISED_10baseT_Full |
8834 ADVERTISED_TP);
8835 } else {
8836 if (!(bp->port.supported &
8837 SUPPORTED_10baseT_Half)) {
8838 DP(NETIF_MSG_LINK,
8839 "10M half not supported\n");
8840 return -EINVAL;
8841 }
8842
8843 advertising = (ADVERTISED_10baseT_Half |
8844 ADVERTISED_TP);
8845 }
8846 break;
8847
8848 case SPEED_100:
8849 if (cmd->duplex == DUPLEX_FULL) {
8850 if (!(bp->port.supported &
8851 SUPPORTED_100baseT_Full)) {
8852 DP(NETIF_MSG_LINK,
8853 "100M full not supported\n");
8854 return -EINVAL;
8855 }
8856
8857 advertising = (ADVERTISED_100baseT_Full |
8858 ADVERTISED_TP);
8859 } else {
8860 if (!(bp->port.supported &
8861 SUPPORTED_100baseT_Half)) {
8862 DP(NETIF_MSG_LINK,
8863 "100M half not supported\n");
8864 return -EINVAL;
8865 }
8866
8867 advertising = (ADVERTISED_100baseT_Half |
8868 ADVERTISED_TP);
8869 }
8870 break;
8871
8872 case SPEED_1000:
8873 if (cmd->duplex != DUPLEX_FULL) {
8874 DP(NETIF_MSG_LINK, "1G half not supported\n");
8875 return -EINVAL;
8876 }
8877
8878 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8879 DP(NETIF_MSG_LINK, "1G full not supported\n");
8880 return -EINVAL;
8881 }
8882
8883 advertising = (ADVERTISED_1000baseT_Full |
8884 ADVERTISED_TP);
8885 break;
8886
8887 case SPEED_2500:
8888 if (cmd->duplex != DUPLEX_FULL) {
8889 DP(NETIF_MSG_LINK,
8890 "2.5G half not supported\n");
8891 return -EINVAL;
8892 }
8893
8894 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8895 DP(NETIF_MSG_LINK,
8896 "2.5G full not supported\n");
8897 return -EINVAL;
8898 }
8899
8900 advertising = (ADVERTISED_2500baseX_Full |
8901 ADVERTISED_TP);
8902 break;
8903
8904 case SPEED_10000:
8905 if (cmd->duplex != DUPLEX_FULL) {
8906 DP(NETIF_MSG_LINK, "10G half not supported\n");
8907 return -EINVAL;
8908 }
8909
8910 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8911 DP(NETIF_MSG_LINK, "10G full not supported\n");
8912 return -EINVAL;
8913 }
8914
8915 advertising = (ADVERTISED_10000baseT_Full |
8916 ADVERTISED_FIBRE);
8917 break;
8918
8919 default:
8920 DP(NETIF_MSG_LINK, "Unsupported speed\n");
8921 return -EINVAL;
8922 }
8923
8924 bp->link_params.req_line_speed = cmd->speed;
8925 bp->link_params.req_duplex = cmd->duplex;
8926 bp->port.advertising = advertising;
8927 }
8928
8929 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8930 DP_LEVEL " req_duplex %d advertising 0x%x\n",
8931 bp->link_params.req_line_speed, bp->link_params.req_duplex,
8932 bp->port.advertising);
8933
8934 if (netif_running(dev)) {
8935 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8936 bnx2x_link_set(bp);
8937 }
8938
8939 return 0;
8940}
8941
8942#define PHY_FW_VER_LEN 10
8943
8944static void bnx2x_get_drvinfo(struct net_device *dev,
8945 struct ethtool_drvinfo *info)
8946{
8947 struct bnx2x *bp = netdev_priv(dev);
8948 u8 phy_fw_ver[PHY_FW_VER_LEN];
8949
8950 strcpy(info->driver, DRV_MODULE_NAME);
8951 strcpy(info->version, DRV_MODULE_VERSION);
8952
8953 phy_fw_ver[0] = '\0';
8954 if (bp->port.pmf) {
8955 bnx2x_acquire_phy_lock(bp);
8956 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8957 (bp->state != BNX2X_STATE_CLOSED),
8958 phy_fw_ver, PHY_FW_VER_LEN);
8959 bnx2x_release_phy_lock(bp);
8960 }
8961
8962 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8963 (bp->common.bc_ver & 0xff0000) >> 16,
8964 (bp->common.bc_ver & 0xff00) >> 8,
8965 (bp->common.bc_ver & 0xff),
8966 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8967 strcpy(info->bus_info, pci_name(bp->pdev));
8968 info->n_stats = BNX2X_NUM_STATS;
8969 info->testinfo_len = BNX2X_NUM_TESTS;
8970 info->eedump_len = bp->common.flash_size;
8971 info->regdump_len = 0;
8972}
8973
8974#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8975#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8976
8977static int bnx2x_get_regs_len(struct net_device *dev)
8978{
8979 static u32 regdump_len;
8980 struct bnx2x *bp = netdev_priv(dev);
8981 int i;
8982
8983 if (regdump_len)
8984 return regdump_len;
8985
8986 if (CHIP_IS_E1(bp)) {
8987 for (i = 0; i < REGS_COUNT; i++)
8988 if (IS_E1_ONLINE(reg_addrs[i].info))
8989 regdump_len += reg_addrs[i].size;
8990
8991 for (i = 0; i < WREGS_COUNT_E1; i++)
8992 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8993 regdump_len += wreg_addrs_e1[i].size *
8994 (1 + wreg_addrs_e1[i].read_regs_count);
8995
8996 } else { /* E1H */
8997 for (i = 0; i < REGS_COUNT; i++)
8998 if (IS_E1H_ONLINE(reg_addrs[i].info))
8999 regdump_len += reg_addrs[i].size;
9000
9001 for (i = 0; i < WREGS_COUNT_E1H; i++)
9002 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9003 regdump_len += wreg_addrs_e1h[i].size *
9004 (1 + wreg_addrs_e1h[i].read_regs_count);
9005 }
9006 regdump_len *= 4;
9007 regdump_len += sizeof(struct dump_hdr);
9008
9009 return regdump_len;
9010}
9011
9012static void bnx2x_get_regs(struct net_device *dev,
9013 struct ethtool_regs *regs, void *_p)
9014{
9015 u32 *p = _p, i, j;
9016 struct bnx2x *bp = netdev_priv(dev);
9017 struct dump_hdr dump_hdr = {0};
9018
9019 regs->version = 0;
9020 memset(p, 0, regs->len);
9021
9022 if (!netif_running(bp->dev))
9023 return;
9024
9025 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9026 dump_hdr.dump_sign = dump_sign_all;
9027 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9028 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9029 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9030 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9031 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9032
9033 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9034 p += dump_hdr.hdr_size + 1;
9035
9036 if (CHIP_IS_E1(bp)) {
9037 for (i = 0; i < REGS_COUNT; i++)
9038 if (IS_E1_ONLINE(reg_addrs[i].info))
9039 for (j = 0; j < reg_addrs[i].size; j++)
9040 *p++ = REG_RD(bp,
9041 reg_addrs[i].addr + j*4);
9042
9043 } else { /* E1H */
9044 for (i = 0; i < REGS_COUNT; i++)
9045 if (IS_E1H_ONLINE(reg_addrs[i].info))
9046 for (j = 0; j < reg_addrs[i].size; j++)
9047 *p++ = REG_RD(bp,
9048 reg_addrs[i].addr + j*4);
9049 }
9050}
9051
9052static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9053{
9054 struct bnx2x *bp = netdev_priv(dev);
9055
9056 if (bp->flags & NO_WOL_FLAG) {
9057 wol->supported = 0;
9058 wol->wolopts = 0;
9059 } else {
9060 wol->supported = WAKE_MAGIC;
9061 if (bp->wol)
9062 wol->wolopts = WAKE_MAGIC;
9063 else
9064 wol->wolopts = 0;
9065 }
9066 memset(&wol->sopass, 0, sizeof(wol->sopass));
9067}
9068
9069static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9070{
9071 struct bnx2x *bp = netdev_priv(dev);
9072
9073 if (wol->wolopts & ~WAKE_MAGIC)
9074 return -EINVAL;
9075
9076 if (wol->wolopts & WAKE_MAGIC) {
9077 if (bp->flags & NO_WOL_FLAG)
9078 return -EINVAL;
9079
9080 bp->wol = 1;
9081 } else
9082 bp->wol = 0;
9083
9084 return 0;
9085}
9086
9087static u32 bnx2x_get_msglevel(struct net_device *dev)
9088{
9089 struct bnx2x *bp = netdev_priv(dev);
9090
9091 return bp->msglevel;
9092}
9093
9094static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9095{
9096 struct bnx2x *bp = netdev_priv(dev);
9097
9098 if (capable(CAP_NET_ADMIN))
9099 bp->msglevel = level;
9100}
9101
9102static int bnx2x_nway_reset(struct net_device *dev)
9103{
9104 struct bnx2x *bp = netdev_priv(dev);
9105
9106 if (!bp->port.pmf)
9107 return 0;
9108
9109 if (netif_running(dev)) {
9110 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9111 bnx2x_link_set(bp);
9112 }
9113
9114 return 0;
9115}
9116
9117static u32
9118bnx2x_get_link(struct net_device *dev)
9119{
9120 struct bnx2x *bp = netdev_priv(dev);
9121
9122 return bp->link_vars.link_up;
9123}
9124
9125static int bnx2x_get_eeprom_len(struct net_device *dev)
9126{
9127 struct bnx2x *bp = netdev_priv(dev);
9128
9129 return bp->common.flash_size;
9130}
9131
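/* Request the per-port NVRAM software arbitration and poll until it is
 * granted (or time out).
 */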
9132static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9133{
9134 int port = BP_PORT(bp);
9135 int count, i;
9136 u32 val = 0;
9137
9138 /* adjust timeout for emulation/FPGA */
9139 count = NVRAM_TIMEOUT_COUNT;
9140 if (CHIP_REV_IS_SLOW(bp))
9141 count *= 100;
9142
9143 /* request access to nvram interface */
9144 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9145 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9146
9147 for (i = 0; i < count*10; i++) {
9148 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9149 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9150 break;
9151
9152 udelay(5);
9153 }
9154
9155 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
9156 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
9157 return -EBUSY;
9158 }
9159
9160 return 0;
9161}
9162
9163static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9164{
9165 int port = BP_PORT(bp);
9166 int count, i;
9167 u32 val = 0;
9168
9169 /* adjust timeout for emulation/FPGA */
9170 count = NVRAM_TIMEOUT_COUNT;
9171 if (CHIP_REV_IS_SLOW(bp))
9172 count *= 100;
9173
9174 /* relinquish nvram interface */
9175 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9176 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9177
9178 for (i = 0; i < count*10; i++) {
9179 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9180 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9181 break;
9182
9183 udelay(5);
9184 }
9185
9186 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
9187 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
9188 return -EBUSY;
9189 }
9190
9191 return 0;
9192}
9193
9194static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9195{
9196 u32 val;
9197
9198 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9199
9200 /* enable both bits, even on read */
9201 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9202 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9203 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9204}
9205
9206static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9207{
9208 u32 val;
9209
9210 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9211
9212 /* disable both bits, even after read */
9213 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9214 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9215 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9216}
9217
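/* Issue a single dword read command to the NVRAM interface and poll for
 * the DONE bit; the result is returned in big-endian (ethtool) order.
 */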
9218static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
9219 u32 cmd_flags)
9220{
9221 int count, i, rc;
9222 u32 val;
9223
9224 /* build the command word */
9225 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9226
9227 /* need to clear DONE bit separately */
9228 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9229
9230 /* address of the NVRAM to read from */
9231 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9232 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9233
9234 /* issue a read command */
9235 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9236
9237 /* adjust timeout for emulation/FPGA */
9238 count = NVRAM_TIMEOUT_COUNT;
9239 if (CHIP_REV_IS_SLOW(bp))
9240 count *= 100;
9241
9242 /* wait for completion */
9243 *ret_val = 0;
9244 rc = -EBUSY;
9245 for (i = 0; i < count; i++) {
9246 udelay(5);
9247 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9248
9249 if (val & MCPR_NVM_COMMAND_DONE) {
9250 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
9251 /* we read nvram data in cpu order
9252 * but ethtool sees it as an array of bytes
9253 * converting to big-endian will do the work */
9254 *ret_val = cpu_to_be32(val);
9255 rc = 0;
9256 break;
9257 }
9258 }
9259
9260 return rc;
9261}
9262
9263static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9264 int buf_size)
9265{
9266 int rc;
9267 u32 cmd_flags;
9268 __be32 val;
9269
9270 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9271 DP(BNX2X_MSG_NVM,
9272 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9273 offset, buf_size);
9274 return -EINVAL;
9275 }
9276
9277 if (offset + buf_size > bp->common.flash_size) {
9278 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9279 " buf_size (0x%x) > flash_size (0x%x)\n",
9280 offset, buf_size, bp->common.flash_size);
9281 return -EINVAL;
9282 }
9283
9284 /* request access to nvram interface */
9285 rc = bnx2x_acquire_nvram_lock(bp);
9286 if (rc)
9287 return rc;
9288
9289 /* enable access to nvram interface */
9290 bnx2x_enable_nvram_access(bp);
9291
9292 /* read the first word(s) */
9293 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9294 while ((buf_size > sizeof(u32)) && (rc == 0)) {
9295 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9296 memcpy(ret_buf, &val, 4);
9297
9298 /* advance to the next dword */
9299 offset += sizeof(u32);
9300 ret_buf += sizeof(u32);
9301 buf_size -= sizeof(u32);
9302 cmd_flags = 0;
9303 }
9304
9305 if (rc == 0) {
9306 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9307 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9308 memcpy(ret_buf, &val, 4);
9309 }
9310
9311 /* disable access to nvram interface */
9312 bnx2x_disable_nvram_access(bp);
9313 bnx2x_release_nvram_lock(bp);
9314
9315 return rc;
9316}
9317
9318static int bnx2x_get_eeprom(struct net_device *dev,
9319 struct ethtool_eeprom *eeprom, u8 *eebuf)
9320{
9321 struct bnx2x *bp = netdev_priv(dev);
9322 int rc;
9323
9324 if (!netif_running(dev))
9325 return -EAGAIN;
9326
9327 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9328 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9329 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9330 eeprom->len, eeprom->len);
9331
9332 /* parameters already validated in ethtool_get_eeprom */
9333
9334 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9335
9336 return rc;
9337}
9338
9339static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9340 u32 cmd_flags)
9341{
9342 int count, i, rc;
9343
9344 /* build the command word */
9345 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9346
9347 /* need to clear DONE bit separately */
9348 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9349
9350 /* write the data */
9351 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9352
9353 /* address of the NVRAM to write to */
9354 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9355 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9356
9357 /* issue the write command */
9358 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9359
9360 /* adjust timeout for emulation/FPGA */
9361 count = NVRAM_TIMEOUT_COUNT;
9362 if (CHIP_REV_IS_SLOW(bp))
9363 count *= 100;
9364
9365 /* wait for completion */
9366 rc = -EBUSY;
9367 for (i = 0; i < count; i++) {
9368 udelay(5);
9369 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9370 if (val & MCPR_NVM_COMMAND_DONE) {
9371 rc = 0;
9372 break;
9373 }
9374 }
9375
9376 return rc;
9377}
9378
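/* BYTE_OFFSET() turns a byte address into the bit shift of that byte
 * within its dword, e.g. offset 0x13 -> (0x13 & 0x03) = 3 -> shift 24.
 * bnx2x_nvram_write1() below uses it to patch one byte of a read dword.
 */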
9379#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
9380
9381static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9382 int buf_size)
9383{
9384 int rc;
9385 u32 cmd_flags;
9386 u32 align_offset;
9387 __be32 val;
9388
9389 if (offset + buf_size > bp->common.flash_size) {
9390 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9391 " buf_size (0x%x) > flash_size (0x%x)\n",
9392 offset, buf_size, bp->common.flash_size);
9393 return -EINVAL;
9394 }
9395
9396 /* request access to nvram interface */
9397 rc = bnx2x_acquire_nvram_lock(bp);
9398 if (rc)
9399 return rc;
9400
9401 /* enable access to nvram interface */
9402 bnx2x_enable_nvram_access(bp);
9403
9404 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9405 align_offset = (offset & ~0x03);
9406 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9407
9408 if (rc == 0) {
9409 val &= ~(0xff << BYTE_OFFSET(offset));
9410 val |= (*data_buf << BYTE_OFFSET(offset));
9411
9412 /* nvram data is returned as an array of bytes
9413 * convert it back to cpu order */
9414 val = be32_to_cpu(val);
9415
9416 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9417 cmd_flags);
9418 }
9419
9420 /* disable access to nvram interface */
9421 bnx2x_disable_nvram_access(bp);
9422 bnx2x_release_nvram_lock(bp);
9423
9424 return rc;
9425}
9426
9427static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9428 int buf_size)
9429{
9430 int rc;
9431 u32 cmd_flags;
9432 u32 val;
9433 u32 written_so_far;
9434
9435 if (buf_size == 1) /* ethtool */
9436 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9437
9438 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9439 DP(BNX2X_MSG_NVM,
9440 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9441 offset, buf_size);
9442 return -EINVAL;
9443 }
9444
9445 if (offset + buf_size > bp->common.flash_size) {
9446 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9447 " buf_size (0x%x) > flash_size (0x%x)\n",
9448 offset, buf_size, bp->common.flash_size);
9449 return -EINVAL;
9450 }
9451
9452 /* request access to nvram interface */
9453 rc = bnx2x_acquire_nvram_lock(bp);
9454 if (rc)
9455 return rc;
9456
9457 /* enable access to nvram interface */
9458 bnx2x_enable_nvram_access(bp);
9459
9460 written_so_far = 0;
9461 cmd_flags = MCPR_NVM_COMMAND_FIRST;
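	/* flash programming proceeds in NVRAM_PAGE_SIZE chunks: LAST is set
	 * on the final dword of the buffer or of a page, FIRST on the first
	 * dword of the next page */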
9462 while ((written_so_far < buf_size) && (rc == 0)) {
9463 if (written_so_far == (buf_size - sizeof(u32)))
9464 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9465 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9466 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9467 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9468 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9469
9470 memcpy(&val, data_buf, 4);
9471
9472 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9473
9474 /* advance to the next dword */
9475 offset += sizeof(u32);
9476 data_buf += sizeof(u32);
9477 written_so_far += sizeof(u32);
9478 cmd_flags = 0;
9479 }
9480
9481 /* disable access to nvram interface */
9482 bnx2x_disable_nvram_access(bp);
9483 bnx2x_release_nvram_lock(bp);
9484
9485 return rc;
9486}
9487
9488static int bnx2x_set_eeprom(struct net_device *dev,
9489 struct ethtool_eeprom *eeprom, u8 *eebuf)
9490{
9491 struct bnx2x *bp = netdev_priv(dev);
9492 int port = BP_PORT(bp);
9493 int rc = 0;
9494
9495 if (!netif_running(dev))
9496 return -EAGAIN;
9497
9498 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9499 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9500 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9501 eeprom->len, eeprom->len);
9502
9503 /* parameters already validated in ethtool_set_eeprom */
9504
9505 /* PHY eeprom can be accessed only by the PMF */
9506 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9507 !bp->port.pmf)
9508 return -EINVAL;
9509
9510 if (eeprom->magic == 0x50485950) {
9511 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9512 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9513
9514 bnx2x_acquire_phy_lock(bp);
9515 rc |= bnx2x_link_reset(&bp->link_params,
9516 &bp->link_vars, 0);
9517 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9518 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9519 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9520 MISC_REGISTERS_GPIO_HIGH, port);
9521 bnx2x_release_phy_lock(bp);
9522 bnx2x_link_report(bp);
9523
9524 } else if (eeprom->magic == 0x50485952) {
9525 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9526 if ((bp->state == BNX2X_STATE_OPEN) ||
9527 (bp->state == BNX2X_STATE_DISABLED)) {
9528 bnx2x_acquire_phy_lock(bp);
9529 rc |= bnx2x_link_reset(&bp->link_params,
9530 &bp->link_vars, 1);
9531
9532 rc |= bnx2x_phy_init(&bp->link_params,
9533 &bp->link_vars);
9534 bnx2x_release_phy_lock(bp);
9535 bnx2x_calc_fc_adv(bp);
9536 }
9537 } else if (eeprom->magic == 0x53985943) {
9538 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
9539 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9540 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9541 u8 ext_phy_addr =
9542 (bp->link_params.ext_phy_config &
9543 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
9544 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT;
9545
9546 /* DSP Remove Download Mode */
9547 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9548 MISC_REGISTERS_GPIO_LOW, port);
9549
9550 bnx2x_acquire_phy_lock(bp);
9551
9552 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9553
9554 /* wait 0.5 sec to allow it to run */
9555 msleep(500);
9556 bnx2x_ext_phy_hw_reset(bp, port);
9557 msleep(500);
9558 bnx2x_release_phy_lock(bp);
9559 }
9560 } else
9561 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9562
9563 return rc;
9564}
9565
9566static int bnx2x_get_coalesce(struct net_device *dev,
9567 struct ethtool_coalesce *coal)
9568{
9569 struct bnx2x *bp = netdev_priv(dev);
9570
9571 memset(coal, 0, sizeof(struct ethtool_coalesce));
9572
9573 coal->rx_coalesce_usecs = bp->rx_ticks;
9574 coal->tx_coalesce_usecs = bp->tx_ticks;
9575
9576 return 0;
9577}
9578
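/* 0xf0 * 12 = 2880 us; bnx2x_set_coalesce() clamps larger requests to this */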
9579#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
9580static int bnx2x_set_coalesce(struct net_device *dev,
9581 struct ethtool_coalesce *coal)
9582{
9583 struct bnx2x *bp = netdev_priv(dev);
9584
9585 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9586 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9587 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9588
9589 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9590 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9591 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
9592
9593 if (netif_running(dev))
9594 bnx2x_update_coalesce(bp);
9595
9596 return 0;
9597}
9598
9599static void bnx2x_get_ringparam(struct net_device *dev,
9600 struct ethtool_ringparam *ering)
9601{
9602 struct bnx2x *bp = netdev_priv(dev);
9603
9604 ering->rx_max_pending = MAX_RX_AVAIL;
9605 ering->rx_mini_max_pending = 0;
9606 ering->rx_jumbo_max_pending = 0;
9607
9608 ering->rx_pending = bp->rx_ring_size;
9609 ering->rx_mini_pending = 0;
9610 ering->rx_jumbo_pending = 0;
9611
9612 ering->tx_max_pending = MAX_TX_AVAIL;
9613 ering->tx_pending = bp->tx_ring_size;
9614}
9615
9616static int bnx2x_set_ringparam(struct net_device *dev,
9617 struct ethtool_ringparam *ering)
9618{
9619 struct bnx2x *bp = netdev_priv(dev);
9620 int rc = 0;
9621
9622 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9623 (ering->tx_pending > MAX_TX_AVAIL) ||
9624 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9625 return -EINVAL;
9626
9627 bp->rx_ring_size = ering->rx_pending;
9628 bp->tx_ring_size = ering->tx_pending;
9629
9630 if (netif_running(dev)) {
9631 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9632 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9633 }
9634
9635 return rc;
9636}
9637
9638static void bnx2x_get_pauseparam(struct net_device *dev,
9639 struct ethtool_pauseparam *epause)
9640{
9641 struct bnx2x *bp = netdev_priv(dev);
9642
9643 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9644 BNX2X_FLOW_CTRL_AUTO) &&
9645 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9646
9647 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9648 BNX2X_FLOW_CTRL_RX);
9649 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9650 BNX2X_FLOW_CTRL_TX);
9651
9652 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9653 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9654 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9655}
9656
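/* Map the ethtool pause settings onto the link parameters: rx_pause and
 * tx_pause select BNX2X_FLOW_CTRL_RX/TX (NONE if both are off), and with
 * autoneg requested on an autoneg-capable port running at auto speed the
 * request is turned back into BNX2X_FLOW_CTRL_AUTO so that the final
 * setting is negotiated with the link partner.
 */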
9657static int bnx2x_set_pauseparam(struct net_device *dev,
9658 struct ethtool_pauseparam *epause)
9659{
9660 struct bnx2x *bp = netdev_priv(dev);
9661
9662 if (IS_E1HMF(bp))
9663 return 0;
9664
9665 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9666 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9667 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9668
9669 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9670
9671 if (epause->rx_pause)
9672 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9673
9674 if (epause->tx_pause)
9675 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9676
9677 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9678 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9679
9680 if (epause->autoneg) {
9681 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9682 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9683 return -EINVAL;
9684 }
9685
9686 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9687 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9688 }
9689
9690 DP(NETIF_MSG_LINK,
9691 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9692
9693 if (netif_running(dev)) {
9694 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9695 bnx2x_link_set(bp);
9696 }
9697
9698 return 0;
9699}
9700
9701static int bnx2x_set_flags(struct net_device *dev, u32 data)
9702{
9703 struct bnx2x *bp = netdev_priv(dev);
9704 int changed = 0;
9705 int rc = 0;
9706
9707 /* TPA requires Rx CSUM offloading */
9708 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9709 if (!(dev->features & NETIF_F_LRO)) {
9710 dev->features |= NETIF_F_LRO;
9711 bp->flags |= TPA_ENABLE_FLAG;
9712 changed = 1;
9713 }
9714
9715 } else if (dev->features & NETIF_F_LRO) {
9716 dev->features &= ~NETIF_F_LRO;
9717 bp->flags &= ~TPA_ENABLE_FLAG;
9718 changed = 1;
9719 }
9720
9721 if (changed && netif_running(dev)) {
9722 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9723 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9724 }
9725
9726 return rc;
9727}
9728
9729static u32 bnx2x_get_rx_csum(struct net_device *dev)
9730{
9731 struct bnx2x *bp = netdev_priv(dev);
9732
9733 return bp->rx_csum;
9734}
9735
9736static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9737{
9738 struct bnx2x *bp = netdev_priv(dev);
9739 int rc = 0;
9740
9741 bp->rx_csum = data;
9742
9743 /* Disable TPA when Rx CSUM is disabled; otherwise all
9744 TPA'ed packets will be discarded due to a wrong TCP CSUM */
9745 if (!data) {
9746 u32 flags = ethtool_op_get_flags(dev);
9747
9748 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9749 }
9750
9751 return rc;
9752}
9753
9754static int bnx2x_set_tso(struct net_device *dev, u32 data)
9755{
9756 if (data) {
9757 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9758 dev->features |= NETIF_F_TSO6;
9759 } else {
9760 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9761 dev->features &= ~NETIF_F_TSO6;
9762 }
9763
9764 return 0;
9765}
9766
9767static const struct {
9768 char string[ETH_GSTRING_LEN];
9769} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9770 { "register_test (offline)" },
9771 { "memory_test (offline)" },
9772 { "loopback_test (offline)" },
9773 { "nvram_test (online)" },
9774 { "interrupt_test (online)" },
9775 { "link_test (online)" },
9776 { "idle check (online)" }
9777};
9778
9779static int bnx2x_self_test_count(struct net_device *dev)
9780{
9781 return BNX2X_NUM_TESTS;
9782}
9783
9784static int bnx2x_test_registers(struct bnx2x *bp)
9785{
9786 int idx, i, rc = -ENODEV;
9787 u32 wr_val = 0;
9788 int port = BP_PORT(bp);
9789 static const struct {
9790 u32 offset0;
9791 u32 offset1;
9792 u32 mask;
9793 } reg_tbl[] = {
9794/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9795 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9796 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9797 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9798 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9799 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9800 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9801 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9802 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9803 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9804/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9805 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9806 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9807 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9808 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9809 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9810 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9811 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9812 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9813 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9814/* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9815 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9816 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9817 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9818 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9819 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9820 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9821 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9822 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9823 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9824/* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9825 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9826 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9827 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9828 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9829 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9830 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9831
9832 { 0xffffffff, 0, 0x00000000 }
9833 };
9834
9835 if (!netif_running(bp->dev))
9836 return rc;
9837
9838 /* Run the test twice:
9839 first writing 0x00000000, then writing 0xffffffff */
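	/* Each table entry is exercised at offset0 + port * offset1, so the
	 * same table covers both ports; only the bits in mask are compared,
	 * since the remaining bits may be read-only or reserved.
	 */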
9840 for (idx = 0; idx < 2; idx++) {
9841
9842 switch (idx) {
9843 case 0:
9844 wr_val = 0;
9845 break;
9846 case 1:
9847 wr_val = 0xffffffff;
9848 break;
9849 }
9850
9851 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9852 u32 offset, mask, save_val, val;
9853
9854 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9855 mask = reg_tbl[i].mask;
9856
9857 save_val = REG_RD(bp, offset);
9858
9859 REG_WR(bp, offset, wr_val);
9860 val = REG_RD(bp, offset);
9861
9862 /* Restore the original register's value */
9863 REG_WR(bp, offset, save_val);
9864
9865 /* verify that the value is as expected */
9866 if ((val & mask) != (wr_val & mask))
9867 goto test_reg_exit;
9868 }
9869 }
9870
9871 rc = 0;
9872
9873test_reg_exit:
9874 return rc;
9875}
9876
9877static int bnx2x_test_memory(struct bnx2x *bp)
9878{
9879 int i, j, rc = -ENODEV;
9880 u32 val;
9881 static const struct {
9882 u32 offset;
9883 int size;
9884 } mem_tbl[] = {
9885 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9886 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9887 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9888 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9889 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9890 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9891 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9892
9893 { 0xffffffff, 0 }
9894 };
9895 static const struct {
9896 char *name;
9897 u32 offset;
9898 u32 e1_mask;
9899 u32 e1h_mask;
9900 } prty_tbl[] = {
9901 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9902 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9903 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9904 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9905 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9906 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9907
9908 { NULL, 0xffffffff, 0, 0 }
9909 };
9910
9911 if (!netif_running(bp->dev))
9912 return rc;
9913
9914 /* Go through all the memories */
9915 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9916 for (j = 0; j < mem_tbl[i].size; j++)
9917 REG_RD(bp, mem_tbl[i].offset + j*4);
9918
9919 /* Check the parity status */
9920 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9921 val = REG_RD(bp, prty_tbl[i].offset);
9922 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9923 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9924 DP(NETIF_MSG_HW,
9925 "%s is 0x%x\n", prty_tbl[i].name, val);
9926 goto test_mem_exit;
9927 }
9928 }
9929
9930 rc = 0;
9931
9932test_mem_exit:
9933 return rc;
9934}
9935
9936static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9937{
9938 int cnt = 1000;
9939
9940 if (link_up)
9941 while (bnx2x_link_test(bp) && cnt--)
9942 msleep(10);
9943}
9944
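/* Send one self-addressed frame on the Tx ring and verify that it comes
 * back on the Rx ring: the Tx and Rx consumer indices must each advance
 * by one packet, the completion must carry no error flags, and the
 * received payload must match the byte pattern the function generates.
 */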
9945static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9946{
9947 unsigned int pkt_size, num_pkts, i;
9948 struct sk_buff *skb;
9949 unsigned char *packet;
9950 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
9951 struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
9952 u16 tx_start_idx, tx_idx;
9953 u16 rx_start_idx, rx_idx;
9954 u16 pkt_prod, bd_prod;
9955 struct sw_tx_bd *tx_buf;
9956 struct eth_tx_start_bd *tx_start_bd;
9957 struct eth_tx_parse_bd *pbd = NULL;
9958 dma_addr_t mapping;
9959 union eth_rx_cqe *cqe;
9960 u8 cqe_fp_flags;
9961 struct sw_rx_bd *rx_buf;
9962 u16 len;
9963 int rc = -ENODEV;
9964
9965 /* check the loopback mode */
9966 switch (loopback_mode) {
9967 case BNX2X_PHY_LOOPBACK:
9968 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9969 return -EINVAL;
9970 break;
9971 case BNX2X_MAC_LOOPBACK:
9972 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9973 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9974 break;
9975 default:
9976 return -EINVAL;
9977 }
9978
9979 /* prepare the loopback packet */
9980 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9981 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9982 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9983 if (!skb) {
9984 rc = -ENOMEM;
9985 goto test_loopback_exit;
9986 }
9987 packet = skb_put(skb, pkt_size);
9988 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9989 memset(packet + ETH_ALEN, 0, ETH_ALEN);
9990 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
9991 for (i = ETH_HLEN; i < pkt_size; i++)
9992 packet[i] = (unsigned char) (i & 0xff);
9993
9994 /* send the loopback packet */
9995 num_pkts = 0;
9996 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
9997 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
9998
9999 pkt_prod = fp_tx->tx_pkt_prod++;
10000 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10001 tx_buf->first_bd = fp_tx->tx_bd_prod;
10002 tx_buf->skb = skb;
10003 tx_buf->flags = 0;
10004
10005 bd_prod = TX_BD(fp_tx->tx_bd_prod);
10006 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
10007 mapping = pci_map_single(bp->pdev, skb->data,
10008 skb_headlen(skb), PCI_DMA_TODEVICE);
10009 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10010 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10011 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10012 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10013 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10014 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10015 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10016 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10017
10018 /* turn on parsing and get a BD */
10019 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10020 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10021
10022 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10023
10024 wmb();
10025
10026 fp_tx->tx_db.data.prod += 2;
10027 barrier();
10028 DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
10029
10030 mmiowb();
10031
10032 num_pkts++;
10033 fp_tx->tx_bd_prod += 2; /* start + pbd */
10034 bp->dev->trans_start = jiffies;
10035
10036 udelay(100);
10037
10038 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10039 if (tx_idx != tx_start_idx + num_pkts)
10040 goto test_loopback_exit;
10041
10042 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10043 if (rx_idx != rx_start_idx + num_pkts)
10044 goto test_loopback_exit;
10045
10046 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
10047 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10048 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10049 goto test_loopback_rx_exit;
10050
10051 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10052 if (len != pkt_size)
10053 goto test_loopback_rx_exit;
10054
10055 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
10056 skb = rx_buf->skb;
10057 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10058 for (i = ETH_HLEN; i < pkt_size; i++)
10059 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10060 goto test_loopback_rx_exit;
10061
10062 rc = 0;
10063
10064test_loopback_rx_exit:
10065
10066 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10067 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10068 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10069 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
10070
10071 /* Update producers */
10072 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10073 fp_rx->rx_sge_prod);
10074
10075test_loopback_exit:
10076 bp->link_params.loopback_mode = LOOPBACK_NONE;
10077
10078 return rc;
10079}
10080
10081static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10082{
10083 int rc = 0, res;
10084
10085 if (!netif_running(bp->dev))
10086 return BNX2X_LOOPBACK_FAILED;
10087
10088 bnx2x_netif_stop(bp, 1);
10089 bnx2x_acquire_phy_lock(bp);
10090
10091 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10092 if (res) {
10093 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
10094 rc |= BNX2X_PHY_LOOPBACK_FAILED;
10095 }
10096
10097 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10098 if (res) {
10099 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
10100 rc |= BNX2X_MAC_LOOPBACK_FAILED;
10101 }
10102
10103 bnx2x_release_phy_lock(bp);
10104 bnx2x_netif_start(bp);
10105
10106 return rc;
10107}
10108
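/* Each region listed in nvram_tbl below ends with a CRC32 of its own
 * contents; running ether_crc_le() over the data plus the stored CRC
 * therefore yields the fixed CRC-32 residual 0xdebb20e3 when intact.
 */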
10109#define CRC32_RESIDUAL 0xdebb20e3
10110
10111static int bnx2x_test_nvram(struct bnx2x *bp)
10112{
10113 static const struct {
10114 int offset;
10115 int size;
10116 } nvram_tbl[] = {
10117 { 0, 0x14 }, /* bootstrap */
10118 { 0x14, 0xec }, /* dir */
10119 { 0x100, 0x350 }, /* manuf_info */
10120 { 0x450, 0xf0 }, /* feature_info */
10121 { 0x640, 0x64 }, /* upgrade_key_info */
10122 { 0x6a4, 0x64 },
10123 { 0x708, 0x70 }, /* manuf_key_info */
10124 { 0x778, 0x70 },
10125 { 0, 0 }
10126 };
10127 __be32 buf[0x350 / 4];
10128 u8 *data = (u8 *)buf;
10129 int i, rc;
10130 u32 magic, csum;
10131
10132 rc = bnx2x_nvram_read(bp, 0, data, 4);
10133 if (rc) {
10134 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
10135 goto test_nvram_exit;
10136 }
10137
10138 magic = be32_to_cpu(buf[0]);
10139 if (magic != 0x669955aa) {
10140 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10141 rc = -ENODEV;
10142 goto test_nvram_exit;
10143 }
10144
10145 for (i = 0; nvram_tbl[i].size; i++) {
10146
10147 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10148 nvram_tbl[i].size);
10149 if (rc) {
10150 DP(NETIF_MSG_PROBE,
10151 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
10152 goto test_nvram_exit;
10153 }
10154
10155 csum = ether_crc_le(nvram_tbl[i].size, data);
10156 if (csum != CRC32_RESIDUAL) {
10157 DP(NETIF_MSG_PROBE,
10158 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
10159 rc = -ENODEV;
10160 goto test_nvram_exit;
10161 }
10162 }
10163
10164test_nvram_exit:
10165 return rc;
10166}
10167
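/* Interrupt test: post an empty SET_MAC ramrod on the slowpath queue and
 * wait for its completion, which is delivered through the slowpath
 * interrupt path; if set_mac_pending is never cleared the interrupt is
 * considered broken and -ENODEV is returned.
 */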
10168static int bnx2x_test_intr(struct bnx2x *bp)
10169{
10170 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10171 int i, rc;
10172
10173 if (!netif_running(bp->dev))
10174 return -ENODEV;
10175
10176 config->hdr.length = 0;
10177 if (CHIP_IS_E1(bp))
10178 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10179 else
10180 config->hdr.offset = BP_FUNC(bp);
10181 config->hdr.client_id = bp->fp->cl_id;
10182 config->hdr.reserved1 = 0;
10183
10184 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10185 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10186 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10187 if (rc == 0) {
10188 bp->set_mac_pending++;
10189 for (i = 0; i < 10; i++) {
10190 if (!bp->set_mac_pending)
10191 break;
10192 msleep_interruptible(10);
10193 }
10194 if (i == 10)
10195 rc = -ENODEV;
10196 }
10197
10198 return rc;
10199}
10200
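/* buf[] entries match bnx2x_tests_str_arr one to one: 0 registers,
 * 1 memory, 2 loopback, 3 nvram, 4 interrupt, 5 link (PMF only).
 */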
10201static void bnx2x_self_test(struct net_device *dev,
10202 struct ethtool_test *etest, u64 *buf)
10203{
10204 struct bnx2x *bp = netdev_priv(dev);
10205
10206 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10207
10208 if (!netif_running(dev))
10209 return;
10210
10211 /* offline tests are not supported in MF mode */
10212 if (IS_E1HMF(bp))
10213 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10214
10215 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10216 int port = BP_PORT(bp);
10217 u32 val;
10218 u8 link_up;
10219
10220 /* save current value of input enable for TX port IF */
10221 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10222 /* disable input for TX port IF */
10223 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10224
10225 link_up = bp->link_vars.link_up;
10226 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10227 bnx2x_nic_load(bp, LOAD_DIAG);
10228 /* wait until link state is restored */
10229 bnx2x_wait_for_link(bp, link_up);
10230
10231 if (bnx2x_test_registers(bp) != 0) {
10232 buf[0] = 1;
10233 etest->flags |= ETH_TEST_FL_FAILED;
10234 }
10235 if (bnx2x_test_memory(bp) != 0) {
10236 buf[1] = 1;
10237 etest->flags |= ETH_TEST_FL_FAILED;
10238 }
10239 buf[2] = bnx2x_test_loopback(bp, link_up);
10240 if (buf[2] != 0)
10241 etest->flags |= ETH_TEST_FL_FAILED;
10242
10243 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10244
10245 /* restore input for TX port IF */
10246 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10247
10248 bnx2x_nic_load(bp, LOAD_NORMAL);
10249 /* wait until link state is restored */
10250 bnx2x_wait_for_link(bp, link_up);
10251 }
10252 if (bnx2x_test_nvram(bp) != 0) {
10253 buf[3] = 1;
10254 etest->flags |= ETH_TEST_FL_FAILED;
10255 }
10256 if (bnx2x_test_intr(bp) != 0) {
10257 buf[4] = 1;
10258 etest->flags |= ETH_TEST_FL_FAILED;
10259 }
10260 if (bp->port.pmf)
10261 if (bnx2x_link_test(bp) != 0) {
10262 buf[5] = 1;
10263 etest->flags |= ETH_TEST_FL_FAILED;
10264 }
10265
10266#ifdef BNX2X_EXTRA_DEBUG
10267 bnx2x_panic_dump(bp);
10268#endif
10269}
10270
10271static const struct {
10272 long offset;
10273 int size;
10274 u8 string[ETH_GSTRING_LEN];
10275} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10276/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10277 { Q_STATS_OFFSET32(error_bytes_received_hi),
10278 8, "[%d]: rx_error_bytes" },
10279 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10280 8, "[%d]: rx_ucast_packets" },
10281 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10282 8, "[%d]: rx_mcast_packets" },
10283 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10284 8, "[%d]: rx_bcast_packets" },
10285 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10286 { Q_STATS_OFFSET32(rx_err_discard_pkt),
10287 4, "[%d]: rx_phy_ip_err_discards"},
10288 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10289 4, "[%d]: rx_skb_alloc_discard" },
10290 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10291
10292/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10293 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10294 8, "[%d]: tx_packets" }
10295};
10296
10297static const struct {
10298 long offset;
10299 int size;
10300 u32 flags;
10301#define STATS_FLAGS_PORT 1
10302#define STATS_FLAGS_FUNC 2
10303#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
10304 u8 string[ETH_GSTRING_LEN];
10305} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
10306/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10307 8, STATS_FLAGS_BOTH, "rx_bytes" },
10308 { STATS_OFFSET32(error_bytes_received_hi),
10309 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
10310 { STATS_OFFSET32(total_unicast_packets_received_hi),
10311 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
10312 { STATS_OFFSET32(total_multicast_packets_received_hi),
10313 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
10314 { STATS_OFFSET32(total_broadcast_packets_received_hi),
10315 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
10316 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
10317 8, STATS_FLAGS_PORT, "rx_crc_errors" },
10318 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
10319 8, STATS_FLAGS_PORT, "rx_align_errors" },
10320 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10321 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10322 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10323 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10324/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10325 8, STATS_FLAGS_PORT, "rx_fragments" },
10326 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10327 8, STATS_FLAGS_PORT, "rx_jabbers" },
10328 { STATS_OFFSET32(no_buff_discard_hi),
10329 8, STATS_FLAGS_BOTH, "rx_discards" },
10330 { STATS_OFFSET32(mac_filter_discard),
10331 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10332 { STATS_OFFSET32(xxoverflow_discard),
10333 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10334 { STATS_OFFSET32(brb_drop_hi),
10335 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10336 { STATS_OFFSET32(brb_truncate_hi),
10337 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10338 { STATS_OFFSET32(pause_frames_received_hi),
10339 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10340 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10341 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10342 { STATS_OFFSET32(nig_timer_max),
10343 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10344/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10345 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10346 { STATS_OFFSET32(rx_skb_alloc_failed),
10347 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10348 { STATS_OFFSET32(hw_csum_err),
10349 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10350
10351 { STATS_OFFSET32(total_bytes_transmitted_hi),
10352 8, STATS_FLAGS_BOTH, "tx_bytes" },
10353 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10354 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10355 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10356 8, STATS_FLAGS_BOTH, "tx_packets" },
10357 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10358 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10359 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10360 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10361 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10362 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10363 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10364 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10365/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10366 8, STATS_FLAGS_PORT, "tx_deferred" },
10367 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10368 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10369 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10370 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10371 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10372 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10373 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10374 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10375 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10376 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10377 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10378 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10379 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10380 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10381 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10382 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10383 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10384 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10385/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10386 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10387 { STATS_OFFSET32(pause_frames_sent_hi),
10388 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10389};
10390
10391#define IS_PORT_STAT(i) \
10392 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10393#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10394#define IS_E1HMF_MODE_STAT(bp) \
10395 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
10396
10397static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10398{
10399 struct bnx2x *bp = netdev_priv(dev);
10400 int i, j, k;
10401
10402 switch (stringset) {
10403 case ETH_SS_STATS:
10404 if (is_multi(bp)) {
10405 k = 0;
10406 for_each_rx_queue(bp, i) {
10407 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10408 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10409 bnx2x_q_stats_arr[j].string, i);
10410 k += BNX2X_NUM_Q_STATS;
10411 }
10412 if (IS_E1HMF_MODE_STAT(bp))
10413 break;
10414 for (j = 0; j < BNX2X_NUM_STATS; j++)
10415 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10416 bnx2x_stats_arr[j].string);
10417 } else {
10418 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10419 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10420 continue;
10421 strcpy(buf + j*ETH_GSTRING_LEN,
10422 bnx2x_stats_arr[i].string);
10423 j++;
10424 }
10425 }
10426 break;
10427
10428 case ETH_SS_TEST:
10429 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10430 break;
10431 }
10432}
10433
10434static int bnx2x_get_stats_count(struct net_device *dev)
10435{
10436 struct bnx2x *bp = netdev_priv(dev);
10437 int i, num_stats;
10438
10439 if (is_multi(bp)) {
10440 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10441 if (!IS_E1HMF_MODE_STAT(bp))
10442 num_stats += BNX2X_NUM_STATS;
10443 } else {
10444 if (IS_E1HMF_MODE_STAT(bp)) {
10445 num_stats = 0;
10446 for (i = 0; i < BNX2X_NUM_STATS; i++)
10447 if (IS_FUNC_STAT(i))
10448 num_stats++;
10449 } else
10450 num_stats = BNX2X_NUM_STATS;
10451 }
10452
10453 return num_stats;
10454}
10455
10456static void bnx2x_get_ethtool_stats(struct net_device *dev,
10457 struct ethtool_stats *stats, u64 *buf)
10458{
10459 struct bnx2x *bp = netdev_priv(dev);
10460 u32 *hw_stats, *offset;
10461 int i, j, k;
10462
10463 if (is_multi(bp)) {
10464 k = 0;
10465 for_each_rx_queue(bp, i) {
10466 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10467 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10468 if (bnx2x_q_stats_arr[j].size == 0) {
10469 /* skip this counter */
10470 buf[k + j] = 0;
10471 continue;
10472 }
10473 offset = (hw_stats +
10474 bnx2x_q_stats_arr[j].offset);
10475 if (bnx2x_q_stats_arr[j].size == 4) {
10476 /* 4-byte counter */
10477 buf[k + j] = (u64) *offset;
10478 continue;
10479 }
10480 /* 8-byte counter */
10481 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10482 }
10483 k += BNX2X_NUM_Q_STATS;
10484 }
10485 if (IS_E1HMF_MODE_STAT(bp))
10486 return;
10487 hw_stats = (u32 *)&bp->eth_stats;
10488 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10489 if (bnx2x_stats_arr[j].size == 0) {
10490 /* skip this counter */
10491 buf[k + j] = 0;
10492 continue;
10493 }
10494 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10495 if (bnx2x_stats_arr[j].size == 4) {
10496 /* 4-byte counter */
10497 buf[k + j] = (u64) *offset;
10498 continue;
10499 }
10500 /* 8-byte counter */
10501 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10502 }
10503 } else {
10504 hw_stats = (u32 *)&bp->eth_stats;
10505 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10506 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10507 continue;
10508 if (bnx2x_stats_arr[i].size == 0) {
10509 /* skip this counter */
10510 buf[j] = 0;
10511 j++;
10512 continue;
10513 }
10514 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10515 if (bnx2x_stats_arr[i].size == 4) {
10516 /* 4-byte counter */
10517 buf[j] = (u64) *offset;
10518 j++;
10519 continue;
10520 }
10521 /* 8-byte counter */
10522 buf[j] = HILO_U64(*offset, *(offset + 1));
10523 j++;
10524 }
10525 }
10526}
10527
10528static int bnx2x_phys_id(struct net_device *dev, u32 data)
10529{
10530 struct bnx2x *bp = netdev_priv(dev);
10531 int port = BP_PORT(bp);
10532 int i;
10533
10534 if (!netif_running(dev))
10535 return 0;
10536
10537 if (!bp->port.pmf)
10538 return 0;
10539
10540 if (data == 0)
10541 data = 2;
10542
10543 for (i = 0; i < (data * 2); i++) {
10544 if ((i % 2) == 0)
10545 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10546 bp->link_params.hw_led_mode,
10547 bp->link_params.chip_id);
10548 else
10549 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10550 bp->link_params.hw_led_mode,
10551 bp->link_params.chip_id);
10552
10553 msleep_interruptible(500);
10554 if (signal_pending(current))
10555 break;
10556 }
10557
10558 if (bp->link_vars.link_up)
10559 bnx2x_set_led(bp, port, LED_MODE_OPER,
10560 bp->link_vars.line_speed,
10561 bp->link_params.hw_led_mode,
10562 bp->link_params.chip_id);
10563
10564 return 0;
10565}
10566
10567static struct ethtool_ops bnx2x_ethtool_ops = {
10568 .get_settings = bnx2x_get_settings,
10569 .set_settings = bnx2x_set_settings,
10570 .get_drvinfo = bnx2x_get_drvinfo,
10571 .get_regs_len = bnx2x_get_regs_len,
10572 .get_regs = bnx2x_get_regs,
10573 .get_wol = bnx2x_get_wol,
10574 .set_wol = bnx2x_set_wol,
10575 .get_msglevel = bnx2x_get_msglevel,
10576 .set_msglevel = bnx2x_set_msglevel,
10577 .nway_reset = bnx2x_nway_reset,
10578 .get_link = bnx2x_get_link,
10579 .get_eeprom_len = bnx2x_get_eeprom_len,
10580 .get_eeprom = bnx2x_get_eeprom,
10581 .set_eeprom = bnx2x_set_eeprom,
10582 .get_coalesce = bnx2x_get_coalesce,
10583 .set_coalesce = bnx2x_set_coalesce,
10584 .get_ringparam = bnx2x_get_ringparam,
10585 .set_ringparam = bnx2x_set_ringparam,
10586 .get_pauseparam = bnx2x_get_pauseparam,
10587 .set_pauseparam = bnx2x_set_pauseparam,
10588 .get_rx_csum = bnx2x_get_rx_csum,
10589 .set_rx_csum = bnx2x_set_rx_csum,
10590 .get_tx_csum = ethtool_op_get_tx_csum,
10591 .set_tx_csum = ethtool_op_set_tx_hw_csum,
10592 .set_flags = bnx2x_set_flags,
10593 .get_flags = ethtool_op_get_flags,
10594 .get_sg = ethtool_op_get_sg,
10595 .set_sg = ethtool_op_set_sg,
10596 .get_tso = ethtool_op_get_tso,
10597 .set_tso = bnx2x_set_tso,
10598 .self_test_count = bnx2x_self_test_count,
10599 .self_test = bnx2x_self_test,
10600 .get_strings = bnx2x_get_strings,
10601 .phys_id = bnx2x_phys_id,
10602 .get_stats_count = bnx2x_get_stats_count,
10603 .get_ethtool_stats = bnx2x_get_ethtool_stats,
10604};
10605
10606/* end of ethtool_ops */
10607
10608/****************************************************************************
10609* General service functions
10610****************************************************************************/
10611
10612static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10613{
10614 u16 pmcsr;
10615
10616 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10617
10618 switch (state) {
10619 case PCI_D0:
10620 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10621 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10622 PCI_PM_CTRL_PME_STATUS));
10623
10624 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10625 /* delay required during transition out of D3hot */
10626 msleep(20);
10627 break;
10628
10629 case PCI_D3hot:
10630 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10631 pmcsr |= 3;
10632
10633 if (bp->wol)
10634 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10635
10636 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10637 pmcsr);
10638
10639 /* No more memory access after this point until
10640 * device is brought back to D0.
10641 */
10642 break;
10643
10644 default:
10645 return -EINVAL;
10646 }
10647 return 0;
10648}
10649
10650static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10651{
10652 u16 rx_cons_sb;
10653
10654 /* Tell compiler that status block fields can change */
10655 barrier();
10656 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
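	/* the last entry of an RCQ page is a next-page pointer rather than a
	 * real completion, so step the expected index over it */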
10657 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10658 rx_cons_sb++;
10659 return (fp->rx_comp_cons != rx_cons_sb);
10660}
10661
10662/*
10663 * net_device service functions
10664 */
10665
10666static int bnx2x_poll(struct napi_struct *napi, int budget)
10667{
10668 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10669 napi);
10670 struct bnx2x *bp = fp->bp;
10671 int work_done = 0;
10672
10673#ifdef BNX2X_STOP_ON_ERROR
10674 if (unlikely(bp->panic))
10675 goto poll_panic;
10676#endif
10677
10678 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10679 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10680
10681 bnx2x_update_fpsb_idx(fp);
10682
10683 if (bnx2x_has_rx_work(fp)) {
10684 work_done = bnx2x_rx_int(fp, budget);
10685
10686 /* must not complete if we consumed full budget */
10687 if (work_done >= budget)
10688 goto poll_again;
10689 }
10690
10691 /* bnx2x_has_rx_work() reads the status block, so make sure the
10692 * status block indices have actually been read
10693 * (bnx2x_update_fpsb_idx) before this check (bnx2x_has_rx_work).
10694 * Without the rmb, the read in bnx2x_update_fpsb_idx could be
10695 * deferred until right before bnx2x_ack_sb; if the status block
10696 * were updated by DMA just after bnx2x_has_rx_work, the "newer"
10697 * index would then be written to the IGU. In that case no further
10698 * interrupt would arrive until the next status block update, even
10699 * though there is still unhandled work.
10700 */
10701 rmb();
10702
10703 if (!bnx2x_has_rx_work(fp)) {
10704#ifdef BNX2X_STOP_ON_ERROR
10705poll_panic:
10706#endif
10707 napi_complete(napi);
10708
10709 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10710 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10711 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10712 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10713 }
10714
10715poll_again:
10716 return work_done;
10717}
10718
10719
10720/* we split the first BD into headers and data BDs
10721 * to ease the pain of our fellow microcode engineers
10722 * we use one mapping for both BDs
10723 * So far this has only been observed to happen
10724 * in Other Operating Systems(TM)
10725 */
10726static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10727 struct bnx2x_fastpath *fp,
10728 struct sw_tx_bd *tx_buf,
10729 struct eth_tx_start_bd **tx_bd, u16 hlen,
10730 u16 bd_prod, int nbd)
10731{
10732 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
10733 struct eth_tx_bd *d_tx_bd;
10734 dma_addr_t mapping;
10735 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10736
10737 /* first, fix the first BD */
10738 h_tx_bd->nbd = cpu_to_le16(nbd);
10739 h_tx_bd->nbytes = cpu_to_le16(hlen);
10740
10741 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10742 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10743 h_tx_bd->addr_lo, h_tx_bd->nbd);
10744
10745 /* now get a new data BD
10746 * (after the pbd) and fill it */
10747 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10748 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
10749
10750 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10751 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10752
10753 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10754 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10755 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10756
10757 /* this marks the BD as one that has no individual mapping */
10758 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
10759
10760 DP(NETIF_MSG_TX_QUEUED,
10761 "TSO split data size is %d (%x:%x)\n",
10762 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10763
10764 /* update tx_bd */
10765 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
10766
10767 return bd_prod;
10768}
10769
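/* The stack's partial checksum may start before or after the transport
 * header; fix is the signed byte distance between the two. Fold those
 * bytes in or out with csum_add()/csum_sub() so the checksum covers
 * exactly the transport header onwards, then byte-swap it into the form
 * the parsing BD expects.
 */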
10770static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10771{
10772 if (fix > 0)
10773 csum = (u16) ~csum_fold(csum_sub(csum,
10774 csum_partial(t_header - fix, fix, 0)));
10775
10776 else if (fix < 0)
10777 csum = (u16) ~csum_fold(csum_add(csum,
10778 csum_partial(t_header, -fix, 0)));
10779
10780 return swab16(csum);
10781}
10782
10783static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10784{
10785 u32 rc;
10786
10787 if (skb->ip_summed != CHECKSUM_PARTIAL)
10788 rc = XMIT_PLAIN;
10789
10790 else {
10791 if (skb->protocol == htons(ETH_P_IPV6)) {
10792 rc = XMIT_CSUM_V6;
10793 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10794 rc |= XMIT_CSUM_TCP;
10795
10796 } else {
10797 rc = XMIT_CSUM_V4;
10798 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10799 rc |= XMIT_CSUM_TCP;
10800 }
10801 }
10802
10803 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10804 rc |= XMIT_GSO_V4;
10805
10806 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10807 rc |= XMIT_GSO_V6;
10808
10809 return rc;
10810}
10811
10812#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10813/* check if packet requires linearization (packet is too fragmented)
10814 no need to check fragmentation if page size > 8K (there will be no
10815 violation to FW restrictions) */
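/* For LSO the check slides a window of (MAX_FETCH_BD - 3) BDs over the
 * linear data and the frags; if any window sums to less than one MSS the
 * FW could be asked to build a segment out of too many BDs, so the skb
 * is linearized instead.
 */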
10816static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10817 u32 xmit_type)
10818{
10819 int to_copy = 0;
10820 int hlen = 0;
10821 int first_bd_sz = 0;
10822
10823 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10824 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10825
10826 if (xmit_type & XMIT_GSO) {
10827 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10828 /* Check if LSO packet needs to be copied:
10829 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10830 int wnd_size = MAX_FETCH_BD - 3;
10831 /* Number of windows to check */
10832 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10833 int wnd_idx = 0;
10834 int frag_idx = 0;
10835 u32 wnd_sum = 0;
10836
10837 /* Headers length */
10838 hlen = (int)(skb_transport_header(skb) - skb->data) +
10839 tcp_hdrlen(skb);
10840
10841 /* Amount of data (w/o headers) in the linear part of the SKB */
10842 first_bd_sz = skb_headlen(skb) - hlen;
10843
10844 wnd_sum = first_bd_sz;
10845
10846 /* Calculate the first sum - it's special */
10847 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10848 wnd_sum +=
10849 skb_shinfo(skb)->frags[frag_idx].size;
10850
10851 /* If there was data in the linear part of the skb, check it */
10852 if (first_bd_sz > 0) {
10853 if (unlikely(wnd_sum < lso_mss)) {
10854 to_copy = 1;
10855 goto exit_lbl;
10856 }
10857
10858 wnd_sum -= first_bd_sz;
10859 }
10860
10861 /* Others are easier: run through the frag list and
10862 check all windows */
10863 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10864 wnd_sum +=
10865 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10866
10867 if (unlikely(wnd_sum < lso_mss)) {
10868 to_copy = 1;
10869 break;
10870 }
10871 wnd_sum -=
10872 skb_shinfo(skb)->frags[wnd_idx].size;
10873 }
10874 } else {
10875 /* in the non-LSO case an over-fragmented packet must always
10876 be linearized */
10877 to_copy = 1;
10878 }
10879 }
10880
10881exit_lbl:
10882 if (unlikely(to_copy))
10883 DP(NETIF_MSG_TX_QUEUED,
10884 "Linearization IS REQUIRED for %s packet. "
10885 "num_frags %d hlen %d first_bd_sz %d\n",
10886 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10887 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10888
10889 return to_copy;
10890}
10891#endif
10892
10893/* called with netif_tx_lock
10894 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10895 * netif_wake_queue()
10896 */
10897static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10898{
10899 struct bnx2x *bp = netdev_priv(dev);
10900 struct bnx2x_fastpath *fp, *fp_stat;
10901 struct netdev_queue *txq;
10902 struct sw_tx_bd *tx_buf;
10903 struct eth_tx_start_bd *tx_start_bd;
10904 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
10905 struct eth_tx_parse_bd *pbd = NULL;
10906 u16 pkt_prod, bd_prod;
10907 int nbd, fp_index;
10908 dma_addr_t mapping;
10909 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10910 int i;
10911 u8 hlen = 0;
10912 __le16 pkt_size = 0;
10913
10914#ifdef BNX2X_STOP_ON_ERROR
10915 if (unlikely(bp->panic))
10916 return NETDEV_TX_BUSY;
10917#endif
10918
10919 fp_index = skb_get_queue_mapping(skb);
10920 txq = netdev_get_tx_queue(dev, fp_index);
10921
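	/* Tx fastpath entries follow the Rx entries in bp->fp[]: fp is the
	 * queue we transmit on, while fp_stat (the matching Rx-side entry)
	 * carries the per-queue statistics and XOFF accounting. */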
10922 fp = &bp->fp[fp_index + bp->num_rx_queues];
10923 fp_stat = &bp->fp[fp_index];
10924
10925 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10926 fp_stat->eth_q_stats.driver_xoff++;
10927 netif_tx_stop_queue(txq);
10928 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10929 return NETDEV_TX_BUSY;
10930 }
10931
10932 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10933 " gso type %x xmit_type %x\n",
10934 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10935 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10936
10937#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10938 /* First, check if we need to linearize the skb (due to FW
10939 restrictions). No need to check fragmentation if page size > 8K
10940 (there will be no violation to FW restrictions) */
10941 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10942 /* Statistics of linearization */
10943 bp->lin_cnt++;
10944 if (skb_linearize(skb) != 0) {
10945 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10946 "silently dropping this SKB\n");
10947 dev_kfree_skb_any(skb);
10948 return NETDEV_TX_OK;
10949 }
10950 }
10951#endif
10952
10953 /*
10954 Please read carefully. First we use one BD which we mark as start,
10955 then we have a parsing info BD (used for TSO or xsum),
10956 and only then we have the rest of the TSO BDs.
10957 (don't forget to mark the last one as last,
10958 and to unmap only AFTER you write to the BD ...)
10959 And above all, all pbd sizes are in words - NOT DWORDS!
10960 */
10961
10962 pkt_prod = fp->tx_pkt_prod++;
10963 bd_prod = TX_BD(fp->tx_bd_prod);
10964
10965 /* get a tx_buf and first BD */
10966 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10967 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
10968
10969 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10970 tx_start_bd->general_data = (UNICAST_ADDRESS <<
10971 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
10972 /* header nbd */
10973 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
10974
10975 /* remember the first BD of the packet */
10976 tx_buf->first_bd = fp->tx_bd_prod;
10977 tx_buf->skb = skb;
10978 tx_buf->flags = 0;
10979
10980 DP(NETIF_MSG_TX_QUEUED,
10981 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10982 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
10983
10984#ifdef BCM_VLAN
10985 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10986 (bp->flags & HW_VLAN_TX_FLAG)) {
10987 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10988 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10989 } else
10990#endif
10991 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10992
10993 /* turn on parsing and get a BD */
10994 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10995 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
10996
10997 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10998
10999 if (xmit_type & XMIT_CSUM) {
11000 hlen = (skb_network_header(skb) - skb->data) / 2;
11001
11002 /* for now NS flag is not used in Linux */
11003 pbd->global_data =
11004 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11005 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
11006
11007 pbd->ip_hlen = (skb_transport_header(skb) -
11008 skb_network_header(skb)) / 2;
11009
11010 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
11011
11012 pbd->total_hlen = cpu_to_le16(hlen);
11013 hlen = hlen*2;
11014
11015 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
11016
11017 if (xmit_type & XMIT_CSUM_V4)
11018 tx_start_bd->bd_flags.as_bitfield |=
11019 ETH_TX_BD_FLAGS_IP_CSUM;
11020 else
11021 tx_start_bd->bd_flags.as_bitfield |=
11022 ETH_TX_BD_FLAGS_IPV6;
11023
11024 if (xmit_type & XMIT_CSUM_TCP) {
11025 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11026
11027 } else {
11028 s8 fix = SKB_CS_OFF(skb); /* signed! */
11029
11030 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
11031
11032 DP(NETIF_MSG_TX_QUEUED,
11033 "hlen %d fix %d csum before fix %x\n",
11034 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
11035
11036 /* HW bug: fixup the CSUM */
11037 pbd->tcp_pseudo_csum =
11038 bnx2x_csum_fix(skb_transport_header(skb),
11039 SKB_CS(skb), fix);
11040
11041 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11042 pbd->tcp_pseudo_csum);
11043 }
11044 }
11045
11046 mapping = pci_map_single(bp->pdev, skb->data,
11047 skb_headlen(skb), PCI_DMA_TODEVICE);
11048
11049 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11050 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11051 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11052 tx_start_bd->nbd = cpu_to_le16(nbd);
11053 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11054 pkt_size = tx_start_bd->nbytes;
11055
11056 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
11057 " nbytes %d flags %x vlan %x\n",
11058 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11059 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11060 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
11061
11062 if (xmit_type & XMIT_GSO) {
11063
11064 DP(NETIF_MSG_TX_QUEUED,
11065 "TSO packet len %d hlen %d total len %d tso size %d\n",
11066 skb->len, hlen, skb_headlen(skb),
11067 skb_shinfo(skb)->gso_size);
11068
11069 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
11070
11071 if (unlikely(skb_headlen(skb) > hlen))
11072 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11073 hlen, bd_prod, ++nbd);
11074
11075 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11076 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
11077 pbd->tcp_flags = pbd_tcp_flags(skb);
11078
11079 if (xmit_type & XMIT_GSO_V4) {
11080 pbd->ip_id = swab16(ip_hdr(skb)->id);
11081 pbd->tcp_pseudo_csum =
11082 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11083 ip_hdr(skb)->daddr,
11084 0, IPPROTO_TCP, 0));
11085
11086 } else
11087 pbd->tcp_pseudo_csum =
11088 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11089 &ipv6_hdr(skb)->daddr,
11090 0, IPPROTO_TCP, 0));
11091
11092 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11093 }
11094 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
11095
11096 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11097 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
11098
11099 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11100 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11101 if (total_pkt_bd == NULL)
11102 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11103
11104 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11105 frag->size, PCI_DMA_TODEVICE);
11106
11107 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11108 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11109 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11110 le16_add_cpu(&pkt_size, frag->size);
11111
11112 DP(NETIF_MSG_TX_QUEUED,
11113 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
11114 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11115 le16_to_cpu(tx_data_bd->nbytes));
11116 }
11117
11118 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
11119
11120 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11121
11122 /* Ring the Tx doorbell. If the packet's BDs cross a BD-page boundary,
11123 * the "next page" pointer BD is consumed as well, so count it in nbd.
11124 */
11125 if (TX_BD_POFF(bd_prod) < nbd)
11126 nbd++;
11127
11128 if (total_pkt_bd != NULL)
11129 total_pkt_bd->total_pkt_bytes = pkt_size;
11130
11131 if (pbd)
11132 DP(NETIF_MSG_TX_QUEUED,
11133 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
11134 " tcp_flags %x xsum %x seq %u hlen %u\n",
11135 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11136 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
11137 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
11138
11139 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
11140
11141 /*
11142 * Make sure that the BD data is updated before updating the producer
11143 * since FW might read the BD right after the producer is updated.
11144 * This is only applicable for weak-ordered memory model archs such
11145 * as IA-64. The following barrier is also mandatory since FW
11146 * assumes packets must have BDs.
11147 */
11148 wmb();
11149
11150 fp->tx_db.data.prod += nbd;
11151 barrier();
11152 DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
11153
11154 mmiowb();
11155
11156 fp->tx_bd_prod += nbd;
11157
11158 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
11159 netif_tx_stop_queue(txq);
11160 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11161 if we put Tx into XOFF state. */
11162 smp_mb();
11163 fp_stat->eth_q_stats.driver_xoff++;
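		/* Re-check after the memory barrier: bnx2x_tx_int() may have
		 * freed BDs between the availability test and the stop above,
		 * so wake the queue back up if room has appeared.
		 */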
11164 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
11165 netif_tx_wake_queue(txq);
11166 }
11167 fp_stat->tx_pkt++;
11168
11169 return NETDEV_TX_OK;
11170}
11171
11172/* called with rtnl_lock */
11173static int bnx2x_open(struct net_device *dev)
11174{
11175 struct bnx2x *bp = netdev_priv(dev);
11176
11177 netif_carrier_off(dev);
11178
11179 bnx2x_set_power_state(bp, PCI_D0);
11180
11181 return bnx2x_nic_load(bp, LOAD_OPEN);
11182}
11183
11184/* called with rtnl_lock */
11185static int bnx2x_close(struct net_device *dev)
11186{
11187 struct bnx2x *bp = netdev_priv(dev);
11188
11189 /* Unload the driver, release IRQs */
11190 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11191 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11192 if (!CHIP_REV_IS_SLOW(bp))
11193 bnx2x_set_power_state(bp, PCI_D3hot);
11194
11195 return 0;
11196}
11197
11198/* called with netif_tx_lock from dev_mcast.c */
11199static void bnx2x_set_rx_mode(struct net_device *dev)
11200{
11201 struct bnx2x *bp = netdev_priv(dev);
11202 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11203 int port = BP_PORT(bp);
11204
11205 if (bp->state != BNX2X_STATE_OPEN) {
11206 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11207 return;
11208 }
11209
11210 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11211
11212 if (dev->flags & IFF_PROMISC)
11213 rx_mode = BNX2X_RX_MODE_PROMISC;
11214
11215 else if ((dev->flags & IFF_ALLMULTI) ||
11216 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11217 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11218
11219 else { /* some multicasts */
11220 if (CHIP_IS_E1(bp)) {
11221 int i, old, offset;
11222 struct dev_mc_list *mclist;
11223 struct mac_configuration_cmd *config =
11224 bnx2x_sp(bp, mcast_config);
11225
11226 for (i = 0, mclist = dev->mc_list;
11227 mclist && (i < dev->mc_count);
11228 i++, mclist = mclist->next) {
11229
11230 config->config_table[i].
11231 cam_entry.msb_mac_addr =
11232 swab16(*(u16 *)&mclist->dmi_addr[0]);
11233 config->config_table[i].
11234 cam_entry.middle_mac_addr =
11235 swab16(*(u16 *)&mclist->dmi_addr[2]);
11236 config->config_table[i].
11237 cam_entry.lsb_mac_addr =
11238 swab16(*(u16 *)&mclist->dmi_addr[4]);
11239 config->config_table[i].cam_entry.flags =
11240 cpu_to_le16(port);
11241 config->config_table[i].
11242 target_table_entry.flags = 0;
11243 config->config_table[i].target_table_entry.
11244 clients_bit_vector =
11245 cpu_to_le32(1 << BP_L_ID(bp));
11246 config->config_table[i].
11247 target_table_entry.vlan_id = 0;
11248
11249 DP(NETIF_MSG_IFUP,
11250 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11251 config->config_table[i].
11252 cam_entry.msb_mac_addr,
11253 config->config_table[i].
11254 cam_entry.middle_mac_addr,
11255 config->config_table[i].
11256 cam_entry.lsb_mac_addr);
11257 }
11258 old = config->hdr.length;
11259 if (old > i) {
11260 for (; i < old; i++) {
11261 if (CAM_IS_INVALID(config->
11262 config_table[i])) {
11263 /* already invalidated */
11264 break;
11265 }
11266 /* invalidate */
11267 CAM_INVALIDATE(config->
11268 config_table[i]);
11269 }
11270 }
11271
11272 if (CHIP_REV_IS_SLOW(bp))
11273 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11274 else
11275 offset = BNX2X_MAX_MULTICAST*(1 + port);
11276
11277 config->hdr.length = i;
11278 config->hdr.offset = offset;
11279 config->hdr.client_id = bp->fp->cl_id;
11280 config->hdr.reserved1 = 0;
11281
11282 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11283 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11284 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11285 0);
11286 } else { /* E1H */
11287 /* Accept one or more multicasts */
11288 struct dev_mc_list *mclist;
11289 u32 mc_filter[MC_HASH_SIZE];
11290 u32 crc, bit, regidx;
11291 int i;
11292
11293 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11294
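			/* E1H uses a 256-bit hash filter: the top byte of the
			 * CRC32c of the MAC address selects one bit spread
			 * across the MC_HASH_SIZE 32-bit filter registers.
			 */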
11295 for (i = 0, mclist = dev->mc_list;
11296 mclist && (i < dev->mc_count);
11297 i++, mclist = mclist->next) {
11298
11299 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11300 mclist->dmi_addr);
11301
11302 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11303 bit = (crc >> 24) & 0xff;
11304 regidx = bit >> 5;
11305 bit &= 0x1f;
11306 mc_filter[regidx] |= (1 << bit);
11307 }
11308
11309 for (i = 0; i < MC_HASH_SIZE; i++)
11310 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11311 mc_filter[i]);
11312 }
11313 }
11314
11315 bp->rx_mode = rx_mode;
11316 bnx2x_set_storm_rx_mode(bp);
11317}
11318
11319/* called with rtnl_lock */
11320static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11321{
11322 struct sockaddr *addr = p;
11323 struct bnx2x *bp = netdev_priv(dev);
11324
11325 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
11326 return -EINVAL;
11327
11328 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11329 if (netif_running(dev)) {
11330 if (CHIP_IS_E1(bp))
11331 bnx2x_set_mac_addr_e1(bp, 1);
11332 else
11333 bnx2x_set_mac_addr_e1h(bp, 1);
11334 }
11335
11336 return 0;
11337}
11338
11339/* called with rtnl_lock */
11340static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11341 int devad, u16 addr)
11342{
11343 struct bnx2x *bp = netdev_priv(netdev);
11344 u16 value;
11345 int rc;
11346 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11347
11348 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11349 prtad, devad, addr);
11350
11351 if (prtad != bp->mdio.prtad) {
11352 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11353 prtad, bp->mdio.prtad);
11354 return -EINVAL;
11355 }
11356
11357 /* The HW expects different devad if CL22 is used */
11358 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11359
11360 bnx2x_acquire_phy_lock(bp);
11361 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11362 devad, addr, &value);
11363 bnx2x_release_phy_lock(bp);
11364 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11365
11366 if (!rc)
11367 rc = value;
11368 return rc;
11369}
11370
11371/* called with rtnl_lock */
11372static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11373 u16 addr, u16 value)
11374{
11375 struct bnx2x *bp = netdev_priv(netdev);
11376 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11377 int rc;
11378
11379 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11380 " value 0x%x\n", prtad, devad, addr, value);
11381
11382 if (prtad != bp->mdio.prtad) {
11383 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11384 prtad, bp->mdio.prtad);
11385 return -EINVAL;
11386 }
11387
11388 /* The HW expects different devad if CL22 is used */
11389 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11390
11391 bnx2x_acquire_phy_lock(bp);
11392 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11393 devad, addr, value);
11394 bnx2x_release_phy_lock(bp);
11395 return rc;
11396}
11397
11398/* called with rtnl_lock */
11399static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11400{
11401 struct bnx2x *bp = netdev_priv(dev);
11402 struct mii_ioctl_data *mdio = if_mii(ifr);
11403
11404 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11405 mdio->phy_id, mdio->reg_num, mdio->val_in);
11406
11407 if (!netif_running(dev))
11408 return -EAGAIN;
11409
11410 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
11411}
11412
11413/* called with rtnl_lock */
11414static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11415{
11416 struct bnx2x *bp = netdev_priv(dev);
11417 int rc = 0;
11418
11419 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11420 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11421 return -EINVAL;
11422
11423 /* This does not race with packet allocation
11424 * because the actual alloc size is
11425 * only updated as part of load
11426 */
11427 dev->mtu = new_mtu;
11428
11429 if (netif_running(dev)) {
11430 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11431 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11432 }
11433
11434 return rc;
11435}
11436
11437static void bnx2x_tx_timeout(struct net_device *dev)
11438{
11439 struct bnx2x *bp = netdev_priv(dev);
11440
11441#ifdef BNX2X_STOP_ON_ERROR
11442 if (!bp->panic)
11443 bnx2x_panic();
11444#endif
11445 /* This allows the netif to be shut down gracefully before resetting */
11446 schedule_work(&bp->reset_task);
11447}
11448
11449#ifdef BCM_VLAN
11450/* called with rtnl_lock */
11451static void bnx2x_vlan_rx_register(struct net_device *dev,
11452 struct vlan_group *vlgrp)
11453{
11454 struct bnx2x *bp = netdev_priv(dev);
11455
11456 bp->vlgrp = vlgrp;
11457
11458 /* Set flags according to the required capabilities */
11459 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11460
11461 if (dev->features & NETIF_F_HW_VLAN_TX)
11462 bp->flags |= HW_VLAN_TX_FLAG;
11463
11464 if (dev->features & NETIF_F_HW_VLAN_RX)
11465 bp->flags |= HW_VLAN_RX_FLAG;
11466
11467 if (netif_running(dev))
11468 bnx2x_set_client_config(bp);
11469}
11470
11471#endif
11472
11473#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11474static void poll_bnx2x(struct net_device *dev)
11475{
11476 struct bnx2x *bp = netdev_priv(dev);
11477
11478 disable_irq(bp->pdev->irq);
11479 bnx2x_interrupt(bp->pdev->irq, dev);
11480 enable_irq(bp->pdev->irq);
11481}
11482#endif
11483
11484static const struct net_device_ops bnx2x_netdev_ops = {
11485 .ndo_open = bnx2x_open,
11486 .ndo_stop = bnx2x_close,
11487 .ndo_start_xmit = bnx2x_start_xmit,
11488 .ndo_set_multicast_list = bnx2x_set_rx_mode,
11489 .ndo_set_mac_address = bnx2x_change_mac_addr,
11490 .ndo_validate_addr = eth_validate_addr,
11491 .ndo_do_ioctl = bnx2x_ioctl,
11492 .ndo_change_mtu = bnx2x_change_mtu,
11493 .ndo_tx_timeout = bnx2x_tx_timeout,
11494#ifdef BCM_VLAN
11495 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
11496#endif
11497#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11498 .ndo_poll_controller = poll_bnx2x,
11499#endif
11500};
11501
11502static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11503 struct net_device *dev)
11504{
11505 struct bnx2x *bp;
11506 int rc;
11507
11508 SET_NETDEV_DEV(dev, &pdev->dev);
11509 bp = netdev_priv(dev);
11510
11511 bp->dev = dev;
11512 bp->pdev = pdev;
11513 bp->flags = 0;
11514 bp->func = PCI_FUNC(pdev->devfn);
11515
11516 rc = pci_enable_device(pdev);
11517 if (rc) {
11518 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11519 goto err_out;
11520 }
11521
11522 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11523 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11524 " aborting\n");
11525 rc = -ENODEV;
11526 goto err_out_disable;
11527 }
11528
11529 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11530 printk(KERN_ERR PFX "Cannot find second PCI device"
11531 " base address, aborting\n");
11532 rc = -ENODEV;
11533 goto err_out_disable;
11534 }
11535
11536 if (atomic_read(&pdev->enable_cnt) == 1) {
11537 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11538 if (rc) {
11539 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11540 " aborting\n");
11541 goto err_out_disable;
11542 }
11543
11544 pci_set_master(pdev);
11545 pci_save_state(pdev);
11546 }
11547
11548 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11549 if (bp->pm_cap == 0) {
11550 printk(KERN_ERR PFX "Cannot find power management"
11551 " capability, aborting\n");
11552 rc = -EIO;
11553 goto err_out_release;
11554 }
11555
11556 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11557 if (bp->pcie_cap == 0) {
11558 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11559 " aborting\n");
11560 rc = -EIO;
11561 goto err_out_release;
11562 }
11563
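	/* Prefer 64-bit DMA (DAC); fall back to a 32-bit mask if the platform
	 * cannot do 64-bit addressing.
	 */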
11564 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11565 bp->flags |= USING_DAC_FLAG;
11566 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11567 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11568 " failed, aborting\n");
11569 rc = -EIO;
11570 goto err_out_release;
11571 }
11572
11573 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11574 printk(KERN_ERR PFX "System does not support DMA,"
11575 " aborting\n");
11576 rc = -EIO;
11577 goto err_out_release;
11578 }
11579
11580 dev->mem_start = pci_resource_start(pdev, 0);
11581 dev->base_addr = dev->mem_start;
11582 dev->mem_end = pci_resource_end(pdev, 0);
11583
11584 dev->irq = pdev->irq;
11585
11586 bp->regview = pci_ioremap_bar(pdev, 0);
11587 if (!bp->regview) {
11588 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11589 rc = -ENOMEM;
11590 goto err_out_release;
11591 }
11592
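	/* Map the doorbell BAR (BAR 2), capped at BNX2X_DB_SIZE */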
11593 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11594 min_t(u64, BNX2X_DB_SIZE,
11595 pci_resource_len(pdev, 2)));
11596 if (!bp->doorbells) {
11597 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11598 rc = -ENOMEM;
11599 goto err_out_unmap;
11600 }
11601
11602 bnx2x_set_power_state(bp, PCI_D0);
11603
11604 /* clean indirect addresses */
11605 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11606 PCICFG_VENDOR_ID_OFFSET);
11607 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11608 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11609 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11610 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11611
11612 dev->watchdog_timeo = TX_TIMEOUT;
11613
11614 dev->netdev_ops = &bnx2x_netdev_ops;
11615 dev->ethtool_ops = &bnx2x_ethtool_ops;
11616 dev->features |= NETIF_F_SG;
11617 dev->features |= NETIF_F_HW_CSUM;
11618 if (bp->flags & USING_DAC_FLAG)
11619 dev->features |= NETIF_F_HIGHDMA;
11620 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11621 dev->features |= NETIF_F_TSO6;
11622#ifdef BCM_VLAN
11623 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11624 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11625
11626 dev->vlan_features |= NETIF_F_SG;
11627 dev->vlan_features |= NETIF_F_HW_CSUM;
11628 if (bp->flags & USING_DAC_FLAG)
11629 dev->vlan_features |= NETIF_F_HIGHDMA;
11630 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11631 dev->vlan_features |= NETIF_F_TSO6;
11632#endif
11633
11634 /* get_port_hwinfo() will set prtad and mmds properly */
11635 bp->mdio.prtad = MDIO_PRTAD_NONE;
11636 bp->mdio.mmds = 0;
11637 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11638 bp->mdio.dev = dev;
11639 bp->mdio.mdio_read = bnx2x_mdio_read;
11640 bp->mdio.mdio_write = bnx2x_mdio_write;
11641
11642 return 0;
11643
11644err_out_unmap:
11645 if (bp->regview) {
11646 iounmap(bp->regview);
11647 bp->regview = NULL;
11648 }
11649 if (bp->doorbells) {
11650 iounmap(bp->doorbells);
11651 bp->doorbells = NULL;
11652 }
11653
11654err_out_release:
11655 if (atomic_read(&pdev->enable_cnt) == 1)
11656 pci_release_regions(pdev);
11657
11658err_out_disable:
11659 pci_disable_device(pdev);
11660 pci_set_drvdata(pdev, NULL);
11661
11662err_out:
11663 return rc;
11664}
11665
11666static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
11667{
11668 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11669
11670 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11671 return val;
11672}
11673
11674/* return value of 1=2.5GHz 2=5GHz */
11675static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
11676{
11677 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11678
11679 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11680 return val;
11681}
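
/* Sanity-check a firmware image obtained via request_firmware(): make sure
 * every section and init_ops offset lies within the file and that the FW
 * version matches the one this driver was built against.
 */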
11682static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11683{
11684 struct bnx2x_fw_file_hdr *fw_hdr;
11685 struct bnx2x_fw_file_section *sections;
11686 u16 *ops_offsets;
11687 u32 offset, len, num_ops;
11688 int i;
11689 const struct firmware *firmware = bp->firmware;
11690 const u8 *fw_ver;
11691
11692 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11693 return -EINVAL;
11694
11695 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11696 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11697
11698 /* Make sure none of the offsets and sizes make us read beyond
11699 * the end of the firmware data */
11700 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11701 offset = be32_to_cpu(sections[i].offset);
11702 len = be32_to_cpu(sections[i].len);
11703 if (offset + len > firmware->size) {
11704 printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
11705 return -EINVAL;
11706 }
11707 }
11708
11709 /* Likewise for the init_ops offsets */
11710 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11711 ops_offsets = (u16 *)(firmware->data + offset);
11712 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11713
11714 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11715 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11716 printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
11717 return -EINVAL;
11718 }
11719 }
11720
11721 /* Check FW version */
11722 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11723 fw_ver = firmware->data + offset;
11724 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11725 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11726 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11727 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11728 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11729 " Should be %d.%d.%d.%d\n",
11730 fw_ver[0], fw_ver[1], fw_ver[2],
11731 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11732 BCM_5710_FW_MINOR_VERSION,
11733 BCM_5710_FW_REVISION_VERSION,
11734 BCM_5710_FW_ENGINEERING_VERSION);
11735 return -EINVAL;
11736 }
11737
11738 return 0;
11739}
11740
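/* Copy a big-endian firmware array into host byte order, 32 bits at a time */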
11741static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11742{
11743 u32 i;
11744 const __be32 *source = (const __be32*)_source;
11745 u32 *target = (u32*)_target;
11746
11747 for (i = 0; i < n/4; i++)
11748 target[i] = be32_to_cpu(source[i]);
11749}
11750
11751/*
11752 Ops array is stored in the following format:
11753 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11754 */
11755static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11756{
11757 u32 i, j, tmp;
11758 const __be32 *source = (const __be32*)_source;
11759 struct raw_op *target = (struct raw_op*)_target;
11760
11761 for (i = 0, j = 0; i < n/8; i++, j+=2) {
11762 tmp = be32_to_cpu(source[j]);
11763 target[i].op = (tmp >> 24) & 0xff;
11764 target[i].offset = tmp & 0xffffff;
11765 target[i].raw_data = be32_to_cpu(source[j+1]);
11766 }
11767}
11768static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11769{
11770 u32 i;
11771 u16 *target = (u16*)_target;
11772 const __be16 *source = (const __be16*)_source;
11773
11774 for (i = 0; i < n/2; i++)
11775 target[i] = be16_to_cpu(source[i]);
11776}
11777
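/* Allocate bp->arr for the named firmware-file section and byte-swap it into
 * host order with func(); jump to lbl on allocation failure.
 */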
11778#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
11779 do { \
11780 u32 len = be32_to_cpu(fw_hdr->arr.len); \
11781 bp->arr = kmalloc(len, GFP_KERNEL); \
11782 if (!bp->arr) { \
11783 printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
11784 goto lbl; \
11785 } \
11786 func(bp->firmware->data + \
11787 be32_to_cpu(fw_hdr->arr.offset), \
11788 (u8*)bp->arr, len); \
11789 } while (0)
11790
11791
11792static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11793{
11794 char fw_file_name[40] = {0};
11795 int rc, offset;
11796 struct bnx2x_fw_file_hdr *fw_hdr;
11797
11798 /* Create a FW file name */
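	/* resulting name: bnx2x-e1[h]-<major>.<minor>.<rev>.<eng>.fw */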
11799 if (CHIP_IS_E1(bp))
11800 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
11801 else
11802 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
11803
11804 sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11805 BCM_5710_FW_MAJOR_VERSION,
11806 BCM_5710_FW_MINOR_VERSION,
11807 BCM_5710_FW_REVISION_VERSION,
11808 BCM_5710_FW_ENGINEERING_VERSION);
11809
11810 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11811
11812 rc = request_firmware(&bp->firmware, fw_file_name, dev);
11813 if (rc) {
11814 printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
11815 goto request_firmware_exit;
11816 }
11817
11818 rc = bnx2x_check_firmware(bp);
11819 if (rc) {
11820 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
11821 goto request_firmware_exit;
11822 }
11823
11824 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11825
11826 /* Initialize the pointers to the init arrays */
11827 /* Blob */
11828 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11829
11830 /* Opcodes */
11831 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11832
11833 /* Offsets */
11834 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);
11835
11836 /* STORMs firmware */
11837 bp->tsem_int_table_data = bp->firmware->data +
11838 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11839 bp->tsem_pram_data = bp->firmware->data +
11840 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11841 bp->usem_int_table_data = bp->firmware->data +
11842 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11843 bp->usem_pram_data = bp->firmware->data +
11844 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11845 bp->xsem_int_table_data = bp->firmware->data +
11846 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11847 bp->xsem_pram_data = bp->firmware->data +
11848 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11849 bp->csem_int_table_data = bp->firmware->data +
11850 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11851 bp->csem_pram_data = bp->firmware->data +
11852 be32_to_cpu(fw_hdr->csem_pram_data.offset);
11853
11854 return 0;
11855init_offsets_alloc_err:
11856 kfree(bp->init_ops);
11857init_ops_alloc_err:
11858 kfree(bp->init_data);
11859request_firmware_exit:
11860 release_firmware(bp->firmware);
11861
11862 return rc;
11863}
11864
11865
11866
11867static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11868 const struct pci_device_id *ent)
11869{
11870 static int version_printed;
11871 struct net_device *dev = NULL;
11872 struct bnx2x *bp;
11873 int rc;
11874
11875 if (version_printed++ == 0)
11876 printk(KERN_INFO "%s", version);
11877
11878 /* dev zeroed in init_etherdev */
11879 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
11880 if (!dev) {
11881 printk(KERN_ERR PFX "Cannot allocate net device\n");
11882 return -ENOMEM;
11883 }
11884
11885 bp = netdev_priv(dev);
11886 bp->msglevel = debug;
11887
11888 rc = bnx2x_init_dev(pdev, dev);
11889 if (rc < 0) {
11890 free_netdev(dev);
11891 return rc;
11892 }
11893
11894 pci_set_drvdata(pdev, dev);
11895
11896 rc = bnx2x_init_bp(bp);
11897 if (rc)
11898 goto init_one_exit;
11899
11900 /* Set init arrays */
11901 rc = bnx2x_init_firmware(bp, &pdev->dev);
11902 if (rc) {
11903 printk(KERN_ERR PFX "Error loading firmware\n");
11904 goto init_one_exit;
11905 }
11906
11907 rc = register_netdev(dev);
11908 if (rc) {
11909 dev_err(&pdev->dev, "Cannot register net device\n");
11910 goto init_one_exit;
11911 }
11912
11913 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
11914 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
11915 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
11916 bnx2x_get_pcie_width(bp),
11917 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11918 dev->base_addr, bp->pdev->irq);
11919 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
11920
11921 return 0;
11922
11923init_one_exit:
11924 if (bp->regview)
11925 iounmap(bp->regview);
11926
11927 if (bp->doorbells)
11928 iounmap(bp->doorbells);
11929
11930 free_netdev(dev);
11931
11932 if (atomic_read(&pdev->enable_cnt) == 1)
11933 pci_release_regions(pdev);
11934
11935 pci_disable_device(pdev);
11936 pci_set_drvdata(pdev, NULL);
11937
11938 return rc;
11939}
11940
11941static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11942{
11943 struct net_device *dev = pci_get_drvdata(pdev);
11944 struct bnx2x *bp;
11945
11946 if (!dev) {
11947 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11948 return;
11949 }
11950 bp = netdev_priv(dev);
11951
11952 unregister_netdev(dev);
11953
11954 kfree(bp->init_ops_offsets);
11955 kfree(bp->init_ops);
11956 kfree(bp->init_data);
11957 release_firmware(bp->firmware);
11958
11959 if (bp->regview)
11960 iounmap(bp->regview);
11961
11962 if (bp->doorbells)
11963 iounmap(bp->doorbells);
11964
11965 free_netdev(dev);
11966
11967 if (atomic_read(&pdev->enable_cnt) == 1)
11968 pci_release_regions(pdev);
11969
11970 pci_disable_device(pdev);
11971 pci_set_drvdata(pdev, NULL);
11972}
11973
11974static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11975{
11976 struct net_device *dev = pci_get_drvdata(pdev);
11977 struct bnx2x *bp;
11978
11979 if (!dev) {
11980 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11981 return -ENODEV;
11982 }
11983 bp = netdev_priv(dev);
11984
11985 rtnl_lock();
11986
11987 pci_save_state(pdev);
11988
11989 if (!netif_running(dev)) {
11990 rtnl_unlock();
11991 return 0;
11992 }
11993
11994 netif_device_detach(dev);
11995
11996 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11997
11998 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
11999
12000 rtnl_unlock();
12001
12002 return 0;
12003}
12004
12005static int bnx2x_resume(struct pci_dev *pdev)
12006{
12007 struct net_device *dev = pci_get_drvdata(pdev);
12008 struct bnx2x *bp;
12009 int rc;
12010
12011 if (!dev) {
12012 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12013 return -ENODEV;
12014 }
12015 bp = netdev_priv(dev);
12016
12017 rtnl_lock();
12018
12019 pci_restore_state(pdev);
12020
12021 if (!netif_running(dev)) {
12022 rtnl_unlock();
12023 return 0;
12024 }
12025
12026 bnx2x_set_power_state(bp, PCI_D0);
12027 netif_device_attach(dev);
12028
12029 rc = bnx2x_nic_load(bp, LOAD_OPEN);
12030
12031 rtnl_unlock();
12032
12033 return rc;
12034}
12035
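/* Lightweight unload used after a PCI (EEH) error: stop the interface,
 * release IRQs and free driver memory without the normal ramrod/MCP
 * unload sequence.
 */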
12036static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12037{
12038 int i;
12039
12040 bp->state = BNX2X_STATE_ERROR;
12041
12042 bp->rx_mode = BNX2X_RX_MODE_NONE;
12043
12044 bnx2x_netif_stop(bp, 0);
12045
12046 del_timer_sync(&bp->timer);
12047 bp->stats_state = STATS_STATE_DISABLED;
12048 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
12049
12050 /* Release IRQs */
12051 bnx2x_free_irq(bp);
12052
12053 if (CHIP_IS_E1(bp)) {
12054 struct mac_configuration_cmd *config =
12055 bnx2x_sp(bp, mcast_config);
12056
12057 for (i = 0; i < config->hdr.length; i++)
12058 CAM_INVALIDATE(config->config_table[i]);
12059 }
12060
12061 /* Free SKBs, SGEs, TPA pool and driver internals */
12062 bnx2x_free_skbs(bp);
12063 for_each_rx_queue(bp, i)
12064 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
12065 for_each_rx_queue(bp, i)
12066 netif_napi_del(&bnx2x_fp(bp, i, napi));
12067 bnx2x_free_mem(bp);
12068
12069 bp->state = BNX2X_STATE_CLOSED;
12070
12071 netif_carrier_off(bp->dev);
12072
12073 return 0;
12074}
12075
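/* Re-read shared-memory (MCP) state after a slot reset so that a following
 * bnx2x_nic_load() can talk to the management firmware again.
 */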
12076static void bnx2x_eeh_recover(struct bnx2x *bp)
12077{
12078 u32 val;
12079
12080 mutex_init(&bp->port.phy_mutex);
12081
12082 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
12083 bp->link_params.shmem_base = bp->common.shmem_base;
12084 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
12085
12086 if (!bp->common.shmem_base ||
12087 (bp->common.shmem_base < 0xA0000) ||
12088 (bp->common.shmem_base >= 0xC0000)) {
12089 BNX2X_DEV_INFO("MCP not active\n");
12090 bp->flags |= NO_MCP_FLAG;
12091 return;
12092 }
12093
12094 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
12095 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12096 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12097 BNX2X_ERR("BAD MCP validity signature\n");
12098
12099 if (!BP_NOMCP(bp)) {
12100 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
12101 & DRV_MSG_SEQ_NUMBER_MASK);
12102 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12103 }
12104}
12105
12106/**
12107 * bnx2x_io_error_detected - called when PCI error is detected
12108 * @pdev: Pointer to PCI device
12109 * @state: The current pci connection state
12110 *
12111 * This function is called after a PCI bus error affecting
12112 * this device has been detected.
12113 */
12114static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12115 pci_channel_state_t state)
12116{
12117 struct net_device *dev = pci_get_drvdata(pdev);
12118 struct bnx2x *bp = netdev_priv(dev);
12119
12120 rtnl_lock();
12121
12122 netif_device_detach(dev);
12123
12124 if (state == pci_channel_io_perm_failure) {
12125 rtnl_unlock();
12126 return PCI_ERS_RESULT_DISCONNECT;
12127 }
12128
12129 if (netif_running(dev))
12130 bnx2x_eeh_nic_unload(bp);
12131
12132 pci_disable_device(pdev);
12133
12134 rtnl_unlock();
12135
12136 /* Request a slot reset */
12137 return PCI_ERS_RESULT_NEED_RESET;
12138}
12139
12140/**
12141 * bnx2x_io_slot_reset - called after the PCI bus has been reset
12142 * @pdev: Pointer to PCI device
12143 *
12144 * Restart the card from scratch, as if from a cold-boot.
12145 */
12146static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12147{
12148 struct net_device *dev = pci_get_drvdata(pdev);
12149 struct bnx2x *bp = netdev_priv(dev);
12150
12151 rtnl_lock();
12152
12153 if (pci_enable_device(pdev)) {
12154 dev_err(&pdev->dev,
12155 "Cannot re-enable PCI device after reset\n");
12156 rtnl_unlock();
12157 return PCI_ERS_RESULT_DISCONNECT;
12158 }
12159
12160 pci_set_master(pdev);
12161 pci_restore_state(pdev);
12162
12163 if (netif_running(dev))
12164 bnx2x_set_power_state(bp, PCI_D0);
12165
12166 rtnl_unlock();
12167
12168 return PCI_ERS_RESULT_RECOVERED;
12169}
12170
12171/**
12172 * bnx2x_io_resume - called when traffic can start flowing again
12173 * @pdev: Pointer to PCI device
12174 *
12175 * This callback is called when the error recovery driver tells us that
12176 * it's OK to resume normal operation.
12177 */
12178static void bnx2x_io_resume(struct pci_dev *pdev)
12179{
12180 struct net_device *dev = pci_get_drvdata(pdev);
12181 struct bnx2x *bp = netdev_priv(dev);
12182
12183 rtnl_lock();
12184
12185 bnx2x_eeh_recover(bp);
12186
12187 if (netif_running(dev))
12188 bnx2x_nic_load(bp, LOAD_NORMAL);
12189
12190 netif_device_attach(dev);
12191
12192 rtnl_unlock();
12193}
12194
12195static struct pci_error_handlers bnx2x_err_handler = {
12196 .error_detected = bnx2x_io_error_detected,
12197 .slot_reset = bnx2x_io_slot_reset,
12198 .resume = bnx2x_io_resume,
12199};
12200
12201static struct pci_driver bnx2x_pci_driver = {
12202 .name = DRV_MODULE_NAME,
12203 .id_table = bnx2x_pci_tbl,
12204 .probe = bnx2x_init_one,
12205 .remove = __devexit_p(bnx2x_remove_one),
12206 .suspend = bnx2x_suspend,
12207 .resume = bnx2x_resume,
12208 .err_handler = &bnx2x_err_handler,
12209};
12210
12211static int __init bnx2x_init(void)
12212{
12213 int ret;
12214
12215 bnx2x_wq = create_singlethread_workqueue("bnx2x");
12216 if (bnx2x_wq == NULL) {
12217 printk(KERN_ERR PFX "Cannot create workqueue\n");
12218 return -ENOMEM;
12219 }
12220
12221 ret = pci_register_driver(&bnx2x_pci_driver);
12222 if (ret) {
12223 printk(KERN_ERR PFX "Cannot register driver\n");
12224 destroy_workqueue(bnx2x_wq);
12225 }
12226 return ret;
12227}
12228
12229static void __exit bnx2x_cleanup(void)
12230{
12231 pci_unregister_driver(&bnx2x_pci_driver);
12232
12233 destroy_workqueue(bnx2x_wq);
12234}
12235
12236module_init(bnx2x_init);
12237module_exit(bnx2x_cleanup);
12238
12239