1/* bnx2x.h: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 */
13
14#ifndef BNX2X_H
15#define BNX2X_H
16
17/* compilation time flags */
18
19/* define this to make the driver freeze on error to allow getting debug info
20 * (you will need to reboot afterwards) */
21/* #define BNX2X_STOP_ON_ERROR */
22
23#define DRV_MODULE_VERSION "1.52.53-7"
24#define DRV_MODULE_RELDATE "2010/09/12"
25#define BNX2X_BC_VER 0x040200
26
27#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
28#define BCM_VLAN 1
29#endif
30
31#define BNX2X_MULTI_QUEUE
32
33#define BNX2X_NEW_NAPI
34
35
36#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
37#define BCM_CNIC 1
38#include "../cnic_if.h"
39#endif
40
41#ifdef BCM_CNIC
42#define BNX2X_MIN_MSIX_VEC_CNT 3
43#define BNX2X_MSIX_VEC_FP_START 2
44#else
45#define BNX2X_MIN_MSIX_VEC_CNT 2
46#define BNX2X_MSIX_VEC_FP_START 1
47#endif
48
49#include <linux/mdio.h>
50#include <linux/pci.h>
51#include "bnx2x_reg.h"
52#include "bnx2x_fw_defs.h"
53#include "bnx2x_hsi.h"
54#include "bnx2x_link.h"
55#include "bnx2x_stats.h"
56
57/* error/debug prints */
58
59#define DRV_MODULE_NAME		"bnx2x"
60
61/* for messages that are currently off */
62#define BNX2X_MSG_OFF 0
63#define BNX2X_MSG_MCP 0x010000 /* was: NETIF_MSG_HW */
64#define BNX2X_MSG_STATS 0x020000 /* was: NETIF_MSG_TIMER */
65#define BNX2X_MSG_NVM 0x040000 /* was: NETIF_MSG_HW */
66#define BNX2X_MSG_DMAE 0x080000 /* was: NETIF_MSG_HW */
67#define BNX2X_MSG_SP 0x100000 /* was: NETIF_MSG_INTR */
68#define BNX2X_MSG_FP 0x200000 /* was: NETIF_MSG_INTR */
69
70#define DP_LEVEL			KERN_NOTICE	/* was: KERN_DEBUG */
71
72/* regular debug print */
73#define DP(__mask, __fmt, __args...) \
74do { \
75 if (bp->msg_enable & (__mask)) \
76 printk(DP_LEVEL "[%s:%d(%s)]" __fmt, \
77 __func__, __LINE__, \
78 bp->dev ? (bp->dev->name) : "?", \
79 ##__args); \
80} while (0)
a2fbb9ea 81
34f80b04 82/* errors debug print */
7995c64e
JP
83#define BNX2X_DBG_ERR(__fmt, __args...) \
84do { \
85 if (netif_msg_probe(bp)) \
86 pr_err("[%s:%d(%s)]" __fmt, \
87 __func__, __LINE__, \
88 bp->dev ? (bp->dev->name) : "?", \
89 ##__args); \
90} while (0)
a2fbb9ea 91
34f80b04 92/* for errors (never masked) */
7995c64e
JP
93#define BNX2X_ERR(__fmt, __args...) \
94do { \
95 pr_err("[%s:%d(%s)]" __fmt, \
96 __func__, __LINE__, \
97 bp->dev ? (bp->dev->name) : "?", \
98 ##__args); \
cdaa7cb8
VZ
99 } while (0)
100
101#define BNX2X_ERROR(__fmt, __args...) do { \
102 pr_err("[%s:%d]" __fmt, __func__, __LINE__, ##__args); \
103 } while (0)
104
f1410647 105
a2fbb9ea 106/* before we have a dev->name use dev_info() */
7995c64e
JP
107#define BNX2X_DEV_INFO(__fmt, __args...) \
108do { \
109 if (netif_msg_probe(bp)) \
110 dev_info(&bp->pdev->dev, __fmt, ##__args); \
111} while (0)
a2fbb9ea 112
6c719d00 113void bnx2x_panic_dump(struct bnx2x *bp);
a2fbb9ea
ET
114
115#ifdef BNX2X_STOP_ON_ERROR
116#define bnx2x_panic() do { \
117 bp->panic = 1; \
118 BNX2X_ERR("driver assert\n"); \
34f80b04 119 bnx2x_int_disable(bp); \
a2fbb9ea
ET
120 bnx2x_panic_dump(bp); \
121 } while (0)
122#else
123#define bnx2x_panic() do { \
e3553b29 124 bp->panic = 1; \
a2fbb9ea
ET
125 BNX2X_ERR("driver assert\n"); \
126 bnx2x_panic_dump(bp); \
127 } while (0)
128#endif
129
523224a3 130#define bnx2x_mc_addr(ha) ((ha)->addr)
a2fbb9ea 131
34f80b04
EG
132#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff)
133#define U64_HI(x) (u32)(((u64)(x)) >> 32)
134#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
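/* Illustrative standalone sketch (not part of the driver): splitting a
 * 64-bit DMA address into the two 32-bit halves the chip expects and
 * joining them back with the macros above.  The address is an arbitrary
 * test value. */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;
typedef uint64_t u64;

#define U64_LO(x)		(u32)(((u64)(x)) & 0xffffffff)
#define U64_HI(x)		(u32)(((u64)(x)) >> 32)
#define HILO_U64(hi, lo)	((((u64)(hi)) << 32) + (lo))

int main(void)
{
	u64 mapping = 0x0000000123456789ULL;	/* pretend DMA address */
	u32 lo = U64_LO(mapping);		/* 0x23456789 */
	u32 hi = U64_HI(mapping);		/* 0x00000001 */

	printf("hi=0x%08x lo=0x%08x joined=0x%llx\n",
	       hi, lo, (unsigned long long)HILO_U64(hi, lo));
	return 0;
}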
a2fbb9ea 135
a2fbb9ea 136
523224a3 137#define REG_ADDR(bp, offset) ((bp->regview) + (offset))
a2fbb9ea 138
34f80b04
EG
139#define REG_RD(bp, offset) readl(REG_ADDR(bp, offset))
140#define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset))
523224a3 141#define REG_RD16(bp, offset) readw(REG_ADDR(bp, offset))
34f80b04
EG
142
143#define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset))
a2fbb9ea 144#define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset))
34f80b04 145#define REG_WR16(bp, offset, val) writew((u16)val, REG_ADDR(bp, offset))
a2fbb9ea 146
34f80b04
EG
147#define REG_RD_IND(bp, offset) bnx2x_reg_rd_ind(bp, offset)
148#define REG_WR_IND(bp, offset, val) bnx2x_reg_wr_ind(bp, offset, val)
a2fbb9ea 149
c18487ee
YR
150#define REG_RD_DMAE(bp, offset, valp, len32) \
151 do { \
152 bnx2x_read_dmae(bp, offset, len32);\
573f2035 153 memcpy(valp, bnx2x_sp(bp, wb_data[0]), (len32) * 4); \
c18487ee
YR
154 } while (0)
155
34f80b04 156#define REG_WR_DMAE(bp, offset, valp, len32) \
a2fbb9ea 157 do { \
573f2035 158 memcpy(bnx2x_sp(bp, wb_data[0]), valp, (len32) * 4); \
a2fbb9ea
ET
159 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), \
160 offset, len32); \
161 } while (0)
162
523224a3
DK
163#define REG_WR_DMAE_LEN(bp, offset, valp, len32) \
164 REG_WR_DMAE(bp, offset, valp, len32)
165
3359fced 166#define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \
573f2035
EG
167 do { \
168 memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \
169 bnx2x_write_big_buf_wb(bp, addr, len32); \
170 } while (0)
171
34f80b04
EG
172#define SHMEM_ADDR(bp, field) (bp->common.shmem_base + \
173 offsetof(struct shmem_region, field))
174#define SHMEM_RD(bp, field) REG_RD(bp, SHMEM_ADDR(bp, field))
175#define SHMEM_WR(bp, field, val) REG_WR(bp, SHMEM_ADDR(bp, field), val)
a2fbb9ea 176
2691d51d
EG
177#define SHMEM2_ADDR(bp, field) (bp->common.shmem2_base + \
178 offsetof(struct shmem2_region, field))
179#define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field))
180#define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val)
523224a3
DK
181#define MF_CFG_ADDR(bp, field) (bp->common.mf_cfg_base + \
182 offsetof(struct mf_cfg, field))
2691d51d 183
523224a3
DK
184#define MF_CFG_RD(bp, field) REG_RD(bp, MF_CFG_ADDR(bp, field))
185#define MF_CFG_WR(bp, field, val) REG_WR(bp,\
186 MF_CFG_ADDR(bp, field), (val))
72fd0718 187
345b5d52 188#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg)
3196a88a 189#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val)
a2fbb9ea 190
523224a3
DK
191/* SP SB indices */
192
193/* General SP events - stats query, cfc delete, etc */
194#define HC_SP_INDEX_ETH_DEF_CONS 3
195
196/* EQ completions */
197#define HC_SP_INDEX_EQ_CONS 7
198
199/* iSCSI L2 */
200#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5
201#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1
202
203/**
204 * CIDs and CLIDs:
205 * The CLIDs below are the CLIDs for func 0; the CLID for any other
206 * function is calculated by the formula:
207 *
208 * FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X
209 *
210 */
211/* iSCSI L2 */
212#define BNX2X_ISCSI_ETH_CL_ID 17
213#define BNX2X_ISCSI_ETH_CID 17
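/* Illustrative standalone sketch of the CLID formula above (not part of
 * the driver).  NUM_SPECIAL_CLIENTS comes from the HSI headers; the value
 * used here is a placeholder purely to show the arithmetic. */
#include <stdio.h>

#define FUNC_0_ISCSI_ETH_CL_ID	17	/* BNX2X_ISCSI_ETH_CL_ID for func 0 */
#define NUM_SPECIAL_CLIENTS_EX	2	/* placeholder, see HSI for real value */

int main(void)
{
	for (int n = 0; n < 4; n++)
		printf("func %d: iSCSI L2 CLID = %d\n",
		       n, n * NUM_SPECIAL_CLIENTS_EX + FUNC_0_ISCSI_ETH_CL_ID);
	return 0;
}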
214
215/** Additional rings budgeting */
216#ifdef BCM_CNIC
217#define CNIC_CONTEXT_USE 1
218#else
219#define CNIC_CONTEXT_USE 0
220#endif /* BCM_CNIC */
221
72fd0718
VZ
222#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
223 AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
224
523224a3
DK
225#define SM_RX_ID 0
226#define SM_TX_ID 1
a2fbb9ea 227
7a9b2557 228/* fast path */
a2fbb9ea 229
a2fbb9ea 230struct sw_rx_bd {
34f80b04 231 struct sk_buff *skb;
1a983142 232 DEFINE_DMA_UNMAP_ADDR(mapping);
a2fbb9ea
ET
233};
234
235struct sw_tx_bd {
34f80b04
EG
236 struct sk_buff *skb;
237 u16 first_bd;
ca00392c
EG
238 u8 flags;
239/* Set on the first BD descriptor when there is a split BD */
240#define BNX2X_TSO_SPLIT_BD (1<<0)
a2fbb9ea
ET
241};
242
7a9b2557
VZ
243struct sw_rx_page {
244 struct page *page;
1a983142 245 DEFINE_DMA_UNMAP_ADDR(mapping);
7a9b2557
VZ
246};
247
ca00392c
EG
248union db_prod {
249 struct doorbell_set_prod data;
250 u32 raw;
251};
252
7a9b2557
VZ
253
254/* MC hsi */
255#define BCM_PAGE_SHIFT 12
256#define BCM_PAGE_SIZE (1 << BCM_PAGE_SHIFT)
257#define BCM_PAGE_MASK (~(BCM_PAGE_SIZE - 1))
258#define BCM_PAGE_ALIGN(addr) (((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK)
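/* Illustrative standalone sketch (not part of the driver): with the 4K MC
 * page size, BCM_PAGE_ALIGN() rounds any address up to the next 4K
 * boundary. */
#include <stdio.h>

#define BCM_PAGE_SHIFT		12
#define BCM_PAGE_SIZE		(1 << BCM_PAGE_SHIFT)
#define BCM_PAGE_MASK		(~(BCM_PAGE_SIZE - 1))
#define BCM_PAGE_ALIGN(addr)	(((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK)

int main(void)
{
	unsigned long addrs[] = { 0x1000, 0x1001, 0x1fff, 0x2000 };

	for (int i = 0; i < 4; i++)
		printf("0x%lx -> 0x%lx\n", addrs[i],
		       (unsigned long)BCM_PAGE_ALIGN(addrs[i]));
	return 0;	/* 0x1000, 0x2000, 0x2000, 0x2000 */
}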
259
260#define PAGES_PER_SGE_SHIFT 0
261#define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT)
4f40f2cb
EG
262#define SGE_PAGE_SIZE PAGE_SIZE
263#define SGE_PAGE_SHIFT PAGE_SHIFT
5b6402d1 264#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
7a9b2557
VZ
265
266/* SGE ring related macros */
267#define NUM_RX_SGE_PAGES 2
268#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
269#define MAX_RX_SGE_CNT (RX_SGE_CNT - 2)
270/* RX_SGE_CNT is guaranteed to be a power of 2 */
7a9b2557
VZ
271#define RX_SGE_MASK (RX_SGE_CNT - 1)
272#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES)
273#define MAX_RX_SGE (NUM_RX_SGE - 1)
274#define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \
275 (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1)
276#define RX_SGE(x) ((x) & MAX_RX_SGE)
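/* Illustrative standalone sketch (not part of the driver): how
 * NEXT_SGE_IDX() skips the last two entries of every SGE page (they hold
 * the next-page pointer).  sizeof(struct eth_rx_sge) is assumed to be
 * 8 bytes here, i.e. 512 SGEs per 4K page. */
#include <stdio.h>

#define SGE_CNT_EX		(4096 / 8)		/* assumed 8-byte SGE */
#define MAX_SGE_CNT_EX		(SGE_CNT_EX - 2)	/* usable entries/page */
#define SGE_MASK_EX		(SGE_CNT_EX - 1)
#define NEXT_SGE_IDX_EX(x)	((((x) & SGE_MASK_EX) == \
				  (MAX_SGE_CNT_EX - 1)) ? (x) + 3 : (x) + 1)

int main(void)
{
	unsigned idx = MAX_SGE_CNT_EX - 2;		/* 508 */

	for (int i = 0; i < 3; i++) {
		printf("%u -> %u\n", idx, NEXT_SGE_IDX_EX(idx));
		idx = NEXT_SGE_IDX_EX(idx);
	}
	return 0;	/* 508->509, 509->512 (510/511 skipped), 512->513 */
}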
277
278/* SGE producer mask related macros */
279/* Number of bits in one sge_mask array element */
280#define RX_SGE_MASK_ELEM_SZ 64
281#define RX_SGE_MASK_ELEM_SHIFT 6
282#define RX_SGE_MASK_ELEM_MASK ((u64)RX_SGE_MASK_ELEM_SZ - 1)
283
284/* Creates a bitmask of all ones in less significant bits.
285 idx - index of the most significant bit in the created mask */
286#define RX_SGE_ONES_MASK(idx) \
287 (((u64)0x1 << (((idx) & RX_SGE_MASK_ELEM_MASK) + 1)) - 1)
288#define RX_SGE_MASK_ELEM_ONE_MASK ((u64)(~0))
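/* Illustrative standalone sketch (not part of the driver): the ones-mask
 * macro above builds a mask covering bit 0 up to and including bit
 * (idx % 64), i.e. all SGEs of that mask element up to idx. */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

#define ELEM_MASK_EX		((u64)64 - 1)
#define ONES_MASK_EX(idx) \
	(((u64)0x1 << (((idx) & ELEM_MASK_EX) + 1)) - 1)

int main(void)
{
	printf("idx=0  -> 0x%llx\n", (unsigned long long)ONES_MASK_EX(0));
	printf("idx=3  -> 0x%llx\n", (unsigned long long)ONES_MASK_EX(3));
	printf("idx=66 -> 0x%llx\n", (unsigned long long)ONES_MASK_EX(66));
	return 0;	/* 0x1, 0xf, 0x7 - idx 66 is bit 2 of the next element */
}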
289
290/* Number of u64 elements in SGE mask array */
291#define RX_SGE_MASK_LEN ((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \
292 RX_SGE_MASK_ELEM_SZ)
293#define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1)
294#define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK)
295
523224a3
DK
296union host_hc_status_block {
297 /* pointer to fp status block e1x */
298 struct host_hc_status_block_e1x *e1x_sb;
299};
7a9b2557 300
a2fbb9ea
ET
301struct bnx2x_fastpath {
302
34f80b04 303 struct napi_struct napi;
523224a3
DK
304 union host_hc_status_block status_blk;
305	/* chip-independent shortcuts into the sb structure */
306 __le16 *sb_index_values;
307 __le16 *sb_running_index;
308	/* chip-independent shortcut into rx_prods_offset memory */
309 u32 ustorm_rx_prods_offset;
310
34f80b04 311 dma_addr_t status_blk_mapping;
a2fbb9ea 312
34f80b04 313 struct sw_tx_bd *tx_buf_ring;
a2fbb9ea 314
ca00392c 315 union eth_tx_bd_types *tx_desc_ring;
34f80b04 316 dma_addr_t tx_desc_mapping;
a2fbb9ea 317
7a9b2557
VZ
318 struct sw_rx_bd *rx_buf_ring; /* BDs mappings ring */
319 struct sw_rx_page *rx_page_ring; /* SGE pages mappings ring */
a2fbb9ea
ET
320
321 struct eth_rx_bd *rx_desc_ring;
34f80b04 322 dma_addr_t rx_desc_mapping;
a2fbb9ea
ET
323
324 union eth_rx_cqe *rx_comp_ring;
34f80b04
EG
325 dma_addr_t rx_comp_mapping;
326
7a9b2557
VZ
327 /* SGE ring */
328 struct eth_rx_sge *rx_sge_ring;
329 dma_addr_t rx_sge_mapping;
330
331 u64 sge_mask[RX_SGE_MASK_LEN];
332
34f80b04
EG
333 int state;
334#define BNX2X_FP_STATE_CLOSED 0
335#define BNX2X_FP_STATE_IRQ 0x80000
336#define BNX2X_FP_STATE_OPENING 0x90000
337#define BNX2X_FP_STATE_OPEN 0xa0000
338#define BNX2X_FP_STATE_HALTING 0xb0000
339#define BNX2X_FP_STATE_HALTED 0xc0000
523224a3
DK
340#define BNX2X_FP_STATE_TERMINATING 0xd0000
341#define BNX2X_FP_STATE_TERMINATED 0xe0000
34f80b04
EG
342
343 u8 index; /* number in fp array */
344 u8 cl_id; /* eth client id */
523224a3
DK
345 u8 cl_qzone_id;
346 u8 fw_sb_id; /* status block number in FW */
347 u8 igu_sb_id; /* status block number in HW */
348 u32 cid;
34f80b04 349
ca00392c
EG
350 union db_prod tx_db;
351
34f80b04
EG
352 u16 tx_pkt_prod;
353 u16 tx_pkt_cons;
354 u16 tx_bd_prod;
355 u16 tx_bd_cons;
4781bfad 356 __le16 *tx_cons_sb;
34f80b04 357
523224a3 358 __le16 fp_hc_idx;
34f80b04
EG
359
360 u16 rx_bd_prod;
361 u16 rx_bd_cons;
362 u16 rx_comp_prod;
363 u16 rx_comp_cons;
7a9b2557
VZ
364 u16 rx_sge_prod;
365 /* The last maximal completed SGE */
366 u16 last_max_sge;
4781bfad 367 __le16 *rx_cons_sb;
523224a3 368
34f80b04 369
ab6ad5a4 370
34f80b04 371 unsigned long tx_pkt,
a2fbb9ea 372 rx_pkt,
66e855f3 373 rx_calls;
ab6ad5a4 374
7a9b2557
VZ
375 /* TPA related */
376 struct sw_rx_bd tpa_pool[ETH_MAX_AGGREGATION_QUEUES_E1H];
377 u8 tpa_state[ETH_MAX_AGGREGATION_QUEUES_E1H];
378#define BNX2X_TPA_START 1
379#define BNX2X_TPA_STOP 2
380 u8 disable_tpa;
381#ifdef BNX2X_STOP_ON_ERROR
382 u64 tpa_queue_used;
383#endif
a2fbb9ea 384
de832a55
EG
385 struct tstorm_per_client_stats old_tclient;
386 struct ustorm_per_client_stats old_uclient;
387 struct xstorm_per_client_stats old_xclient;
388 struct bnx2x_eth_q_stats eth_q_stats;
389
ca00392c
EG
390	/* The size is calculated as the sum of:
391	   the sizeof of the name field in the netdev structure +
392	   4 (for the '-Xx-' string) +
393	   4 (for the digits, and to make it DWORD aligned) */
394#define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
395 char name[FP_NAME_SIZE];
34f80b04 396 struct bnx2x *bp; /* parent */
a2fbb9ea
ET
397};
398
34f80b04 399#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
7a9b2557
VZ
400
401
402/* MC hsi */
403#define MAX_FETCH_BD 13 /* HW max BDs per packet */
404#define RX_COPY_THRESH 92
405
406#define NUM_TX_RINGS 16
ca00392c 407#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types))
7a9b2557
VZ
408#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1)
409#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS)
410#define MAX_TX_BD (NUM_TX_BD - 1)
411#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
523224a3
DK
412#define INIT_JUMBO_TX_RING_SIZE MAX_TX_AVAIL
413#define INIT_TX_RING_SIZE MAX_TX_AVAIL
7a9b2557
VZ
414#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \
415 (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
416#define TX_BD(x) ((x) & MAX_TX_BD)
417#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT)
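/* Illustrative standalone sketch (not part of the driver): NEXT_TX_IDX()
 * skips the last BD of every TX page, which is reserved for the next-page
 * pointer.  sizeof(union eth_tx_bd_types) is assumed to be 16 bytes here,
 * i.e. 256 BDs per 4K page. */
#include <stdio.h>

#define TX_DESC_CNT_EX		(4096 / 16)
#define MAX_TX_DESC_CNT_EX	(TX_DESC_CNT_EX - 1)
#define NEXT_TX_IDX_EX(x)	((((x) & MAX_TX_DESC_CNT_EX) == \
				  (MAX_TX_DESC_CNT_EX - 1)) ? (x) + 2 : (x) + 1)

int main(void)
{
	unsigned idx = MAX_TX_DESC_CNT_EX - 2;		/* 253 */

	for (int i = 0; i < 3; i++) {
		printf("%u -> %u\n", idx, NEXT_TX_IDX_EX(idx));
		idx = NEXT_TX_IDX_EX(idx);
	}
	return 0;	/* 253->254, 254->256 (255 skipped), 256->257 */
}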
418
419/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
420#define NUM_RX_RINGS 8
421#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
422#define MAX_RX_DESC_CNT (RX_DESC_CNT - 2)
423#define RX_DESC_MASK (RX_DESC_CNT - 1)
424#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS)
425#define MAX_RX_BD (NUM_RX_BD - 1)
426#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
25141580 427#define MIN_RX_AVAIL 128
523224a3
DK
428#define INIT_JUMBO_RX_RING_SIZE MAX_RX_AVAIL
429#define INIT_RX_RING_SIZE MAX_RX_AVAIL
7a9b2557
VZ
430#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \
431 (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
432#define RX_BD(x) ((x) & MAX_RX_BD)
433
434/* Since a CQE is 4 times bigger than a BD entry, we have to allocate
435   4 times more pages for the CQ ring in order to keep it balanced with
436   the BD ring */
437#define NUM_RCQ_RINGS (NUM_RX_RINGS * 4)
438#define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
439#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1)
440#define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS)
441#define MAX_RCQ_BD (NUM_RCQ_BD - 1)
442#define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
443#define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \
444 (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
445#define RCQ_BD(x) ((x) & MAX_RCQ_BD)
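/* Illustrative standalone sketch (not part of the driver): with an 8-byte
 * eth_rx_bd (as noted above) and an assumed 32-byte eth_rx_cqe, allocating
 * 4x more CQ pages gives the CQ ring exactly as many entries as the BD
 * ring. */
#include <stdio.h>

#define PAGE_SZ_EX		4096
#define NUM_RX_RINGS_EX		8
#define NUM_RCQ_RINGS_EX	(NUM_RX_RINGS_EX * 4)

int main(void)
{
	unsigned num_rx_bd  = (PAGE_SZ_EX / 8)  * NUM_RX_RINGS_EX;	/* 4096 */
	unsigned num_rcq_bd = (PAGE_SZ_EX / 32) * NUM_RCQ_RINGS_EX;	/* 4096 */

	printf("RX BDs: %u, RCQ entries: %u\n", num_rx_bd, num_rcq_bd);
	return 0;
}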
446
447
448/* This is needed for determining last_max */
34f80b04 449#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b))
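/* Illustrative standalone sketch (not part of the driver): SUB_S16()
 * compares two 16-bit ring indices correctly across a wrap - the signed
 * difference stays small and positive when 'a' is just ahead of 'b'. */
#include <stdio.h>
#include <stdint.h>

typedef int16_t s16;
typedef uint16_t u16;

#define SUB_S16(a, b)		(s16)((s16)(a) - (s16)(b))

int main(void)
{
	u16 last_max = 65530;	/* close to the wrap point */
	u16 new_idx  = 5;	/* already wrapped around  */

	if (SUB_S16(new_idx, last_max) > 0)
		printf("new_idx is ahead, update last_max (diff=%d)\n",
		       SUB_S16(new_idx, last_max));
	return 0;	/* prints diff=11 */
}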
a2fbb9ea 450
7a9b2557
VZ
451#define __SGE_MASK_SET_BIT(el, bit) \
452 do { \
453 el = ((el) | ((u64)0x1 << (bit))); \
454 } while (0)
455
456#define __SGE_MASK_CLEAR_BIT(el, bit) \
457 do { \
458 el = ((el) & (~((u64)0x1 << (bit)))); \
459 } while (0)
460
461#define SGE_MASK_SET_BIT(fp, idx) \
462 __SGE_MASK_SET_BIT(fp->sge_mask[(idx) >> RX_SGE_MASK_ELEM_SHIFT], \
463 ((idx) & RX_SGE_MASK_ELEM_MASK))
464
465#define SGE_MASK_CLEAR_BIT(fp, idx) \
466 __SGE_MASK_CLEAR_BIT(fp->sge_mask[(idx) >> RX_SGE_MASK_ELEM_SHIFT], \
467 ((idx) & RX_SGE_MASK_ELEM_MASK))
468
469
470/* used on a CID received from the HW */
471#define SW_CID(x) (le32_to_cpu(x) & \
472 (COMMON_RAMROD_ETH_RX_CQE_CID >> 7))
473#define CQE_CMD(x) (le32_to_cpu(x) >> \
474 COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT)
475
bb2a0f7a
YG
476#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr_hi), \
477 le32_to_cpu((bd)->addr_lo))
478#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
479
523224a3
DK
480#define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */
481#define BNX2X_DB_SHIFT 7 /* 128 bytes*/
7a9b2557
VZ
482#define DPM_TRIGER_TYPE 0x40
483#define DOORBELL(bp, cid, val) \
484 do { \
523224a3 485 writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \
7a9b2557
VZ
486 DPM_TRIGER_TYPE); \
487 } while (0)
488
489
490/* TX CSUM helpers */
491#define SKB_CS_OFF(skb) (offsetof(struct tcphdr, check) - \
492 skb->csum_offset)
493#define SKB_CS(skb) (*(u16 *)(skb_transport_header(skb) + \
494 skb->csum_offset))
495
496#define pbd_tcp_flags(skb) (ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff)
497
498#define XMIT_PLAIN 0
499#define XMIT_CSUM_V4 0x1
500#define XMIT_CSUM_V6 0x2
501#define XMIT_CSUM_TCP 0x4
502#define XMIT_GSO_V4 0x8
503#define XMIT_GSO_V6 0x10
504
505#define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6)
506#define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6)
507
508
34f80b04 509/* stuff added to make the code fit 80Col */
a2fbb9ea 510
34f80b04 511#define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)
a2fbb9ea 512
7a9b2557
VZ
513#define TPA_TYPE_START ETH_FAST_PATH_RX_CQE_START_FLG
514#define TPA_TYPE_END ETH_FAST_PATH_RX_CQE_END_FLG
515#define TPA_TYPE(cqe_fp_flags) ((cqe_fp_flags) & \
516 (TPA_TYPE_START | TPA_TYPE_END))
517
1adcd8be
EG
518#define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
519
520#define BNX2X_IP_CSUM_ERR(cqe) \
521 (!((cqe)->fast_path_cqe.status_flags & \
522 ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
523 ((cqe)->fast_path_cqe.type_error_flags & \
524 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
525
526#define BNX2X_L4_CSUM_ERR(cqe) \
527 (!((cqe)->fast_path_cqe.status_flags & \
528 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
529 ((cqe)->fast_path_cqe.type_error_flags & \
530 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
531
532#define BNX2X_RX_CSUM_OK(cqe) \
533 (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
7a9b2557 534
052a38e0
EG
535#define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \
536 (((le16_to_cpu(flags) & \
537 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
538 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) \
539 == PRS_FLAG_OVERETH_IPV4)
7a9b2557 540#define BNX2X_RX_SUM_FIX(cqe) \
052a38e0 541 BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags)
7a9b2557 542
523224a3
DK
543#define U_SB_ETH_RX_CQ_INDEX 1
544#define U_SB_ETH_RX_BD_INDEX 2
545#define C_SB_ETH_TX_CQ_INDEX 5
a2fbb9ea 546
34f80b04 547#define BNX2X_RX_SB_INDEX \
523224a3 548 (&fp->sb_index_values[U_SB_ETH_RX_CQ_INDEX])
a2fbb9ea 549
34f80b04 550#define BNX2X_TX_SB_INDEX \
523224a3 551 (&fp->sb_index_values[C_SB_ETH_TX_CQ_INDEX])
7a9b2557
VZ
552
553/* end of fast path */
554
34f80b04 555/* common */
a2fbb9ea 556
34f80b04 557struct bnx2x_common {
a2fbb9ea 558
ad8d3948 559 u32 chip_id;
a2fbb9ea 560/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
34f80b04 561#define CHIP_ID(bp) (bp->common.chip_id & 0xfffffff0)
ad8d3948 562
34f80b04 563#define CHIP_NUM(bp) (bp->common.chip_id >> 16)
ad8d3948
EG
564#define CHIP_NUM_57710 0x164e
565#define CHIP_NUM_57711 0x164f
566#define CHIP_NUM_57711E 0x1650
567#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710)
568#define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711)
569#define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E)
570#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \
571 CHIP_IS_57711E(bp))
572#define IS_E1H_OFFSET CHIP_IS_E1H(bp)
573
34f80b04 574#define CHIP_REV(bp) (bp->common.chip_id & 0x0000f000)
ad8d3948
EG
575#define CHIP_REV_Ax 0x00000000
576/* assume maximum 5 revisions */
577#define CHIP_REV_IS_SLOW(bp) (CHIP_REV(bp) > 0x00005000)
578/* Emul versions are A=>0xe, B=>0xc, C=>0xa, D=>8, E=>6 */
579#define CHIP_REV_IS_EMUL(bp) ((CHIP_REV_IS_SLOW(bp)) && \
580 !(CHIP_REV(bp) & 0x00001000))
581/* FPGA versions are A=>0xf, B=>0xd, C=>0xb, D=>9, E=>7 */
582#define CHIP_REV_IS_FPGA(bp) ((CHIP_REV_IS_SLOW(bp)) && \
583 (CHIP_REV(bp) & 0x00001000))
584
585#define CHIP_TIME(bp) ((CHIP_REV_IS_EMUL(bp)) ? 2000 : \
586 ((CHIP_REV_IS_FPGA(bp)) ? 200 : 1))
587
34f80b04
EG
588#define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0)
589#define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f)
a2fbb9ea 590
34f80b04
EG
591 int flash_size;
592#define NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */
593#define NVRAM_TIMEOUT_COUNT 30000
594#define NVRAM_PAGE_SIZE 256
a2fbb9ea 595
34f80b04 596 u32 shmem_base;
2691d51d 597 u32 shmem2_base;
523224a3 598 u32 mf_cfg_base;
34f80b04
EG
599
600 u32 hw_config;
c18487ee 601
34f80b04 602 u32 bc_ver;
523224a3
DK
603
604 u8 int_block;
605#define INT_BLOCK_HC 0
606 u8 chip_port_mode;
607#define CHIP_PORT_MODE_NONE 0x2
34f80b04 608};
c18487ee 609
34f80b04
EG
610
611/* end of common */
612
613/* port */
614
615struct bnx2x_port {
616 u32 pmf;
c18487ee 617
a22f0788 618 u32 link_config[LINK_CONFIG_SIZE];
a2fbb9ea 619
a22f0788 620 u32 supported[LINK_CONFIG_SIZE];
34f80b04
EG
621/* link settings - missing defines */
622#define SUPPORTED_2500baseX_Full (1 << 15)
623
a22f0788 624 u32 advertising[LINK_CONFIG_SIZE];
a2fbb9ea 625/* link settings - missing defines */
34f80b04 626#define ADVERTISED_2500baseX_Full (1 << 15)
a2fbb9ea 627
34f80b04 628 u32 phy_addr;
c18487ee
YR
629
630 /* used to synchronize phy accesses */
631 struct mutex phy_mutex;
46c6a674 632 int need_hw_lock;
c18487ee 633
34f80b04 634 u32 port_stx;
a2fbb9ea 635
34f80b04
EG
636 struct nig_stats old_nig_stats;
637};
a2fbb9ea 638
34f80b04
EG
639/* end of port */
640
523224a3
DK
641/* e1h Classification CAM line allocations */
642enum {
643 CAM_ETH_LINE = 0,
644 CAM_ISCSI_ETH_LINE,
645 CAM_MAX_PF_LINE = CAM_ISCSI_ETH_LINE
646};
bb2a0f7a 647
523224a3 648#define BNX2X_VF_ID_INVALID 0xFF
34f80b04 649
523224a3
DK
650/*
651 * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
652 * controlled by the number of fast-path status blocks supported by the
653 * device (HW/FW). Each fast-path status block (FP-SB) aka non-default
654 * status block represents an independent interrupts context that can
655 * serve a regular L2 networking queue. However, special L2 queues such
656 * as the FCoE queue do not require a FP-SB, and other components like
657 * the CNIC may consume FP-SBs, reducing the number of possible L2 queues.
658 *
659 * If the maximum number of FP-SB available is X then:
660 * a. If CNIC is supported it consumes 1 FP-SB thus the max number of
661 * regular L2 queues is Y=X-1
662 * b. In MF mode the actual number of L2 queues is Y = ((X-1)/MF_factor)
663 * c. If the FCoE L2 queue is supported the actual number of L2 queues
664 * is Y+1
665 * d. The number of irqs (MSIX vectors) is either Y+1 (one extra for
666 * slow-path interrupts) or Y+2 if CNIC is supported (one additional
667 * FP interrupt context for the CNIC).
668 * e. The number of HW contexts (CID count) is always X, or X+1 if the FCoE
669 *    L2 queue is supported. The CID for the FCoE L2 queue is always X.
670 */
671
672#define FP_SB_MAX_E1x 16 /* fast-path interrupt contexts E1x */
673#define MAX_CONTEXT FP_SB_MAX_E1x
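/* Illustrative worked example (not part of the original file): with the E1x
 * maximum of X = FP_SB_MAX_E1x = 16 FP-SBs, CNIC enabled and no FCoE queue:
 *	regular L2 queues	Y = X - 1 = 15		(rule a above)
 *	MSI-X vectors		Y + 2     = 17		(rule d, slow-path + CNIC)
 *	HW contexts (CIDs)	X         = 16		(rule e)
 * In MF mode the L2 queue budget is further divided among the vnics (rule b). */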
674
675/*
676 * The cid_cnt parameter below refers to the value returned by
677 * 'bnx2x_get_l2_cid_count()' routine
678 */
679
680/*
681 * The number of FP contexts allocated by the driver == max number of regular
682 * L2 queues + 1 for the FCoE L2 queue
683 */
684#define L2_FP_COUNT(cid_cnt) ((cid_cnt) - CNIC_CONTEXT_USE)
34f80b04
EG
685
686union cdu_context {
687 struct eth_context eth;
688 char pad[1024];
689};
690
523224a3
DK
691/* CDU host DB constants */
692#define CDU_ILT_PAGE_SZ_HW 3
693#define CDU_ILT_PAGE_SZ (4096 << CDU_ILT_PAGE_SZ_HW) /* 32K */
694#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
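/* Illustrative standalone sketch (not part of the driver): how many CDU
 * contexts fit in one ILT page.  union cdu_context is padded to 1024 bytes,
 * so a 32K ILT page holds 32 CIDs; the L2 CID count used below is just an
 * example value. */
#include <stdio.h>

#define CDU_ILT_PAGE_SZ_EX	(4096 << 3)		/* 32K */
#define CDU_CONTEXT_SZ_EX	1024			/* padded union size */
#define DIV_ROUND_UP_EX(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned cids_per_page = CDU_ILT_PAGE_SZ_EX / CDU_CONTEXT_SZ_EX; /* 32 */
	unsigned l2_cid_count  = 17;			/* example CID count */

	printf("CIDs per ILT page: %u, ILT lines needed: %u\n",
	       cids_per_page, DIV_ROUND_UP_EX(l2_cid_count, cids_per_page));
	return 0;	/* 32 and 1 */
}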
695
696#ifdef BCM_CNIC
697#define CNIC_ISCSI_CID_MAX 256
698#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX)
699#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
700#endif
701
702#define QM_ILT_PAGE_SZ_HW 3
703#define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 32K */
704#define QM_CID_ROUND 1024
705
706#ifdef BCM_CNIC
707/* TM (timers) host DB constants */
708#define TM_ILT_PAGE_SZ_HW 2
709#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 16K */
710/* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */
711#define TM_CONN_NUM 1024
712#define TM_ILT_SZ (8 * TM_CONN_NUM)
713#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
714
715/* SRC (Searcher) host DB constants */
716#define SRC_ILT_PAGE_SZ_HW 3
717#define SRC_ILT_PAGE_SZ (4096 << SRC_ILT_PAGE_SZ_HW) /* 32K */
718#define SRC_HASH_BITS 10
719#define SRC_CONN_NUM (1 << SRC_HASH_BITS) /* 1024 */
720#define SRC_ILT_SZ (sizeof(struct src_ent) * SRC_CONN_NUM)
721#define SRC_T2_SZ SRC_ILT_SZ
722#define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ)
723#endif
724
bb2a0f7a 725#define MAX_DMAE_C 8
34f80b04
EG
726
727/* DMA memory not used in fastpath */
728struct bnx2x_slowpath {
34f80b04
EG
729 struct eth_stats_query fw_stats;
730 struct mac_configuration_cmd mac_config;
731 struct mac_configuration_cmd mcast_config;
523224a3 732 struct client_init_ramrod_data client_init_data;
34f80b04
EG
733
734 /* used by dmae command executer */
735 struct dmae_command dmae[MAX_DMAE_C];
736
bb2a0f7a
YG
737 u32 stats_comp;
738 union mac_stats mac_stats;
739 struct nig_stats nig_stats;
740 struct host_port_stats port_stats;
741 struct host_func_stats func_stats;
6fe49bb9 742 struct host_func_stats func_stats_base;
34f80b04
EG
743
744 u32 wb_comp;
34f80b04
EG
745 u32 wb_data[4];
746};
747
748#define bnx2x_sp(bp, var) (&bp->slowpath->var)
749#define bnx2x_sp_mapping(bp, var) \
750 (bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var))
751
752
753/* attn group wiring */
754#define MAX_DYNAMIC_ATTN_GRPS 8
755
756struct attn_route {
757 u32 sig[4];
758};
759
523224a3
DK
760struct iro {
761 u32 base;
762 u16 m1;
763 u16 m2;
764 u16 m3;
765 u16 size;
766};
767
768struct hw_context {
769 union cdu_context *vcxt;
770 dma_addr_t cxt_mapping;
771 size_t size;
772};
773
774/* forward */
775struct bnx2x_ilt;
776
72fd0718
VZ
777typedef enum {
778 BNX2X_RECOVERY_DONE,
779 BNX2X_RECOVERY_INIT,
780 BNX2X_RECOVERY_WAIT,
781} bnx2x_recovery_state_t;
782
523224a3
DK
783/**
784 * Event queue (EQ or event ring) MC hsi
785 * NUM_EQ_PAGES and EQ_DESC_CNT_PAGE must each be a power of 2
786 */
787#define NUM_EQ_PAGES 1
788#define EQ_DESC_CNT_PAGE (BCM_PAGE_SIZE / sizeof(union event_ring_elem))
789#define EQ_DESC_MAX_PAGE (EQ_DESC_CNT_PAGE - 1)
790#define NUM_EQ_DESC (EQ_DESC_CNT_PAGE * NUM_EQ_PAGES)
791#define EQ_DESC_MASK (NUM_EQ_DESC - 1)
792#define MAX_EQ_AVAIL (EQ_DESC_MAX_PAGE * NUM_EQ_PAGES - 2)
793
794/* depends on EQ_DESC_CNT_PAGE being a power of 2 */
795#define NEXT_EQ_IDX(x) ((((x) & EQ_DESC_MAX_PAGE) == \
796 (EQ_DESC_MAX_PAGE - 1)) ? (x) + 2 : (x) + 1)
797
798/* depends on the above and on NUM_EQ_PAGES being a power of 2 */
799#define EQ_DESC(x) ((x) & EQ_DESC_MASK)
800
801#define BNX2X_EQ_INDEX \
802 (&bp->def_status_blk->sp_sb.\
803 index_values[HC_SP_INDEX_EQ_CONS])
804
34f80b04
EG
805struct bnx2x {
806 /* Fields used in the tx and intr/napi performance paths
807 * are grouped together in the beginning of the structure
808 */
523224a3 809 struct bnx2x_fastpath *fp;
34f80b04
EG
810 void __iomem *regview;
811 void __iomem *doorbells;
523224a3 812 u16 db_size;
34f80b04
EG
813
814 struct net_device *dev;
815 struct pci_dev *pdev;
816
523224a3
DK
817 struct iro *iro_arr;
818#define IRO (bp->iro_arr)
819
34f80b04 820 atomic_t intr_sem;
72fd0718
VZ
821
822 bnx2x_recovery_state_t recovery_state;
823 int is_leader;
523224a3 824 struct msix_entry *msix_table;
8badd27a
EG
825#define INT_MODE_INTx 1
826#define INT_MODE_MSI 2
34f80b04
EG
827
828 int tx_ring_size;
829
830#ifdef BCM_VLAN
831 struct vlan_group *vlgrp;
832#endif
a2fbb9ea 833
34f80b04 834 u32 rx_csum;
437cf2f1 835 u32 rx_buf_size;
523224a3
DK
836/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
837#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
34f80b04
EG
838#define ETH_MIN_PACKET_SIZE 60
839#define ETH_MAX_PACKET_SIZE 1500
840#define ETH_MAX_JUMBO_PACKET_SIZE 9600
a2fbb9ea 841
0f00846d
EG
842 /* Max supported alignment is 256 (8 shift) */
843#define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? \
844 L1_CACHE_SHIFT : 8)
845#define BNX2X_RX_ALIGN (1 << BNX2X_RX_ALIGN_SHIFT)
523224a3 846#define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5)
0f00846d 847
523224a3
DK
848 struct host_sp_status_block *def_status_blk;
849#define DEF_SB_IGU_ID 16
850#define DEF_SB_ID HC_SP_SB_ID
851 __le16 def_idx;
4781bfad 852 __le16 def_att_idx;
34f80b04
EG
853 u32 attn_state;
854 struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS];
34f80b04
EG
855
856 /* slow path ring */
857 struct eth_spe *spq;
858 dma_addr_t spq_mapping;
859 u16 spq_prod_idx;
860 struct eth_spe *spq_prod_bd;
861 struct eth_spe *spq_last_bd;
4781bfad 862 __le16 *dsb_sp_prod;
34f80b04
EG
863 u16 spq_left; /* serialize spq */
864 /* used to synchronize spq accesses */
865 spinlock_t spq_lock;
866
523224a3
DK
867 /* event queue */
868 union event_ring_elem *eq_ring;
869 dma_addr_t eq_mapping;
870 u16 eq_prod;
871 u16 eq_cons;
872 __le16 *eq_cons_sb;
873
bb2a0f7a
YG
874 /* Flags for marking that there is a STAT_QUERY or
875 SET_MAC ramrod pending */
e665bfda
MC
876 int stats_pending;
877 int set_mac_pending;
34f80b04 878
33471629 879 /* End of fields used in the performance code paths */
34f80b04
EG
880
881 int panic;
7995c64e 882 int msg_enable;
34f80b04
EG
883
884 u32 flags;
885#define PCIX_FLAG 1
886#define PCI_32BIT_FLAG 2
1c06328c 887#define ONE_PORT_FLAG 4
34f80b04
EG
888#define NO_WOL_FLAG 8
889#define USING_DAC_FLAG 0x10
890#define USING_MSIX_FLAG 0x20
8badd27a 891#define USING_MSI_FLAG 0x40
7a9b2557 892#define TPA_ENABLE_FLAG 0x80
34f80b04
EG
893#define NO_MCP_FLAG 0x100
894#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG)
0c6671b0
EG
895#define HW_VLAN_TX_FLAG 0x400
896#define HW_VLAN_RX_FLAG 0x800
f34d28ea 897#define MF_FUNC_DIS 0x1000
34f80b04
EG
898
899 int func;
523224a3
DK
900 int base_fw_ndsb;
901
34f80b04
EG
902#define BP_PORT(bp) (bp->func % PORT_MAX)
903#define BP_FUNC(bp) (bp->func)
904#define BP_E1HVN(bp) (bp->func >> 1)
905#define BP_L_ID(bp) (BP_E1HVN(bp) << 2)
34f80b04 906
37b091ba
MC
907#ifdef BCM_CNIC
908#define BCM_CNIC_CID_START 16
909#define BCM_ISCSI_ETH_CL_ID 17
910#endif
911
34f80b04
EG
912 int pm_cap;
913 int pcie_cap;
8d5726c4 914 int mrrs;
34f80b04 915
1cf167f2 916 struct delayed_work sp_task;
72fd0718 917 struct delayed_work reset_task;
34f80b04 918 struct timer_list timer;
34f80b04
EG
919 int current_interval;
920
921 u16 fw_seq;
922 u16 fw_drv_pulse_wr_seq;
923 u32 func_stx;
924
925 struct link_params link_params;
926 struct link_vars link_vars;
01cd4528 927 struct mdio_if_info mdio;
a2fbb9ea 928
34f80b04
EG
929 struct bnx2x_common common;
930 struct bnx2x_port port;
931
8a1c38d1
EG
932 struct cmng_struct_per_port cmng;
933 u32 vn_weight_sum;
934
34f80b04
EG
935 u32 mf_config;
936 u16 e1hov;
937 u8 e1hmf;
3196a88a 938#define IS_E1HMF(bp) (bp->e1hmf != 0)
a2fbb9ea 939
f1410647
ET
940 u8 wol;
941
34f80b04 942 int rx_ring_size;
a2fbb9ea 943
34f80b04
EG
944 u16 tx_quick_cons_trip_int;
945 u16 tx_quick_cons_trip;
946 u16 tx_ticks_int;
947 u16 tx_ticks;
a2fbb9ea 948
34f80b04
EG
949 u16 rx_quick_cons_trip_int;
950 u16 rx_quick_cons_trip;
951 u16 rx_ticks_int;
952 u16 rx_ticks;
cdaa7cb8
VZ
953/* Maximal coalescing timeout in us */
954#define BNX2X_MAX_COALESCE_TOUT (0xf0*12)
a2fbb9ea 955
34f80b04 956 u32 lin_cnt;
a2fbb9ea 957
34f80b04 958 int state;
356e2385 959#define BNX2X_STATE_CLOSED 0
34f80b04
EG
960#define BNX2X_STATE_OPENING_WAIT4_LOAD 0x1000
961#define BNX2X_STATE_OPENING_WAIT4_PORT 0x2000
a2fbb9ea 962#define BNX2X_STATE_OPEN 0x3000
34f80b04 963#define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000
a2fbb9ea
ET
964#define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000
965#define BNX2X_STATE_CLOSING_WAIT4_UNLOAD 0x6000
523224a3 966#define BNX2X_STATE_FUNC_STARTED 0x7000
34f80b04
EG
967#define BNX2X_STATE_DIAG 0xe000
968#define BNX2X_STATE_ERROR 0xf000
a2fbb9ea 969
555f6c78 970 int multi_mode;
54b9ddaa 971 int num_queues;
5d7cd496
DK
972 int disable_tpa;
973 int int_mode;
a2fbb9ea 974
523224a3
DK
975 struct tstorm_eth_mac_filter_config mac_filters;
976#define BNX2X_ACCEPT_NONE 0x0000
977#define BNX2X_ACCEPT_UNICAST 0x0001
978#define BNX2X_ACCEPT_MULTICAST 0x0002
979#define BNX2X_ACCEPT_ALL_UNICAST 0x0004
980#define BNX2X_ACCEPT_ALL_MULTICAST 0x0008
981#define BNX2X_ACCEPT_BROADCAST 0x0010
982#define BNX2X_PROMISCUOUS_MODE 0x10000
983
34f80b04
EG
984 u32 rx_mode;
985#define BNX2X_RX_MODE_NONE 0
986#define BNX2X_RX_MODE_NORMAL 1
987#define BNX2X_RX_MODE_ALLMULTI 2
988#define BNX2X_RX_MODE_PROMISC 3
989#define BNX2X_MAX_MULTICAST 64
990#define BNX2X_MAX_EMUL_MULTI 16
a2fbb9ea 991
523224a3
DK
992 u8 igu_dsb_id;
993 u8 igu_base_sb;
994 u8 igu_sb_cnt;
34f80b04 995 dma_addr_t def_status_blk_mapping;
a2fbb9ea 996
34f80b04
EG
997 struct bnx2x_slowpath *slowpath;
998 dma_addr_t slowpath_mapping;
523224a3
DK
999 struct hw_context context;
1000
1001 struct bnx2x_ilt *ilt;
1002#define BP_ILT(bp) ((bp)->ilt)
1003#define ILT_MAX_LINES 128
1004
1005 int l2_cid_count;
1006#define L2_ILT_LINES(bp) (DIV_ROUND_UP((bp)->l2_cid_count, \
1007 ILT_PAGE_CIDS))
1008#define BNX2X_DB_SIZE(bp) ((bp)->l2_cid_count * (1 << BNX2X_DB_SHIFT))
1009
1010 int qm_cid_count;
a2fbb9ea 1011
a18f5128
EG
1012 int dropless_fc;
1013
37b091ba
MC
1014#ifdef BCM_CNIC
1015 u32 cnic_flags;
1016#define BNX2X_CNIC_FLAG_MAC_SET 1
1017
1018 void *t1;
1019 dma_addr_t t1_mapping;
1020 void *t2;
1021 dma_addr_t t2_mapping;
1022 void *timers;
1023 dma_addr_t timers_mapping;
1024 void *qm;
1025 dma_addr_t qm_mapping;
1026 struct cnic_ops *cnic_ops;
1027 void *cnic_data;
1028 u32 cnic_tag;
1029 struct cnic_eth_dev cnic_eth_dev;
523224a3 1030 union host_hc_status_block cnic_sb;
37b091ba 1031 dma_addr_t cnic_sb_mapping;
523224a3
DK
1032#define CNIC_SB_ID(bp) ((bp)->base_fw_ndsb + BP_L_ID(bp))
1033#define CNIC_IGU_SB_ID(bp) ((bp)->igu_base_sb)
37b091ba
MC
1034 struct eth_spe *cnic_kwq;
1035 struct eth_spe *cnic_kwq_prod;
1036 struct eth_spe *cnic_kwq_cons;
1037 struct eth_spe *cnic_kwq_last;
1038 u16 cnic_kwq_pending;
1039 u16 cnic_spq_pending;
1040 struct mutex cnic_mutex;
1041 u8 iscsi_mac[6];
1042#endif
1043
ad8d3948
EG
1044 int dmae_ready;
1045 /* used to synchronize dmae accesses */
1046 struct mutex dmae_mutex;
ad8d3948 1047
c4ff7cbf
EG
1048 /* used to protect the FW mail box */
1049 struct mutex fw_mb_mutex;
1050
bb2a0f7a
YG
1051 /* used to synchronize stats collecting */
1052 int stats_state;
a13773a5
VZ
1053
1054 /* used for synchronization of concurrent threads statistics handling */
1055 spinlock_t stats_lock;
1056
bb2a0f7a
YG
1057 /* used by dmae command loader */
1058 struct dmae_command stats_dmae;
1059 int executer_idx;
ad8d3948 1060
bb2a0f7a 1061 u16 stats_counter;
bb2a0f7a
YG
1062 struct bnx2x_eth_stats eth_stats;
1063
1064 struct z_stream_s *strm;
1065 void *gunzip_buf;
1066 dma_addr_t gunzip_mapping;
1067 int gunzip_outlen;
ad8d3948 1068#define FW_BUF_SIZE 0x8000
573f2035
EG
1069#define GUNZIP_BUF(bp) (bp->gunzip_buf)
1070#define GUNZIP_PHYS(bp) (bp->gunzip_mapping)
1071#define GUNZIP_OUTLEN(bp) (bp->gunzip_outlen)
a2fbb9ea 1072
ab6ad5a4 1073 struct raw_op *init_ops;
94a78b79 1074 /* Init blocks offsets inside init_ops */
ab6ad5a4 1075 u16 *init_ops_offsets;
94a78b79 1076 /* Data blob - has 32 bit granularity */
ab6ad5a4 1077 u32 *init_data;
94a78b79 1078 /* Zipped PRAM blobs - raw data */
ab6ad5a4
EG
1079 const u8 *tsem_int_table_data;
1080 const u8 *tsem_pram_data;
1081 const u8 *usem_int_table_data;
1082 const u8 *usem_pram_data;
1083 const u8 *xsem_int_table_data;
1084 const u8 *xsem_pram_data;
1085 const u8 *csem_int_table_data;
1086 const u8 *csem_pram_data;
573f2035
EG
1087#define INIT_OPS(bp) (bp->init_ops)
1088#define INIT_OPS_OFFSETS(bp) (bp->init_ops_offsets)
1089#define INIT_DATA(bp) (bp->init_data)
1090#define INIT_TSEM_INT_TABLE_DATA(bp) (bp->tsem_int_table_data)
1091#define INIT_TSEM_PRAM_DATA(bp) (bp->tsem_pram_data)
1092#define INIT_USEM_INT_TABLE_DATA(bp) (bp->usem_int_table_data)
1093#define INIT_USEM_PRAM_DATA(bp) (bp->usem_pram_data)
1094#define INIT_XSEM_INT_TABLE_DATA(bp) (bp->xsem_int_table_data)
1095#define INIT_XSEM_PRAM_DATA(bp) (bp->xsem_pram_data)
1096#define INIT_CSEM_INT_TABLE_DATA(bp) (bp->csem_int_table_data)
1097#define INIT_CSEM_PRAM_DATA(bp) (bp->csem_pram_data)
1098
34f24c7f 1099 char fw_ver[32];
ab6ad5a4 1100 const struct firmware *firmware;
a2fbb9ea
ET
1101};
1102
523224a3
DK
1103/**
1104 * Init queue/func interface
1105 */
1106/* queue init flags */
1107#define QUEUE_FLG_TPA 0x0001
1108#define QUEUE_FLG_CACHE_ALIGN 0x0002
1109#define QUEUE_FLG_STATS 0x0004
1110#define QUEUE_FLG_OV 0x0008
1111#define QUEUE_FLG_VLAN 0x0010
1112#define QUEUE_FLG_COS 0x0020
1113#define QUEUE_FLG_HC 0x0040
1114#define QUEUE_FLG_DHC 0x0080
1115#define QUEUE_FLG_OOO 0x0100
1116
1117#define QUEUE_DROP_IP_CS_ERR TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR
1118#define QUEUE_DROP_TCP_CS_ERR TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR
1119#define QUEUE_DROP_TTL0 TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0
1120#define QUEUE_DROP_UDP_CS_ERR TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR
1121
1122
1123
1124/* rss capabilities */
1125#define RSS_IPV4_CAP 0x0001
1126#define RSS_IPV4_TCP_CAP 0x0002
1127#define RSS_IPV6_CAP 0x0004
1128#define RSS_IPV6_TCP_CAP 0x0008
a2fbb9ea 1129
54b9ddaa
VZ
1130#define BNX2X_MAX_QUEUES(bp) (IS_E1HMF(bp) ? (MAX_CONTEXT/E1HVN_MAX) \
1131 : MAX_CONTEXT)
1132#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
1133#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
3196a88a 1134
523224a3
DK
1135
1136#define RSS_IPV4_CAP_MASK \
1137 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY
1138
1139#define RSS_IPV4_TCP_CAP_MASK \
1140 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY
1141
1142#define RSS_IPV6_CAP_MASK \
1143 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY
1144
1145#define RSS_IPV6_TCP_CAP_MASK \
1146 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY
1147
1148/* func init flags */
1149#define FUNC_FLG_RSS 0x0001
1150#define FUNC_FLG_STATS 0x0002
1151/* removed FUNC_FLG_UNMATCHED 0x0004 */
1152#define FUNC_FLG_TPA 0x0008
1153#define FUNC_FLG_SPQ 0x0010
1154#define FUNC_FLG_LEADING 0x0020 /* PF only */
1155
1156#define FUNC_CONFIG(flgs) ((flgs) & (FUNC_FLG_RSS | FUNC_FLG_TPA | \
1157 FUNC_FLG_LEADING))
1158
1159struct rxq_pause_params {
1160 u16 bd_th_lo;
1161 u16 bd_th_hi;
1162 u16 rcq_th_lo;
1163 u16 rcq_th_hi;
1164 u16 sge_th_lo; /* valid iff QUEUE_FLG_TPA */
1165 u16 sge_th_hi; /* valid iff QUEUE_FLG_TPA */
1166 u16 pri_map;
1167};
1168
1169struct bnx2x_rxq_init_params {
1170 /* cxt*/
1171 struct eth_context *cxt;
1172
1173 /* dma */
1174 dma_addr_t dscr_map;
1175 dma_addr_t sge_map;
1176 dma_addr_t rcq_map;
1177 dma_addr_t rcq_np_map;
1178
1179 u16 flags;
1180 u16 drop_flags;
1181 u16 mtu;
1182 u16 buf_sz;
1183 u16 fw_sb_id;
1184 u16 cl_id;
1185 u16 spcl_id;
1186 u16 cl_qzone_id;
1187
1188 /* valid iff QUEUE_FLG_STATS */
1189 u16 stat_id;
1190
1191 /* valid iff QUEUE_FLG_TPA */
1192 u16 tpa_agg_sz;
1193 u16 sge_buf_sz;
1194 u16 max_sges_pkt;
1195
1196 /* valid iff QUEUE_FLG_CACHE_ALIGN */
1197 u8 cache_line_log;
1198
1199 u8 sb_cq_index;
1200 u32 cid;
1201
1202 /* desired interrupts per sec. valid iff QUEUE_FLG_HC */
1203 u32 hc_rate;
1204};
1205
1206struct bnx2x_txq_init_params {
1207 /* cxt*/
1208 struct eth_context *cxt;
1209
1210 /* dma */
1211 dma_addr_t dscr_map;
1212
1213 u16 flags;
1214 u16 fw_sb_id;
1215 u8 sb_cq_index;
1216 u8 cos; /* valid iff QUEUE_FLG_COS */
1217 u16 stat_id; /* valid iff QUEUE_FLG_STATS */
1218 u16 traffic_type;
1219 u32 cid;
1220 u16 hc_rate; /* desired interrupts per sec.*/
1221 /* valid iff QUEUE_FLG_HC */
1222
1223};
1224
1225struct bnx2x_client_ramrod_params {
1226 int *pstate;
1227 int state;
1228 u16 index;
1229 u16 cl_id;
1230 u32 cid;
1231 u8 poll;
1232#define CLIENT_IS_LEADING_RSS 0x02
1233 u8 flags;
1234};
1235
1236struct bnx2x_client_init_params {
1237 struct rxq_pause_params pause;
1238 struct bnx2x_rxq_init_params rxq_params;
1239 struct bnx2x_txq_init_params txq_params;
1240 struct bnx2x_client_ramrod_params ramrod_params;
1241};
1242
1243struct bnx2x_rss_params {
1244 int mode;
1245 u16 cap;
1246 u16 result_mask;
1247};
1248
1249struct bnx2x_func_init_params {
1250
1251 /* rss */
1252 struct bnx2x_rss_params *rss; /* valid iff FUNC_FLG_RSS */
1253
1254 /* dma */
1255 dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */
1256 dma_addr_t spq_map; /* valid iff FUNC_FLG_SPQ */
1257
1258 u16 func_flgs;
1259 u16 func_id; /* abs fid */
1260 u16 pf_id;
1261 u16 spq_prod; /* valid iff FUNC_FLG_SPQ */
1262};
1263
555f6c78
EG
1264#define for_each_queue(bp, var) \
1265 for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++)
3196a88a 1266#define for_each_nondefault_queue(bp, var) \
54b9ddaa 1267 for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++)
3196a88a
EG
1268
1269
c18487ee
YR
1270void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
1271void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
1272 u32 len32);
4acac6a5 1273int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
17de50b7 1274int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
4acac6a5 1275int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
a22f0788 1276u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
573f2035
EG
1277void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
1278void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
1279 u32 addr, u32 len);
de0c62db
DK
1280void bnx2x_calc_fc_adv(struct bnx2x *bp);
1281int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1282 u32 data_hi, u32 data_lo, int common);
1283void bnx2x_update_coalesce(struct bnx2x *bp);
a22f0788 1284int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
34f80b04
EG
1285static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1286 int wait)
1287{
1288 u32 val;
1289
1290 do {
1291 val = REG_RD(bp, reg);
1292 if (val == expected)
1293 break;
1294 ms -= wait;
1295 msleep(wait);
1296
1297 } while (ms > 0);
1298
1299 return val;
1300}
523224a3
DK
1301#define BNX2X_ILT_ZALLOC(x, y, size) \
1302 do { \
1303 x = pci_alloc_consistent(bp->pdev, size, y); \
1304 if (x) \
1305 memset(x, 0, size); \
1306 } while (0)
1307
1308#define BNX2X_ILT_FREE(x, y, size) \
1309 do { \
1310 if (x) { \
1311 pci_free_consistent(bp->pdev, size, x, y); \
1312 x = NULL; \
1313 y = 0; \
1314 } \
1315 } while (0)
1316
1317#define ILOG2(x) (ilog2((x)))
1318
1319#define ILT_NUM_PAGE_ENTRIES (3072)
1320/* In 57710/11 we use the whole table since we have 8 functions
1321 */
1322#define ILT_PER_FUNC (ILT_NUM_PAGE_ENTRIES/8)
1323
1324#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
1325/*
1326 * The phys address is shifted right by 12 bits and has a valid bit (1)
1327 * added as the 53rd bit;
1328 * then, since this is a wide register(TM),
1329 * we split it into two 32-bit writes
1330 */
1331#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
1332#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
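/* Illustrative standalone sketch (not part of the driver): splitting a
 * 64-bit ILT page physical address into the two 32-bit halves written to
 * the wide register, as the two macros above do.  The address is an
 * arbitrary page-aligned test value. */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;
typedef uint64_t u64;

#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))

int main(void)
{
	u64 phys = 0x00000001234f6000ULL;

	printf("low dword  = 0x%08x\n", ONCHIP_ADDR1(phys)); /* 0x001234f6 */
	printf("high dword = 0x%08x\n", ONCHIP_ADDR2(phys)); /* 0x00100000,
							      * bit 20 = valid */
	return 0;
}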
34f80b04
EG
1333
1334
1335/* load/unload mode */
1336#define LOAD_NORMAL 0
1337#define LOAD_OPEN 1
1338#define LOAD_DIAG 2
1339#define UNLOAD_NORMAL 0
1340#define UNLOAD_CLOSE 1
72fd0718 1341#define UNLOAD_RECOVERY 2
34f80b04 1342
bb2a0f7a 1343
ad8d3948
EG
1344/* DMAE command defines */
1345#define DMAE_CMD_SRC_PCI 0
1346#define DMAE_CMD_SRC_GRC DMAE_COMMAND_SRC
1347
1348#define DMAE_CMD_DST_PCI (1 << DMAE_COMMAND_DST_SHIFT)
1349#define DMAE_CMD_DST_GRC (2 << DMAE_COMMAND_DST_SHIFT)
1350
1351#define DMAE_CMD_C_DST_PCI 0
1352#define DMAE_CMD_C_DST_GRC (1 << DMAE_COMMAND_C_DST_SHIFT)
1353
1354#define DMAE_CMD_C_ENABLE DMAE_COMMAND_C_TYPE_ENABLE
1355
1356#define DMAE_CMD_ENDIANITY_NO_SWAP (0 << DMAE_COMMAND_ENDIANITY_SHIFT)
1357#define DMAE_CMD_ENDIANITY_B_SWAP (1 << DMAE_COMMAND_ENDIANITY_SHIFT)
1358#define DMAE_CMD_ENDIANITY_DW_SWAP (2 << DMAE_COMMAND_ENDIANITY_SHIFT)
1359#define DMAE_CMD_ENDIANITY_B_DW_SWAP (3 << DMAE_COMMAND_ENDIANITY_SHIFT)
1360
1361#define DMAE_CMD_PORT_0 0
1362#define DMAE_CMD_PORT_1 DMAE_COMMAND_PORT
1363
1364#define DMAE_CMD_SRC_RESET DMAE_COMMAND_SRC_RESET
1365#define DMAE_CMD_DST_RESET DMAE_COMMAND_DST_RESET
1366#define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT
1367
1368#define DMAE_LEN32_RD_MAX 0x80
02e3c6cb 1369#define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000)
ad8d3948
EG
1370
1371#define DMAE_COMP_VAL 0xe0d0d0ae
1372
1373#define MAX_DMAE_C_PER_PORT 8
ab6ad5a4 1374#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
ad8d3948 1375 BP_E1HVN(bp))
ab6ad5a4 1376#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
ad8d3948
EG
1377 E1HVN_MAX)
1378
1379
25047950
ET
1380/* PCIE link and speed */
1381#define PCICFG_LINK_WIDTH 0x1f00000
1382#define PCICFG_LINK_WIDTH_SHIFT 20
1383#define PCICFG_LINK_SPEED 0xf0000
1384#define PCICFG_LINK_SPEED_SHIFT 16
a2fbb9ea 1385
bb2a0f7a 1386
d3d4f495 1387#define BNX2X_NUM_TESTS 7
bb2a0f7a 1388
b5bf9068
EG
1389#define BNX2X_PHY_LOOPBACK 0
1390#define BNX2X_MAC_LOOPBACK 1
1391#define BNX2X_PHY_LOOPBACK_FAILED 1
1392#define BNX2X_MAC_LOOPBACK_FAILED 2
bb2a0f7a
YG
1393#define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \
1394 BNX2X_PHY_LOOPBACK_FAILED)
96fc1784 1395
7a9b2557
VZ
1396
1397#define STROM_ASSERT_ARRAY_SIZE 50
1398
96fc1784 1399
34f80b04 1400/* must be used on a CID before placing it on a HW ring */
ab6ad5a4
EG
1401#define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \
1402 (BP_E1HVN(bp) << 17) | (x))
7a9b2557
VZ
1403
1404#define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe))
1405#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1)
1406
1407
523224a3 1408#define BNX2X_BTR 4
7a9b2557 1409#define MAX_SPQ_PENDING 8
a2fbb9ea 1410
a2fbb9ea 1411
34f80b04
EG
1412/* CMNG constants
1413 derived from lab experiments, and not from system spec calculations !!! */
1414#define DEF_MIN_RATE 100
1415/* resolution of the rate shaping timer - 100 usec */
1416#define RS_PERIODIC_TIMEOUT_USEC 100
1417/* resolution of fairness algorithm in usecs -
33471629 1418 coefficient for calculating the actual t fair */
34f80b04
EG
1419#define T_FAIR_COEF 10000000
1420/* number of bytes in single QM arbitration cycle -
33471629 1421 coefficient for calculating the fairness timer */
34f80b04
EG
1422#define QM_ARB_BYTES 40000
1423#define FAIR_MEM 2
1424
1425
1426#define ATTN_NIG_FOR_FUNC (1L << 8)
1427#define ATTN_SW_TIMER_4_FUNC (1L << 9)
1428#define GPIO_2_FUNC (1L << 10)
1429#define GPIO_3_FUNC (1L << 11)
1430#define GPIO_4_FUNC (1L << 12)
1431#define ATTN_GENERAL_ATTN_1 (1L << 13)
1432#define ATTN_GENERAL_ATTN_2 (1L << 14)
1433#define ATTN_GENERAL_ATTN_3 (1L << 15)
1434#define ATTN_GENERAL_ATTN_4 (1L << 13)
1435#define ATTN_GENERAL_ATTN_5 (1L << 14)
1436#define ATTN_GENERAL_ATTN_6 (1L << 15)
1437
1438#define ATTN_HARD_WIRED_MASK 0xff00
1439#define ATTENTION_ID 4
a2fbb9ea
ET
1440
1441
34f80b04
EG
1442/* stuff added to make the code fit 80Col */
1443
1444#define BNX2X_PMF_LINK_ASSERT \
1445 GENERAL_ATTEN_OFFSET(LINK_SYNC_ATTENTION_BIT_FUNC_0 + BP_FUNC(bp))
1446
a2fbb9ea
ET
1447#define BNX2X_MC_ASSERT_BITS \
1448 (GENERAL_ATTEN_OFFSET(TSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
1449 GENERAL_ATTEN_OFFSET(USTORM_FATAL_ASSERT_ATTENTION_BIT) | \
1450 GENERAL_ATTEN_OFFSET(CSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
1451 GENERAL_ATTEN_OFFSET(XSTORM_FATAL_ASSERT_ATTENTION_BIT))
1452
1453#define BNX2X_MCP_ASSERT \
1454 GENERAL_ATTEN_OFFSET(MCP_FATAL_ASSERT_ATTENTION_BIT)
1455
34f80b04
EG
1456#define BNX2X_GRC_TIMEOUT GENERAL_ATTEN_OFFSET(LATCHED_ATTN_TIMEOUT_GRC)
1457#define BNX2X_GRC_RSV (GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCR) | \
1458 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCT) | \
1459 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCN) | \
1460 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCU) | \
1461 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \
1462 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC))
1463
a2fbb9ea
ET
1464#define HW_INTERRUT_ASSERT_SET_0 \
1465 (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \
1466 AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \
1467 AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \
1468 AEU_INPUTS_ATTN_BITS_PBF_HW_INTERRUPT)
34f80b04 1469#define HW_PRTY_ASSERT_SET_0 (AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \
a2fbb9ea
ET
1470 AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \
1471 AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR | \
1472 AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR |\
1473 AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR)
1474#define HW_INTERRUT_ASSERT_SET_1 \
1475 (AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \
1476 AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \
1477 AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \
1478 AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT | \
1479 AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT | \
1480 AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT | \
1481 AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT | \
1482 AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT | \
1483 AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT | \
1484 AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT | \
1485 AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT)
34f80b04 1486#define HW_PRTY_ASSERT_SET_1 (AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR |\
a2fbb9ea
ET
1487 AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR | \
1488 AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR | \
1489 AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR | \
ab6ad5a4
EG
1490 AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR |\
1491 AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR |\
a2fbb9ea
ET
1492 AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR | \
1493 AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR | \
1494 AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR | \
1495 AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \
1496 AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR)
1497#define HW_INTERRUT_ASSERT_SET_2 \
1498 (AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \
1499 AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \
1500 AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \
1501 AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT |\
1502 AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT)
34f80b04 1503#define HW_PRTY_ASSERT_SET_2 (AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR | \
a2fbb9ea
ET
1504 AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR | \
1505 AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |\
1506 AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR | \
1507 AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR | \
1508 AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \
1509 AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)
1510
72fd0718
VZ
1511#define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
1512 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
1513 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
1514 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
a2fbb9ea 1515
c68ed255 1516#define RSS_FLAGS(bp) \
34f80b04
EG
1517 (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
1518 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \
1519 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \
1520 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \
555f6c78
EG
1521 (bp->multi_mode << \
1522 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT))
34f80b04 1523#define MULTI_MASK 0x7f
a2fbb9ea 1524
a2fbb9ea 1525#define BNX2X_SP_DSB_INDEX \
523224a3
DK
1526 (&bp->def_status_blk->sp_sb.\
1527 index_values[HC_SP_INDEX_ETH_DEF_CONS])
1528#define SET_FLAG(value, mask, flag) \
1529 do {\
1530 (value) &= ~(mask);\
1531 (value) |= ((flag) << (mask##_SHIFT));\
1532 } while (0)
a2fbb9ea 1533
523224a3
DK
1534#define GET_FLAG(value, mask) \
1535 (((value) &= (mask)) >> (mask##_SHIFT))
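/* Illustrative standalone sketch (not part of the driver): SET_FLAG() and
 * GET_FLAG() rely on a FOO / FOO_SHIFT pair of macros; EX_FIELD below is a
 * made-up field purely for demonstration.  Note that in this revision
 * GET_FLAG() uses '&=', so it also clears the other bits of its argument. */
#include <stdio.h>

#define EX_FIELD		0x00f0
#define EX_FIELD_SHIFT		4

#define SET_FLAG(value, mask, flag) \
	do {\
		(value) &= ~(mask);\
		(value) |= ((flag) << (mask##_SHIFT));\
	} while (0)

#define GET_FLAG(value, mask) \
	(((value) &= (mask)) >> (mask##_SHIFT))

int main(void)
{
	unsigned val = 0;

	SET_FLAG(val, EX_FIELD, 0x9);			/* val = 0x90 */
	printf("field = 0x%x\n", GET_FLAG(val, EX_FIELD));	/* 0x9 */
	return 0;
}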
a2fbb9ea
ET
1536
1537#define CAM_IS_INVALID(x) \
523224a3
DK
1538 (GET_FLAG(x.flags, \
1539 MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \
1540 (T_ETH_MAC_COMMAND_INVALIDATE))
a2fbb9ea
ET
1541
1542#define CAM_INVALIDATE(x) \
34f80b04
EG
1543 (x.target_table_entry.flags = TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE)
1544
1545
1546/* Number of u32 elements in MC hash array */
1547#define MC_HASH_SIZE 8
1548#define MC_HASH_OFFSET(bp, i) (BAR_TSTRORM_INTMEM + \
1549 TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(BP_FUNC(bp)) + i*4)
a2fbb9ea
ET
1550
1551
34f80b04
EG
1552#ifndef PXP2_REG_PXP2_INT_STS
1553#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0
1554#endif
1555
34f24c7f
VZ
1556#define BNX2X_VPD_LEN 128
1557#define VENDOR_ID_LEN 4
1558
523224a3
DK
1559/* Congestion management fairness mode */
1560#define CMNG_FNS_NONE 0
1561#define CMNG_FNS_MINMAX 1
1562
1563#define HC_SEG_ACCESS_DEF 0 /*Driver decision 0-3*/
1564#define HC_SEG_ACCESS_ATTN 4
1565#define HC_SEG_ACCESS_NORM 0 /*Driver decision 0-1*/
1566
b0efbb99
DK
1567#ifdef BNX2X_MAIN
1568#define BNX2X_EXTERN
1569#else
1570#define BNX2X_EXTERN extern
1571#endif
1572
1573BNX2X_EXTERN int load_count[3]; /* 0-common, 1-port0, 2-port1 */
1574
a2fbb9ea
ET
1575/* MISC_REG_RESET_REG - this is here for the hsi to work, don't touch */
1576
de0c62db
DK
1577extern void bnx2x_set_ethtool_ops(struct net_device *netdev);
1578
6c719d00
DK
1579void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
1580
523224a3
DK
1581#define WAIT_RAMROD_POLL 0x01
1582#define WAIT_RAMROD_COMMON 0x02
1583
1584int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
1585 int *state_p, int flags);
a2fbb9ea 1586#endif /* bnx2x.h */