/* bnx2x_cmn.h: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#ifndef BNX2X_CMN_H
#define BNX2X_CMN_H

#include <linux/types.h>
#include <linux/netdevice.h>


#include "bnx2x.h"


/*********************** Interfaces ****************************
 *  Functions that need to be implemented by each driver version
 */

/**
 * Initialize link parameters structure variables.
 *
 * @param bp
 * @param load_mode
 *
 * @return u8
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);

/**
 * Configure hw according to link parameters structure.
 *
 * @param bp
 */
void bnx2x_link_set(struct bnx2x *bp);

/**
 * Query link status
 *
 * @param bp
 * @param is_serdes
 *
 * @return 0 - link is UP
 */
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);

/**
 * Handles link status change
 *
 * @param bp
 */
void bnx2x__link_status_update(struct bnx2x *bp);

/**
 * MSI-X slowpath interrupt handler
 *
 * @param irq
 * @param dev_instance
 *
 * @return irqreturn_t
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);

/**
 * non MSI-X interrupt handler
 *
 * @param irq
 * @param dev_instance
 *
 * @return irqreturn_t
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
#ifdef BCM_CNIC

/**
 * Send command to cnic driver
 *
 * @param bp
 * @param cmd
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);

/**
 * Provides cnic information for proper interrupt handling
 *
 * @param bp
 */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
#endif

/**
 * Enable HW interrupts.
 *
 * @param bp
 */
void bnx2x_int_enable(struct bnx2x *bp);

/**
 * Disable HW interrupts.
 *
 * @param bp
 */
void bnx2x_int_disable(struct bnx2x *bp);

/**
 * Disable interrupts. This function ensures that no ISRs or
 * SP DPCs (sp_task) are running after it returns.
 *
 * @param bp
 * @param disable_hw    if true, disable HW interrupts.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);

/**
 * Loads device firmware
 *
 * @param bp
 *
 * @return int
 */
int bnx2x_init_firmware(struct bnx2x *bp);

/**
 * Init HW blocks according to current initialization stage:
 * COMMON, PORT or FUNCTION.
 *
 * @param bp
 * @param load_code: COMMON, PORT or FUNCTION
 *
 * @return int
 */
int bnx2x_init_hw(struct bnx2x *bp, u32 load_code);

/**
 * Init driver internals:
 *  - rings
 *  - status blocks
 *  - etc.
 *
 * @param bp
 * @param load_code     COMMON, PORT or FUNCTION
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);

/**
 * Allocate driver's memory.
 *
 * @param bp
 *
 * @return int
 */
int bnx2x_alloc_mem(struct bnx2x *bp);

/**
 * Release driver's memory.
 *
 * @param bp
 */
void bnx2x_free_mem(struct bnx2x *bp);

/**
 * Setup eth Client.
 *
 * @param bp
 * @param fp
 * @param is_leading
 *
 * @return int
 */
int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                       int is_leading);

/**
 * Bring down an eth client.
 *
 * @param bp
 * @param p
 *
 * @return int
 */
int bnx2x_stop_fw_client(struct bnx2x *bp,
                         struct bnx2x_client_ramrod_params *p);

/**
 * Set number of queues according to mode
 *
 * @param bp
 *
 */
void bnx2x_set_num_queues_msix(struct bnx2x *bp);

/**
 * Cleanup chip internals:
 * - Cleanup MAC configuration.
 * - Close clients.
 * - etc.
 *
 * @param bp
 * @param unload_mode
 */
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);

/**
 * Acquire HW lock.
 *
 * @param bp
 * @param resource      Resource bit which was locked
 *
 * @return int
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * Release HW lock.
 *
 * @param bp            driver handle
 * @param resource      Resource bit which was locked
 *
 * @return int
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * Configure eth MAC address in the HW according to the value in
 * netdev->dev_addr for 57711
 *
 * @param bp    driver handle
 * @param set
 */
void bnx2x_set_eth_mac(struct bnx2x *bp, int set);

#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). The function will wait until the ramrod completion
 * returns.
 *
 * @param bp    driver handle
 * @param set   set or clear the CAM entry
 *
 * @return 0 on success, -ENODEV if the ramrod doesn't complete.
 */
int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set);
#endif

/**
 * Initialize status block in FW and HW
 *
 * @param bp            driver handle
 * @param dma_addr_t    mapping
 * @param int           vfid
 * @param u8            vf_valid
 * @param int           fw_sb_id
 * @param int           igu_sb_id
 */
void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
                   u8 vf_valid, int fw_sb_id, int igu_sb_id);

/**
 * Reconfigure FW/HW according to dev->flags rx mode
 *
 * @param dev   net_device
 *
 */
void bnx2x_set_rx_mode(struct net_device *dev);

/**
 * Configure MAC filtering rules in a FW.
 *
 * @param bp    driver handle
 */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp);

/* Parity errors related */
void bnx2x_inc_load_cnt(struct bnx2x *bp);
u32 bnx2x_dec_load_cnt(struct bnx2x *bp);
bool bnx2x_chk_parity_attn(struct bnx2x *bp);
bool bnx2x_reset_is_done(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);

/**
 * Perform statistics handling according to event
 *
 * @param bp     driver handle
 * @param event  bnx2x_stats_event
 */
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/**
 * Handle sp events
 *
 * @param fp        fastpath handle for the event
 * @param rr_cqe    eth_rx_cqe
 */
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);

/**
 * Init/halt function before/after sending
 * CLIENT_SETUP/CFC_DEL for the first/last client.
 *
 * @param bp
 *
 * @return int
 */
int bnx2x_func_start(struct bnx2x *bp);
int bnx2x_func_stop(struct bnx2x *bp);

/**
 * Prepare ILT configurations according to current driver
 * parameters.
 *
 * @param bp
 */
void bnx2x_ilt_set_info(struct bnx2x *bp);

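/* Sample the status block running index (written by the chip) into the fastpath */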
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        barrier(); /* status block is written to by the chip */
        fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct ustorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        /*
         * Make sure that the BD and SGE data is updated before updating the
         * producers since FW might read the BD/SGE right after the producer
         * is updated.
         * This is only applicable for weak-ordered memory model archs such
         * as IA-64. The following barrier is also mandatory since FW assumes
         * that BDs must have buffers.
         */
        wmb();

        for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
                REG_WR(bp,
                       BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset + i*4,
                       ((u32 *)&rx_prods)[i]);

        mmiowb(); /* keep prod updates ordered */

        DP(NETIF_MSG_RX_STATUS,
           "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
           fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}


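/* Acknowledge a status block: write its index and interrupt mode to the HC command register */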
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}
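
/* Read and acknowledge the interrupt status from the HC SIMD mask register */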
static inline u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}

/*
 * fast path service functions
 */

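/* True while TX packets are still outstanding (producer != consumer); checked during unload */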
static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return fp->tx_pkt_prod != fp->tx_pkt_cons;
}

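/* Return the number of free TX BDs, counting the "next-page" BDs as always in use */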
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

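/* True when the chip has reported TX completions that the driver has not yet processed */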
static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u16 hw_cons;

        /* Tell compiler that status block fields can change */
        barrier();
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        return hw_cons != fp->tx_pkt_cons;
}

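/* True when new RX completions are pending; skips over the "next-page" RCQ descriptor */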
static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
        u16 rx_cons_sb;

        /* Tell compiler that status block fields can change */
        barrier();
        rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
        if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                rx_cons_sb++;
        return (fp->rx_comp_cons != rx_cons_sb);
}
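
/* Unmap and free the page attached to an RX SGE ring entry, then clear the entry */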
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}


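/* Clear the mask bits for the last two SGEs of every page - they are the "next page" elements */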
static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the last two indices in each page:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

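/* Allocate a page for an RX SGE ring entry, map it for DMA and publish the address in the SGE */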
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = dma_map_page(&bp->pdev->dev, page, 0,
                               SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        dma_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}
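
/* Allocate an skb for an RX BD, map it for DMA and publish the address in the descriptor */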
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
                                 DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        dma_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                                      struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        dma_sync_single_for_device(&bp->pdev->dev,
                                   dma_unmap_addr(cons_rx_buf, mapping),
                                   RX_COPY_THRESH, DMA_FROM_DEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        dma_unmap_addr_set(prod_rx_buf, mapping,
                           dma_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}
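
/* Free the first 'last' entries of the RX SGE ring */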
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

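/* Release the skbs held in the TPA pool, unmapping those whose aggregation is still open */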
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
                                       struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++) {
                struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
                struct sk_buff *skb = rx_buf->skb;

                if (skb == NULL) {
                        DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
                        continue;
                }

                if (fp->tpa_state[i] == BNX2X_TPA_START)
                        dma_unmap_single(&bp->pdev->dev,
                                         dma_unmap_addr(rx_buf, mapping),
                                         bp->rx_buf_size, DMA_FROM_DEVICE);

                dev_kfree_skb(skb);
                rx_buf->skb = NULL;
        }
}


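/* Chain the TX BD rings via their "next page" BDs and reset the per-queue TX state */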
static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
{
        int i, j;

        for_each_queue(bp, j) {
                struct bnx2x_fastpath *fp = &bp->fp[j];

                for (i = 1; i <= NUM_TX_RINGS; i++) {
                        struct eth_tx_next_bd *tx_next_bd =
                                &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

                        tx_next_bd->addr_hi =
                                cpu_to_le32(U64_HI(fp->tx_desc_mapping +
                                            BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
                        tx_next_bd->addr_lo =
                                cpu_to_le32(U64_LO(fp->tx_desc_mapping +
                                            BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
                }

                SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
                fp->tx_db.data.zero_fill1 = 0;
                fp->tx_db.data.prod = 0;

                fp->tx_pkt_prod = 0;
                fp->tx_pkt_cons = 0;
                fp->tx_bd_prod = 0;
                fp->tx_bd_cons = 0;
                fp->tx_pkt = 0;
        }
}
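
/* Write the "next page" address into the reserved BDs at the end of every RX descriptor page */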
static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
{
        int i;

        for (i = 1; i <= NUM_RX_RINGS; i++) {
                struct eth_rx_bd *rx_bd;

                rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
                rx_bd->addr_hi =
                        cpu_to_le32(U64_HI(fp->rx_desc_mapping +
                                    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
                rx_bd->addr_lo =
                        cpu_to_le32(U64_LO(fp->rx_desc_mapping +
                                    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
        }
}

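/* Write the "next page" address into the reserved SGEs at the end of every SGE page */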
static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
        int i;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                struct eth_rx_sge *sge;

                sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
                sge->addr_hi =
                        cpu_to_le32(U64_HI(fp->rx_sge_mapping +
                                    BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

                sge->addr_lo =
                        cpu_to_le32(U64_LO(fp->rx_sge_mapping +
                                    BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
        }
}

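/* Write the "next page" address into the last CQE of every RCQ page */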
static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
{
        int i;
        for (i = 1; i <= NUM_RCQ_RINGS; i++) {
                struct eth_rx_cqe_next_page *nextpg;

                nextpg = (struct eth_rx_cqe_next_page *)
                        &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
                nextpg->addr_hi =
                        cpu_to_le32(U64_HI(fp->rx_comp_mapping +
                                   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
                nextpg->addr_lo =
                        cpu_to_le32(U64_LO(fp->rx_comp_mapping +
                                   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
        }
}


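/* Copy a structure into storm internal memory, 32 bits at a time */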
static inline void __storm_memset_struct(struct bnx2x *bp,
                                         u32 addr, size_t size, u32 *data)
{
        int i;
        for (i = 0; i < size/4; i++)
                REG_WR(bp, addr + (i * 4), data[i]);
}

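/* Write the MAC filter configuration for a function into TSTORM internal memory */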
static inline void storm_memset_mac_filters(struct bnx2x *bp,
                        struct tstorm_eth_mac_filter_config *mac_filters,
                        u16 abs_fid)
{
        size_t size = sizeof(struct tstorm_eth_mac_filter_config);

        u32 addr = BAR_TSTRORM_INTMEM +
                        TSTORM_MAC_FILTER_CONFIG_OFFSET(abs_fid);

        __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
}

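/* Write the per-port congestion management parameters into XSTORM internal memory */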
static inline void storm_memset_cmng(struct bnx2x *bp,
                                     struct cmng_struct_per_port *cmng,
                                     u8 port)
{
        size_t size = sizeof(struct cmng_struct_per_port);

        u32 addr = BAR_XSTRORM_INTMEM +
                        XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);

        __storm_memset_struct(bp, addr, size, (u32 *)cmng);
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);

void bnx2x_link_report(struct bnx2x *bp);
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
int bnx2x_tx_int(struct bnx2x_fastpath *fp);
void bnx2x_init_rx_rings(struct bnx2x *bp);
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);

int bnx2x_change_mac_addr(struct net_device *dev, void *p);
void bnx2x_tx_timeout(struct net_device *dev);
void bnx2x_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp);
void bnx2x_netif_start(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_free_irq(struct bnx2x *bp, bool disable_only);
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
int bnx2x_resume(struct pci_dev *pdev);
void bnx2x_free_skbs(struct bnx2x *bp);
int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);

/**
 * Allocate/release memory outside the main driver structure
 *
 * @param bp
 *
 * @return int
 */
int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp);
void bnx2x_free_mem_bp(struct bnx2x *bp);

#define BNX2X_FW_IP_HDR_ALIGN_PAD       2 /* FW places hdr with this padding */

#endif /* BNX2X_CMN_H */