1 /* bnx2x_cmn.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2010 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
19 #include <linux/etherdevice.h>
21 #include <linux/ipv6.h>
22 #include "bnx2x_cmn.h"
25 #include <linux/if_vlan.h>
28 static int bnx2x_poll(struct napi_struct *napi, int budget);
30 /* free skb in the packet ring at pos idx
31 * return idx of last bd freed
33 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
36 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
37 struct eth_tx_start_bd *tx_start_bd;
38 struct eth_tx_bd *tx_data_bd;
39 struct sk_buff *skb = tx_buf->skb;
40 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
43 /* prefetch skb end pointer to speedup dev_kfree_skb() */
46 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
50 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
51 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
52 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
53 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
55 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
56 #ifdef BNX2X_STOP_ON_ERROR
57 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
58 BNX2X_ERR("BAD nbd!\n");
62 new_cons = nbd + tx_buf->first_bd;
65 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
67 /* Skip a parse bd... */
69 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
71 /* ...and the TSO split header bd since they have no mapping */
72 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
74 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
80 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
81 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
82 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
83 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
85 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
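/* bnx2x_tx_int - reclaim TX BDs completed by the chip.
 *
 * Walks the TX ring from the software packet consumer up to the hardware
 * consumer read from the status block, freeing each packet's skb and BDs
 * via bnx2x_free_tx_pkt(). If the queue had been stopped, it is woken
 * again under the tx lock once enough BDs are available.
 */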
97 int bnx2x_tx_int(struct bnx2x_fastpath *fp)
99 struct bnx2x *bp = fp->bp;
100 struct netdev_queue *txq;
101 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
103 #ifdef BNX2X_STOP_ON_ERROR
104 if (unlikely(bp->panic))
108 txq = netdev_get_tx_queue(bp->dev, fp->index);
109 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
110 sw_cons = fp->tx_pkt_cons;
112 while (sw_cons != hw_cons) {
115 pkt_cons = TX_BD(sw_cons);
117 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
119 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
120 hw_cons, sw_cons, pkt_cons);
122 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
124 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
127 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
131 fp->tx_pkt_cons = sw_cons;
132 fp->tx_bd_cons = bd_cons;
134 /* Need to make the tx_bd_cons update visible to start_xmit()
135 * before checking for netif_tx_queue_stopped(). Without the
136 * memory barrier, there is a small possibility that
137 * start_xmit() will miss it and cause the queue to be stopped
142 /* TBD need a thresh? */
143 if (unlikely(netif_tx_queue_stopped(txq))) {
144 /* Taking tx_lock() is needed to prevent reenabling the queue
145 * while it's empty. This could happen if rx_action() gets
146 * suspended in bnx2x_tx_int() after the condition before
147 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
149 * stops the queue->sees fresh tx_bd_cons->releases the queue->
150 * sends some packets consuming the whole queue again->
154 __netif_tx_lock(txq, smp_processor_id());
156 if ((netif_tx_queue_stopped(txq)) &&
157 (bp->state == BNX2X_STATE_OPEN) &&
158 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
159 netif_tx_wake_queue(txq);
161 __netif_tx_unlock(txq);
166 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
169 u16 last_max = fp->last_max_sge;
171 if (SUB_S16(idx, last_max) > 0)
172 fp->last_max_sge = idx;
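/* bnx2x_update_sge_prod - advance the SGE producer after a TPA completion.
 *
 * Clears the SGE mask bit for every page consumed by this CQE, records the
 * highest SGE index seen, and then pushes fp->rx_sge_prod forward over mask
 * elements that are now fully consumed (all bits cleared), re-arming them.
 */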
175 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
176 struct eth_fast_path_rx_cqe *fp_cqe)
178 struct bnx2x *bp = fp->bp;
179 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
180 le16_to_cpu(fp_cqe->len_on_bd)) >>
182 u16 last_max, last_elem, first_elem;
189 /* First mark all used pages */
190 for (i = 0; i < sge_len; i++)
191 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
193 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
194 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
196 /* Here we assume that the last SGE index is the biggest */
197 prefetch((void *)(fp->sge_mask));
198 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
200 last_max = RX_SGE(fp->last_max_sge);
201 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
202 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
204 /* If ring is not full */
205 if (last_elem + 1 != first_elem)
208 /* Now update the prod */
209 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
210 if (likely(fp->sge_mask[i]))
213 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
214 delta += RX_SGE_MASK_ELEM_SZ;
218 fp->rx_sge_prod += delta;
219 /* clear page-end entries */
220 bnx2x_clear_sge_mask_next_elems(fp);
223 DP(NETIF_MSG_RX_STATUS,
224 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
225 fp->last_max_sge, fp->rx_sge_prod);
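/* bnx2x_tpa_start - open a TPA aggregation in the given bin (queue).
 *
 * The spare skb held in the per-queue TPA pool is moved to the producer
 * slot and DMA-mapped, while the skb at the consumer index (holding the
 * start of the aggregation) is parked in the pool, still mapped, until
 * bnx2x_tpa_stop() completes the aggregation.
 */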
228 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
229 struct sk_buff *skb, u16 cons, u16 prod)
231 struct bnx2x *bp = fp->bp;
232 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
233 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
234 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
237 /* move empty skb from pool to prod and map it */
238 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
239 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
240 bp->rx_buf_size, DMA_FROM_DEVICE);
241 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
243 /* move partial skb from cons to pool (don't unmap yet) */
244 fp->tpa_pool[queue] = *cons_rx_buf;
246 /* mark bin state as start - print error if current state != stop */
247 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
248 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
250 fp->tpa_state[queue] = BNX2X_TPA_START;
252 /* point prod_bd to new skb */
253 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
254 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
256 #ifdef BNX2X_STOP_ON_ERROR
257 fp->tpa_queue_used |= (1 << queue);
258 #ifdef _ASM_GENERIC_INT_L64_H
259 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
261 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
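/* bnx2x_fill_frag_skb - attach the SGE pages of an aggregation to the skb.
 *
 * Runs over the SGL reported in the fast-path CQE: each used page is
 * replaced in the SGE ring by a newly allocated one and the old page is
 * added to the skb as a fragment. On an allocation failure the walk stops
 * and an error is returned so the caller can drop the packet.
 */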
267 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
269 struct eth_fast_path_rx_cqe *fp_cqe,
272 struct sw_rx_page *rx_pg, old_rx_pg;
273 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
274 u32 i, frag_len, frag_size, pages;
278 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
279 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
281 /* This is needed in order to enable forwarding support */
283 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
284 max(frag_size, (u32)len_on_bd));
286 #ifdef BNX2X_STOP_ON_ERROR
287 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
288 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
290 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
291 fp_cqe->pkt_len, len_on_bd);
297 /* Run through the SGL and compose the fragmented skb */
298 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
299 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
301 /* FW gives the indices of the SGE as if the ring is an array
302 (meaning that "next" element will consume 2 indices) */
303 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
304 rx_pg = &fp->rx_page_ring[sge_idx];
307 /* If we fail to allocate a substitute page, we simply stop
308 where we are and drop the whole packet */
309 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
311 fp->eth_q_stats.rx_skb_alloc_failed++;
315 /* Unmap the page as we are going to pass it to the stack */
316 dma_unmap_page(&bp->pdev->dev,
317 dma_unmap_addr(&old_rx_pg, mapping),
318 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
320 /* Add one frag and update the appropriate fields in the skb */
321 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
323 skb->data_len += frag_len;
324 skb->truesize += frag_len;
325 skb->len += frag_len;
327 frag_size -= frag_len;
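/* bnx2x_tpa_stop - close a TPA aggregation and pass the skb to the stack.
 *
 * Unmaps the aggregated skb, fixes up the IP header checksum (the total
 * length changed while data was aggregated), attaches the SGE pages via
 * bnx2x_fill_frag_skb() and hands the result to GRO (with or without a
 * VLAN tag). A replacement skb is allocated for the bin; if that fails,
 * the packet is dropped and the old buffer stays in the bin.
 */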
333 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
334 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
337 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
338 struct sk_buff *skb = rx_buf->skb;
340 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
342 /* Unmap skb in the pool anyway, as we are going to change
343 pool entry status to BNX2X_TPA_STOP even if new skb allocation
345 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
346 bp->rx_buf_size, DMA_FROM_DEVICE);
348 if (likely(new_skb)) {
349 /* fix ip xsum and give it to the stack */
350 /* (no need to map the new skb) */
353 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
355 int is_not_hwaccel_vlan_cqe =
356 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
360 prefetch(((char *)(skb)) + 128);
362 #ifdef BNX2X_STOP_ON_ERROR
363 if (pad + len > bp->rx_buf_size) {
364 BNX2X_ERR("skb_put is about to fail... "
365 "pad %d len %d rx_buf_size %d\n",
366 pad, len, bp->rx_buf_size);
372 skb_reserve(skb, pad);
375 skb->protocol = eth_type_trans(skb, bp->dev);
376 skb->ip_summed = CHECKSUM_UNNECESSARY;
381 iph = (struct iphdr *)skb->data;
383 /* If there is no Rx VLAN offloading -
384 take the VLAN tag into account */
385 if (unlikely(is_not_hwaccel_vlan_cqe))
386 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
389 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
392 if (!bnx2x_fill_frag_skb(bp, fp, skb,
393 &cqe->fast_path_cqe, cqe_idx)) {
395 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
396 (!is_not_hwaccel_vlan_cqe))
397 vlan_gro_receive(&fp->napi, bp->vlgrp,
398 le16_to_cpu(cqe->fast_path_cqe.
402 napi_gro_receive(&fp->napi, skb);
404 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
405 " - dropping packet!\n");
410 /* put new skb in bin */
411 fp->tpa_pool[queue].skb = new_skb;
414 /* else drop the packet and keep the buffer in the bin */
415 DP(NETIF_MSG_RX_STATUS,
416 "Failed to allocate new skb - dropping packet!\n");
417 fp->eth_q_stats.rx_skb_alloc_failed++;
420 fp->tpa_state[queue] = BNX2X_TPA_STOP;
423 /* Set Toeplitz hash value in the skb using the value from the
424 * CQE (calculated by HW).
426 static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
429 /* Set Toeplitz hash from CQE */
430 if ((bp->dev->features & NETIF_F_RXHASH) &&
431 (cqe->fast_path_cqe.status_flags &
432 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
434 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
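/* bnx2x_rx_int - RX fast-path handler, called from NAPI poll.
 *
 * Consumes completion queue entries up to the hardware index taken from
 * the status block. Slow-path CQEs are handed to bnx2x_sp_event(); TPA
 * start/stop CQEs go to the aggregation handlers above; regular packets
 * are either copied into a fresh skb (small frames when the MTU is large)
 * or unmapped and passed to the stack, with the RX BD reposted or reused.
 */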
437 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
439 struct bnx2x *bp = fp->bp;
440 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
441 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
444 #ifdef BNX2X_STOP_ON_ERROR
445 if (unlikely(bp->panic))
449 /* CQ "next element" is of the size of the regular element,
450 that's why it's ok here */
451 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
452 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
455 bd_cons = fp->rx_bd_cons;
456 bd_prod = fp->rx_bd_prod;
457 bd_prod_fw = bd_prod;
458 sw_comp_cons = fp->rx_comp_cons;
459 sw_comp_prod = fp->rx_comp_prod;
461 /* Memory barrier necessary as speculative reads of the rx
462 * buffer can be ahead of the index in the status block
466 DP(NETIF_MSG_RX_STATUS,
467 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
468 fp->index, hw_comp_cons, sw_comp_cons);
470 while (sw_comp_cons != hw_comp_cons) {
471 struct sw_rx_bd *rx_buf = NULL;
473 union eth_rx_cqe *cqe;
477 comp_ring_cons = RCQ_BD(sw_comp_cons);
478 bd_prod = RX_BD(bd_prod);
479 bd_cons = RX_BD(bd_cons);
481 /* Prefetch the page containing the BD descriptor
482 at producer's index. It will be needed when new skb is
484 prefetch((void *)(PAGE_ALIGN((unsigned long)
485 (&fp->rx_desc_ring[bd_prod])) -
488 cqe = &fp->rx_comp_ring[comp_ring_cons];
489 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
491 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
492 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
493 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
494 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
495 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
496 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
498 /* is this a slowpath msg? */
499 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
500 bnx2x_sp_event(fp, cqe);
503 /* this is an rx packet */
505 rx_buf = &fp->rx_buf_ring[bd_cons];
508 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
509 pad = cqe->fast_path_cqe.placement_offset;
511 /* If CQE is marked both TPA_START and TPA_END
512 it is a non-TPA CQE */
513 if ((!fp->disable_tpa) &&
514 (TPA_TYPE(cqe_fp_flags) !=
515 (TPA_TYPE_START | TPA_TYPE_END))) {
516 u16 queue = cqe->fast_path_cqe.queue_index;
518 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
519 DP(NETIF_MSG_RX_STATUS,
520 "calling tpa_start on queue %d\n",
523 bnx2x_tpa_start(fp, queue, skb,
526 /* Set Toeplitz hash for an LRO skb */
527 bnx2x_set_skb_rxhash(bp, cqe, skb);
532 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
533 DP(NETIF_MSG_RX_STATUS,
534 "calling tpa_stop on queue %d\n",
537 if (!BNX2X_RX_SUM_FIX(cqe))
538 BNX2X_ERR("STOP on none TCP "
541 /* This is a size of the linear data
543 len = le16_to_cpu(cqe->fast_path_cqe.
545 bnx2x_tpa_stop(bp, fp, queue, pad,
546 len, cqe, comp_ring_cons);
547 #ifdef BNX2X_STOP_ON_ERROR
552 bnx2x_update_sge_prod(fp,
553 &cqe->fast_path_cqe);
558 dma_sync_single_for_device(&bp->pdev->dev,
559 dma_unmap_addr(rx_buf, mapping),
560 pad + RX_COPY_THRESH,
562 prefetch(((char *)(skb)) + 128);
564 /* is this an error packet? */
565 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
567 "ERROR flags %x rx packet %u\n",
568 cqe_fp_flags, sw_comp_cons);
569 fp->eth_q_stats.rx_err_discard_pkt++;
573 /* Since we don't have a jumbo ring
574 * copy small packets if mtu > 1500
576 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
577 (len <= RX_COPY_THRESH)) {
578 struct sk_buff *new_skb;
580 new_skb = netdev_alloc_skb(bp->dev,
582 if (new_skb == NULL) {
584 "ERROR packet dropped "
585 "because of alloc failure\n");
586 fp->eth_q_stats.rx_skb_alloc_failed++;
591 skb_copy_from_linear_data_offset(skb, pad,
592 new_skb->data + pad, len);
593 skb_reserve(new_skb, pad);
594 skb_put(new_skb, len);
596 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
601 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
602 dma_unmap_single(&bp->pdev->dev,
603 dma_unmap_addr(rx_buf, mapping),
606 skb_reserve(skb, pad);
611 "ERROR packet dropped because "
612 "of alloc failure\n");
613 fp->eth_q_stats.rx_skb_alloc_failed++;
615 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
619 skb->protocol = eth_type_trans(skb, bp->dev);
621 /* Set Toeplitz hash for a non-LRO skb */
622 bnx2x_set_skb_rxhash(bp, cqe, skb);
624 skb->ip_summed = CHECKSUM_NONE;
626 if (likely(BNX2X_RX_CSUM_OK(cqe)))
627 skb->ip_summed = CHECKSUM_UNNECESSARY;
629 fp->eth_q_stats.hw_csum_err++;
633 skb_record_rx_queue(skb, fp->index);
636 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
637 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
639 vlan_gro_receive(&fp->napi, bp->vlgrp,
640 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
643 napi_gro_receive(&fp->napi, skb);
649 bd_cons = NEXT_RX_IDX(bd_cons);
650 bd_prod = NEXT_RX_IDX(bd_prod);
651 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
654 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
655 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
657 if (rx_pkt == budget)
661 fp->rx_bd_cons = bd_cons;
662 fp->rx_bd_prod = bd_prod_fw;
663 fp->rx_comp_cons = sw_comp_cons;
664 fp->rx_comp_prod = sw_comp_prod;
666 /* Update producers */
667 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
670 fp->rx_pkt += rx_pkt;
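/* MSI-X fast-path ISR: acknowledge the status block while leaving the
 * interrupt disabled in the IGU and schedule NAPI, which does the actual
 * RX/TX processing.
 */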
676 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
678 struct bnx2x_fastpath *fp = fp_cookie;
679 struct bnx2x *bp = fp->bp;
681 /* Return here if interrupt is disabled */
682 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
683 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
687 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
688 fp->index, fp->sb_id);
689 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
691 #ifdef BNX2X_STOP_ON_ERROR
692 if (unlikely(bp->panic))
696 /* Handle Rx and Tx according to MSI-X vector */
697 prefetch(fp->rx_cons_sb);
698 prefetch(fp->tx_cons_sb);
699 prefetch(&fp->status_blk->u_status_block.status_block_index);
700 prefetch(&fp->status_blk->c_status_block.status_block_index);
701 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
707 /* HW Lock for shared dual port PHYs */
708 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
710 mutex_lock(&bp->port.phy_mutex);
712 if (bp->port.need_hw_lock)
713 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
716 void bnx2x_release_phy_lock(struct bnx2x *bp)
718 if (bp->port.need_hw_lock)
719 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
721 mutex_unlock(&bp->port.phy_mutex);
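/* Report the link state (speed, duplex, flow control) to the log and
 * update the carrier. The printed line speed is capped at the per-function
 * maximum bandwidth taken from mf_config.
 */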
724 void bnx2x_link_report(struct bnx2x *bp)
726 if (bp->flags & MF_FUNC_DIS) {
727 netif_carrier_off(bp->dev);
728 netdev_err(bp->dev, "NIC Link is Down\n");
732 if (bp->link_vars.link_up) {
735 if (bp->state == BNX2X_STATE_OPEN)
736 netif_carrier_on(bp->dev);
737 netdev_info(bp->dev, "NIC Link is Up, ");
739 line_speed = bp->link_vars.line_speed;
744 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
745 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
746 if (vn_max_rate < line_speed)
747 line_speed = vn_max_rate;
749 pr_cont("%d Mbps ", line_speed);
751 if (bp->link_vars.duplex == DUPLEX_FULL)
752 pr_cont("full duplex");
754 pr_cont("half duplex");
756 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
757 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
758 pr_cont(", receive ");
759 if (bp->link_vars.flow_ctrl &
761 pr_cont("& transmit ");
763 pr_cont(", transmit ");
765 pr_cont("flow control ON");
769 } else { /* link_down */
770 netif_carrier_off(bp->dev);
771 netdev_err(bp->dev, "NIC Link is Down\n");
775 void bnx2x_init_rx_rings(struct bnx2x *bp)
777 int func = BP_FUNC(bp);
778 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
779 ETH_MAX_AGGREGATION_QUEUES_E1H;
780 u16 ring_prod, cqe_ring_prod;
783 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
785 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
787 if (bp->flags & TPA_ENABLE_FLAG) {
789 for_each_queue(bp, j) {
790 struct bnx2x_fastpath *fp = &bp->fp[j];
792 for (i = 0; i < max_agg_queues; i++) {
793 fp->tpa_pool[i].skb =
794 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
795 if (!fp->tpa_pool[i].skb) {
796 BNX2X_ERR("Failed to allocate TPA "
797 "skb pool for queue[%d] - "
798 "disabling TPA on this "
800 bnx2x_free_tpa_pool(bp, fp, i);
804 dma_unmap_addr_set((struct sw_rx_bd *)
805 &bp->fp->tpa_pool[i],
807 fp->tpa_state[i] = BNX2X_TPA_STOP;
812 for_each_queue(bp, j) {
813 struct bnx2x_fastpath *fp = &bp->fp[j];
816 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
817 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
819 /* "next page" elements initialization */
821 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
822 struct eth_rx_sge *sge;
824 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
826 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
827 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
829 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
830 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
833 bnx2x_init_sge_ring_bit_mask(fp);
836 for (i = 1; i <= NUM_RX_RINGS; i++) {
837 struct eth_rx_bd *rx_bd;
839 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
841 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
842 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
844 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
845 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
849 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
850 struct eth_rx_cqe_next_page *nextpg;
852 nextpg = (struct eth_rx_cqe_next_page *)
853 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
855 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
856 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
858 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
859 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
862 /* Allocate SGEs and initialize the ring elements */
863 for (i = 0, ring_prod = 0;
864 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
866 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
867 BNX2X_ERR("was only able to allocate "
869 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
870 /* Cleanup already allocated elements */
871 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
872 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
877 ring_prod = NEXT_SGE_IDX(ring_prod);
879 fp->rx_sge_prod = ring_prod;
881 /* Allocate BDs and initialize BD ring */
882 fp->rx_comp_cons = 0;
883 cqe_ring_prod = ring_prod = 0;
884 for (i = 0; i < bp->rx_ring_size; i++) {
885 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
886 BNX2X_ERR("was only able to allocate "
887 "%d rx skbs on queue[%d]\n", i, j);
888 fp->eth_q_stats.rx_skb_alloc_failed++;
891 ring_prod = NEXT_RX_IDX(ring_prod);
892 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
893 WARN_ON(ring_prod <= i);
896 fp->rx_bd_prod = ring_prod;
897 /* must not have more available CQEs than BDs */
898 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
900 fp->rx_pkt = fp->rx_calls = 0;
903 * this will generate an interrupt (to the TSTORM)
904 * must only be done after chip is initialized
906 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
911 REG_WR(bp, BAR_USTRORM_INTMEM +
912 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
913 U64_LO(fp->rx_comp_mapping));
914 REG_WR(bp, BAR_USTRORM_INTMEM +
915 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
916 U64_HI(fp->rx_comp_mapping));
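/* Free every skb still pending on the TX rings (unload/error path),
 * reusing bnx2x_free_tx_pkt() for the BD bookkeeping.
 */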
919 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
923 for_each_queue(bp, i) {
924 struct bnx2x_fastpath *fp = &bp->fp[i];
926 u16 bd_cons = fp->tx_bd_cons;
927 u16 sw_prod = fp->tx_pkt_prod;
928 u16 sw_cons = fp->tx_pkt_cons;
930 while (sw_cons != sw_prod) {
931 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
937 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
941 for_each_queue(bp, j) {
942 struct bnx2x_fastpath *fp = &bp->fp[j];
944 for (i = 0; i < NUM_RX_BD; i++) {
945 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
946 struct sk_buff *skb = rx_buf->skb;
951 dma_unmap_single(&bp->pdev->dev,
952 dma_unmap_addr(rx_buf, mapping),
953 bp->rx_buf_size, DMA_FROM_DEVICE);
958 if (!fp->disable_tpa)
959 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
960 ETH_MAX_AGGREGATION_QUEUES_E1 :
961 ETH_MAX_AGGREGATION_QUEUES_E1H);
965 void bnx2x_free_skbs(struct bnx2x *bp)
967 bnx2x_free_tx_skbs(bp);
968 bnx2x_free_rx_skbs(bp);
971 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
975 free_irq(bp->msix_table[0].vector, bp->dev);
976 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
977 bp->msix_table[0].vector);
982 for_each_queue(bp, i) {
983 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
984 "state %x\n", i, bp->msix_table[i + offset].vector,
985 bnx2x_fp(bp, i, state));
987 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
991 void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
993 if (bp->flags & USING_MSIX_FLAG) {
995 bnx2x_free_msix_irqs(bp);
996 pci_disable_msix(bp->pdev);
997 bp->flags &= ~USING_MSIX_FLAG;
999 } else if (bp->flags & USING_MSI_FLAG) {
1001 free_irq(bp->pdev->irq, bp->dev);
1002 pci_disable_msi(bp->pdev);
1003 bp->flags &= ~USING_MSI_FLAG;
1005 } else if (!disable_only)
1006 free_irq(bp->pdev->irq, bp->dev);
1009 static int bnx2x_enable_msix(struct bnx2x *bp)
1011 int i, rc, offset = 1;
1014 bp->msix_table[0].entry = igu_vec;
1015 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
1018 igu_vec = BP_L_ID(bp) + offset;
1019 bp->msix_table[1].entry = igu_vec;
1020 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
1023 for_each_queue(bp, i) {
1024 igu_vec = BP_L_ID(bp) + offset + i;
1025 bp->msix_table[i + offset].entry = igu_vec;
1026 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1027 "(fastpath #%u)\n", i + offset, igu_vec, i);
1030 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
1031 BNX2X_NUM_QUEUES(bp) + offset);
1034 * reconfigure number of tx/rx queues according to available
1037 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1038 /* vectors available for FP */
1039 int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
1042 "Trying to use less MSI-X vectors: %d\n", rc);
1044 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1048 "MSI-X is not attainable rc %d\n", rc);
1052 bp->num_queues = min(bp->num_queues, fp_vec);
1054 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1057 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1061 bp->flags |= USING_MSIX_FLAG;
1066 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1068 int i, rc, offset = 1;
1070 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
1071 bp->dev->name, bp->dev);
1073 BNX2X_ERR("request sp irq failed\n");
1080 for_each_queue(bp, i) {
1081 struct bnx2x_fastpath *fp = &bp->fp[i];
1082 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1085 rc = request_irq(bp->msix_table[i + offset].vector,
1086 bnx2x_msix_fp_int, 0, fp->name, fp);
1088 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
1089 bnx2x_free_msix_irqs(bp);
1093 fp->state = BNX2X_FP_STATE_IRQ;
1096 i = BNX2X_NUM_QUEUES(bp);
1097 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1099 bp->msix_table[0].vector,
1100 0, bp->msix_table[offset].vector,
1101 i - 1, bp->msix_table[offset + i - 1].vector);
1106 static int bnx2x_enable_msi(struct bnx2x *bp)
1110 rc = pci_enable_msi(bp->pdev);
1112 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1115 bp->flags |= USING_MSI_FLAG;
1120 static int bnx2x_req_irq(struct bnx2x *bp)
1122 unsigned long flags;
1125 if (bp->flags & USING_MSI_FLAG)
1128 flags = IRQF_SHARED;
1130 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1131 bp->dev->name, bp->dev);
1133 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1138 static void bnx2x_napi_enable(struct bnx2x *bp)
1142 for_each_queue(bp, i)
1143 napi_enable(&bnx2x_fp(bp, i, napi));
1146 static void bnx2x_napi_disable(struct bnx2x *bp)
1150 for_each_queue(bp, i)
1151 napi_disable(&bnx2x_fp(bp, i, napi));
1154 void bnx2x_netif_start(struct bnx2x *bp)
1158 intr_sem = atomic_dec_and_test(&bp->intr_sem);
1159 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1162 if (netif_running(bp->dev)) {
1163 bnx2x_napi_enable(bp);
1164 bnx2x_int_enable(bp);
1165 if (bp->state == BNX2X_STATE_OPEN)
1166 netif_tx_wake_all_queues(bp->dev);
1171 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1173 bnx2x_int_disable_sync(bp, disable_hw);
1174 bnx2x_napi_disable(bp);
1175 netif_tx_disable(bp->dev);
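/* Choose the number of RX/TX queues according to the configured interrupt
 * mode: a single queue for INTx/MSI, otherwise one per MSI-X vector, where
 * bnx2x_enable_msix() may trim the count if fewer vectors are granted.
 */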
1177 static int bnx2x_set_num_queues(struct bnx2x *bp)
1181 switch (bp->int_mode) {
1185 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
1188 /* Set number of queues according to bp->multi_mode value */
1189 bnx2x_set_num_queues_msix(bp);
1191 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
1194 /* if we can't use MSI-X we only need one fp,
1195 * so try to enable MSI-X with the requested number of fp's
1196 * and fallback to MSI or legacy INTx with one fp
1198 rc = bnx2x_enable_msix(bp);
1200 /* failed to enable MSI-X */
1204 bp->dev->real_num_tx_queues = bp->num_queues;
1208 /* must be called with rtnl_lock */
1209 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1214 #ifdef BNX2X_STOP_ON_ERROR
1215 if (unlikely(bp->panic))
1219 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1221 rc = bnx2x_set_num_queues(bp);
1223 if (bnx2x_alloc_mem(bp)) {
1224 bnx2x_free_irq(bp, true);
1228 for_each_queue(bp, i)
1229 bnx2x_fp(bp, i, disable_tpa) =
1230 ((bp->flags & TPA_ENABLE_FLAG) == 0);
1232 for_each_queue(bp, i)
1233 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
1236 bnx2x_napi_enable(bp);
1238 if (bp->flags & USING_MSIX_FLAG) {
1239 rc = bnx2x_req_msix_irqs(bp);
1241 bnx2x_free_irq(bp, true);
1245 /* Fall to INTx if failed to enable MSI-X due to lack of
1246 memory (in bnx2x_set_num_queues()) */
1247 if ((rc != -ENOMEM) && (bp->int_mode != INT_MODE_INTx))
1248 bnx2x_enable_msi(bp);
1250 rc = bnx2x_req_irq(bp);
1252 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1253 bnx2x_free_irq(bp, true);
1256 if (bp->flags & USING_MSI_FLAG) {
1257 bp->dev->irq = bp->pdev->irq;
1258 netdev_info(bp->dev, "using MSI IRQ %d\n",
1263 /* Send LOAD_REQUEST command to the MCP.
1264 Returns the type of LOAD command:
1265 if it is the first port to be initialized,
1266 common blocks should be initialized, otherwise not
1268 if (!BP_NOMCP(bp)) {
1269 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
1271 BNX2X_ERR("MCP response failure, aborting\n");
1275 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1276 rc = -EBUSY; /* other port in diagnostic mode */
1281 int port = BP_PORT(bp);
1283 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
1284 load_count[0], load_count[1], load_count[2]);
1286 load_count[1 + port]++;
1287 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
1288 load_count[0], load_count[1], load_count[2]);
1289 if (load_count[0] == 1)
1290 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1291 else if (load_count[1 + port] == 1)
1292 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1294 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1297 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1298 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1302 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1305 rc = bnx2x_init_hw(bp, load_code);
1307 BNX2X_ERR("HW init failed, aborting\n");
1308 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
1309 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
1310 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
1314 /* Setup NIC internals and enable interrupts */
1315 bnx2x_nic_init(bp, load_code);
1317 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
1318 (bp->common.shmem2_base))
1319 SHMEM2_WR(bp, dcc_support,
1320 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1321 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1323 /* Send LOAD_DONE command to MCP */
1324 if (!BP_NOMCP(bp)) {
1325 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
1327 BNX2X_ERR("MCP response failure, aborting\n");
1333 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1335 rc = bnx2x_setup_leading(bp);
1337 BNX2X_ERR("Setup leading failed!\n");
1338 #ifndef BNX2X_STOP_ON_ERROR
1346 if (CHIP_IS_E1H(bp))
1347 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
1348 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1349 bp->flags |= MF_FUNC_DIS;
1352 if (bp->state == BNX2X_STATE_OPEN) {
1354 /* Enable Timer scan */
1355 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
1357 for_each_nondefault_queue(bp, i) {
1358 rc = bnx2x_setup_multi(bp, i);
1368 bnx2x_set_eth_mac_addr_e1(bp, 1);
1370 bnx2x_set_eth_mac_addr_e1h(bp, 1);
1372 /* Set iSCSI L2 MAC */
1373 mutex_lock(&bp->cnic_mutex);
1374 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
1375 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
1376 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
1377 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
1380 mutex_unlock(&bp->cnic_mutex);
1385 bnx2x_initial_phy_init(bp, load_mode);
1387 /* Start fast path */
1388 switch (load_mode) {
1390 if (bp->state == BNX2X_STATE_OPEN) {
1391 /* Tx queue should be only reenabled */
1392 netif_tx_wake_all_queues(bp->dev);
1394 /* Initialize the receive filter. */
1395 bnx2x_set_rx_mode(bp->dev);
1399 netif_tx_start_all_queues(bp->dev);
1400 if (bp->state != BNX2X_STATE_OPEN)
1401 netif_tx_disable(bp->dev);
1402 /* Initialize the receive filter. */
1403 bnx2x_set_rx_mode(bp->dev);
1407 /* Initialize the receive filter. */
1408 bnx2x_set_rx_mode(bp->dev);
1409 bp->state = BNX2X_STATE_DIAG;
1417 bnx2x__link_status_update(bp);
1419 /* start the timer */
1420 mod_timer(&bp->timer, jiffies + bp->current_interval);
1423 bnx2x_setup_cnic_irq_info(bp);
1424 if (bp->state == BNX2X_STATE_OPEN)
1425 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1427 bnx2x_inc_load_cnt(bp);
1433 /* Disable Timer scan */
1434 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
1437 bnx2x_int_disable_sync(bp, 1);
1438 if (!BP_NOMCP(bp)) {
1439 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
1440 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
1443 /* Free SKBs, SGEs, TPA pool and driver internals */
1444 bnx2x_free_skbs(bp);
1445 for_each_queue(bp, i)
1446 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1449 bnx2x_free_irq(bp, false);
1451 bnx2x_napi_disable(bp);
1452 for_each_queue(bp, i)
1453 netif_napi_del(&bnx2x_fp(bp, i, napi));
1459 /* must be called with rtnl_lock */
1460 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1464 if (bp->state == BNX2X_STATE_CLOSED) {
1465 /* Interface has been removed - nothing to recover */
1466 bp->recovery_state = BNX2X_RECOVERY_DONE;
1468 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1475 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1477 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1479 /* Set "drop all" */
1480 bp->rx_mode = BNX2X_RX_MODE_NONE;
1481 bnx2x_set_storm_rx_mode(bp);
1483 /* Disable HW interrupts, NAPI and Tx */
1484 bnx2x_netif_stop(bp, 1);
1485 netif_carrier_off(bp->dev);
1487 del_timer_sync(&bp->timer);
1488 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
1489 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
1490 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1493 bnx2x_free_irq(bp, false);
1495 /* Cleanup the chip if needed */
1496 if (unload_mode != UNLOAD_RECOVERY)
1497 bnx2x_chip_cleanup(bp, unload_mode);
1501 /* Free SKBs, SGEs, TPA pool and driver internals */
1502 bnx2x_free_skbs(bp);
1503 for_each_queue(bp, i)
1504 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1505 for_each_queue(bp, i)
1506 netif_napi_del(&bnx2x_fp(bp, i, napi));
1509 bp->state = BNX2X_STATE_CLOSED;
1511 /* The last driver instance must disable "close the gate" if there is no
1512 * parity attention or "process kill" pending.
1514 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1515 bnx2x_reset_is_done(bp))
1516 bnx2x_disable_close_the_gate(bp);
1518 /* Reset the MCP mailbox sequence if there is an ongoing recovery */
1519 if (unload_mode == UNLOAD_RECOVERY)
1524 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1528 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1532 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1533 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
1534 PCI_PM_CTRL_PME_STATUS));
1536 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
1537 /* delay required during transition out of D3hot */
1542 /* If there are other clients above, don't
1543 shut down the power */
1544 if (atomic_read(&bp->pdev->enable_cnt) != 1)
1546 /* Don't shut down the power for emulation and FPGA */
1547 if (CHIP_REV_IS_SLOW(bp))
1550 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1554 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1556 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1559 /* No more memory access after this point until
1560 * device is brought back to D0.
1573 * net_device service functions
1576 static int bnx2x_poll(struct napi_struct *napi, int budget)
1579 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1581 struct bnx2x *bp = fp->bp;
1584 #ifdef BNX2X_STOP_ON_ERROR
1585 if (unlikely(bp->panic)) {
1586 napi_complete(napi);
1591 if (bnx2x_has_tx_work(fp))
1594 if (bnx2x_has_rx_work(fp)) {
1595 work_done += bnx2x_rx_int(fp, budget - work_done);
1597 /* must not complete if we consumed full budget */
1598 if (work_done >= budget)
1602 /* Fall out from the NAPI loop if needed */
1603 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1604 bnx2x_update_fpsb_idx(fp);
1605 /* bnx2x_has_rx_work() reads the status block, thus we need
1606 * to ensure that status block indices have been actually read
1607 * (bnx2x_update_fpsb_idx) prior to this check
1608 * (bnx2x_has_rx_work) so that we won't write the "newer"
1609 * value of the status block to IGU (if there was a DMA right
1610 * after bnx2x_has_rx_work and if there is no rmb, the memory
1611 * reading (bnx2x_update_fpsb_idx) may be postponed to right
1612 * before bnx2x_ack_sb). In this case there will never be
1613 * another interrupt until there is another update of the
1614 * status block, while there is still unhandled work.
1618 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1619 napi_complete(napi);
1620 /* Re-enable interrupts */
1621 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1622 le16_to_cpu(fp->fp_c_idx),
1624 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1625 le16_to_cpu(fp->fp_u_idx),
1636 /* We split the first BD into headers and data BDs
1637 * to ease the pain of our fellow microcode engineers;
1638 * we use one mapping for both BDs.
1639 * So far this has only been observed to happen
1640 * in Other Operating Systems(TM)
1642 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1643 struct bnx2x_fastpath *fp,
1644 struct sw_tx_bd *tx_buf,
1645 struct eth_tx_start_bd **tx_bd, u16 hlen,
1646 u16 bd_prod, int nbd)
1648 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1649 struct eth_tx_bd *d_tx_bd;
1651 int old_len = le16_to_cpu(h_tx_bd->nbytes);
1653 /* first fix first BD */
1654 h_tx_bd->nbd = cpu_to_le16(nbd);
1655 h_tx_bd->nbytes = cpu_to_le16(hlen);
1657 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1658 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1659 h_tx_bd->addr_lo, h_tx_bd->nbd);
1661 /* now get a new data BD
1662 * (after the pbd) and fill it */
1663 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1664 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1666 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1667 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1669 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1670 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1671 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1673 /* this marks the BD as one that has no individual mapping */
1674 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1676 DP(NETIF_MSG_TX_QUEUED,
1677 "TSO split data size is %d (%x:%x)\n",
1678 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1681 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
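/* bnx2x_csum_fix - re-bias a partial checksum around the transport header.
 *
 * Depending on the sign of "fix", the bytes between t_header - fix and
 * t_header are folded out of (csum_sub) or into (csum_add) the running
 * sum, and the result is byte-swapped for the parse BD. Used from
 * bnx2x_start_xmit() to work around a HW checksum bug.
 */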
1686 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1689 csum = (u16) ~csum_fold(csum_sub(csum,
1690 csum_partial(t_header - fix, fix, 0)));
1693 csum = (u16) ~csum_fold(csum_add(csum,
1694 csum_partial(t_header, -fix, 0)));
1696 return swab16(csum);
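/* bnx2x_xmit_type - classify an outgoing skb into XMIT_CSUM_* / XMIT_GSO_*
 * flags (IPv4 vs IPv6, TCP checksum offload, TSO v4/v6) so the transmit
 * path can program the start BD and parse BD accordingly.
 */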
1699 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1703 if (skb->ip_summed != CHECKSUM_PARTIAL)
1707 if (skb->protocol == htons(ETH_P_IPV6)) {
1709 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1710 rc |= XMIT_CSUM_TCP;
1714 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1715 rc |= XMIT_CSUM_TCP;
1719 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
1720 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
1722 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
1723 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
1728 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1729 /* check if the packet requires linearization (it is too fragmented);
1730 no need to check fragmentation if page size > 8K (there will be no
1731 violation of FW restrictions) */
1732 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
1737 int first_bd_sz = 0;
1739 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
1740 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
1742 if (xmit_type & XMIT_GSO) {
1743 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
1744 /* Check if LSO packet needs to be copied:
1745 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
1746 int wnd_size = MAX_FETCH_BD - 3;
1747 /* Number of windows to check */
1748 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
1753 /* Headers length */
1754 hlen = (int)(skb_transport_header(skb) - skb->data) +
1757 /* Amount of data (w/o headers) on linear part of SKB*/
1758 first_bd_sz = skb_headlen(skb) - hlen;
1760 wnd_sum = first_bd_sz;
1762 /* Calculate the first sum - it's special */
1763 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
1765 skb_shinfo(skb)->frags[frag_idx].size;
1767 /* If there was data on linear skb data - check it */
1768 if (first_bd_sz > 0) {
1769 if (unlikely(wnd_sum < lso_mss)) {
1774 wnd_sum -= first_bd_sz;
1777 /* Others are easier: run through the frag list and
1778 check all windows */
1779 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
1781 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
1783 if (unlikely(wnd_sum < lso_mss)) {
1788 skb_shinfo(skb)->frags[wnd_idx].size;
1791 /* in non-LSO too fragmented packet should always
1798 if (unlikely(to_copy))
1799 DP(NETIF_MSG_TX_QUEUED,
1800 "Linearization IS REQUIRED for %s packet. "
1801 "num_frags %d hlen %d first_bd_sz %d\n",
1802 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
1803 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
1809 /* called with netif_tx_lock
1810 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
1811 * netif_wake_queue()
1813 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1815 struct bnx2x *bp = netdev_priv(dev);
1816 struct bnx2x_fastpath *fp;
1817 struct netdev_queue *txq;
1818 struct sw_tx_bd *tx_buf;
1819 struct eth_tx_start_bd *tx_start_bd;
1820 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
1821 struct eth_tx_parse_bd *pbd = NULL;
1822 u16 pkt_prod, bd_prod;
1825 u32 xmit_type = bnx2x_xmit_type(bp, skb);
1828 __le16 pkt_size = 0;
1830 u8 mac_type = UNICAST_ADDRESS;
1832 #ifdef BNX2X_STOP_ON_ERROR
1833 if (unlikely(bp->panic))
1834 return NETDEV_TX_BUSY;
1837 fp_index = skb_get_queue_mapping(skb);
1838 txq = netdev_get_tx_queue(dev, fp_index);
1840 fp = &bp->fp[fp_index];
1842 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
1843 fp->eth_q_stats.driver_xoff++;
1844 netif_tx_stop_queue(txq);
1845 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
1846 return NETDEV_TX_BUSY;
1849 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
1850 " gso type %x xmit_type %x\n",
1851 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
1852 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
1854 eth = (struct ethhdr *)skb->data;
1856 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
1857 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
1858 if (is_broadcast_ether_addr(eth->h_dest))
1859 mac_type = BROADCAST_ADDRESS;
1861 mac_type = MULTICAST_ADDRESS;
1864 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1865 /* First, check if we need to linearize the skb (due to FW
1866 restrictions). No need to check fragmentation if page size > 8K
1867 (there will be no violation of FW restrictions) */
1868 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
1869 /* Statistics of linearization */
1871 if (skb_linearize(skb) != 0) {
1872 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
1873 "silently dropping this SKB\n");
1874 dev_kfree_skb_any(skb);
1875 return NETDEV_TX_OK;
1881 Please read carefully. First we use one BD which we mark as start,
1882 then we have a parsing info BD (used for TSO or xsum),
1883 and only then we have the rest of the TSO BDs.
1884 (don't forget to mark the last one as last,
1885 and to unmap only AFTER you write to the BD ...)
1886 And above all, all pbd sizes are in words - NOT DWORDS!
1889 pkt_prod = fp->tx_pkt_prod++;
1890 bd_prod = TX_BD(fp->tx_bd_prod);
1892 /* get a tx_buf and first BD */
1893 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
1894 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
1896 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
1897 tx_start_bd->general_data = (mac_type <<
1898 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
1900 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
1902 /* remember the first BD of the packet */
1903 tx_buf->first_bd = fp->tx_bd_prod;
1907 DP(NETIF_MSG_TX_QUEUED,
1908 "sending pkt %u @%p next_idx %u bd %u @%p\n",
1909 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
1912 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
1913 (bp->flags & HW_VLAN_TX_FLAG)) {
1914 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
1915 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
1918 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
1920 /* turn on parsing and get a BD */
1921 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1922 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
1924 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
1926 if (xmit_type & XMIT_CSUM) {
1927 hlen = (skb_network_header(skb) - skb->data) / 2;
1929 /* for now NS flag is not used in Linux */
1931 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
1932 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
1934 pbd->ip_hlen = (skb_transport_header(skb) -
1935 skb_network_header(skb)) / 2;
1937 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
1939 pbd->total_hlen = cpu_to_le16(hlen);
1942 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
1944 if (xmit_type & XMIT_CSUM_V4)
1945 tx_start_bd->bd_flags.as_bitfield |=
1946 ETH_TX_BD_FLAGS_IP_CSUM;
1948 tx_start_bd->bd_flags.as_bitfield |=
1949 ETH_TX_BD_FLAGS_IPV6;
1951 if (xmit_type & XMIT_CSUM_TCP) {
1952 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
1955 s8 fix = SKB_CS_OFF(skb); /* signed! */
1957 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
1959 DP(NETIF_MSG_TX_QUEUED,
1960 "hlen %d fix %d csum before fix %x\n",
1961 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
1963 /* HW bug: fixup the CSUM */
1964 pbd->tcp_pseudo_csum =
1965 bnx2x_csum_fix(skb_transport_header(skb),
1968 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
1969 pbd->tcp_pseudo_csum);
1973 mapping = dma_map_single(&bp->pdev->dev, skb->data,
1974 skb_headlen(skb), DMA_TO_DEVICE);
1976 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1977 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1978 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
1979 tx_start_bd->nbd = cpu_to_le16(nbd);
1980 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
1981 pkt_size = tx_start_bd->nbytes;
1983 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
1984 " nbytes %d flags %x vlan %x\n",
1985 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
1986 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
1987 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
1989 if (xmit_type & XMIT_GSO) {
1991 DP(NETIF_MSG_TX_QUEUED,
1992 "TSO packet len %d hlen %d total len %d tso size %d\n",
1993 skb->len, hlen, skb_headlen(skb),
1994 skb_shinfo(skb)->gso_size);
1996 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
1998 if (unlikely(skb_headlen(skb) > hlen))
1999 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2000 hlen, bd_prod, ++nbd);
2002 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2003 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2004 pbd->tcp_flags = pbd_tcp_flags(skb);
2006 if (xmit_type & XMIT_GSO_V4) {
2007 pbd->ip_id = swab16(ip_hdr(skb)->id);
2008 pbd->tcp_pseudo_csum =
2009 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2011 0, IPPROTO_TCP, 0));
2014 pbd->tcp_pseudo_csum =
2015 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2016 &ipv6_hdr(skb)->daddr,
2017 0, IPPROTO_TCP, 0));
2019 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
2021 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2023 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2024 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2026 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2027 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2028 if (total_pkt_bd == NULL)
2029 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2031 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2033 frag->size, DMA_TO_DEVICE);
2035 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2036 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2037 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2038 le16_add_cpu(&pkt_size, frag->size);
2040 DP(NETIF_MSG_TX_QUEUED,
2041 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2042 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2043 le16_to_cpu(tx_data_bd->nbytes));
2046 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2048 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2050 /* now send a tx doorbell, counting the next BD
2051 * if the packet contains or ends with it
2053 if (TX_BD_POFF(bd_prod) < nbd)
2056 if (total_pkt_bd != NULL)
2057 total_pkt_bd->total_pkt_bytes = pkt_size;
2060 DP(NETIF_MSG_TX_QUEUED,
2061 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
2062 " tcp_flags %x xsum %x seq %u hlen %u\n",
2063 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
2064 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
2065 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
2067 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2070 * Make sure that the BD data is updated before updating the producer
2071 * since FW might read the BD right after the producer is updated.
2072 * This is only applicable for weak-ordered memory model archs such
2073 * as IA-64. The following barrier is also mandatory since the FW
2074 * assumes packets must have BDs.
2078 fp->tx_db.data.prod += nbd;
2080 DOORBELL(bp, fp->index, fp->tx_db.raw);
2084 fp->tx_bd_prod += nbd;
2086 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2087 netif_tx_stop_queue(txq);
2089 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
2090 * ordering of set_bit() in netif_tx_stop_queue() and read of
2094 fp->eth_q_stats.driver_xoff++;
2095 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2096 netif_tx_wake_queue(txq);
2100 return NETDEV_TX_OK;
2102 /* called with rtnl_lock */
2103 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2105 struct sockaddr *addr = p;
2106 struct bnx2x *bp = netdev_priv(dev);
2108 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2111 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2112 if (netif_running(dev)) {
2114 bnx2x_set_eth_mac_addr_e1(bp, 1);
2116 bnx2x_set_eth_mac_addr_e1h(bp, 1);
2122 /* called with rtnl_lock */
2123 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2125 struct bnx2x *bp = netdev_priv(dev);
2128 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2129 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2133 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2134 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2137 /* This does not race with packet allocation
2138 * because the actual alloc size is
2139 * only updated as part of load
2143 if (netif_running(dev)) {
2144 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2145 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2151 void bnx2x_tx_timeout(struct net_device *dev)
2153 struct bnx2x *bp = netdev_priv(dev);
2155 #ifdef BNX2X_STOP_ON_ERROR
2159 /* This allows the netif to be shut down gracefully before resetting */
2160 schedule_delayed_work(&bp->reset_task, 0);
2164 /* called with rtnl_lock */
2165 void bnx2x_vlan_rx_register(struct net_device *dev,
2166 struct vlan_group *vlgrp)
2168 struct bnx2x *bp = netdev_priv(dev);
2172 /* Set flags according to the required capabilities */
2173 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
2175 if (dev->features & NETIF_F_HW_VLAN_TX)
2176 bp->flags |= HW_VLAN_TX_FLAG;
2178 if (dev->features & NETIF_F_HW_VLAN_RX)
2179 bp->flags |= HW_VLAN_RX_FLAG;
2181 if (netif_running(dev))
2182 bnx2x_set_client_config(bp);
2186 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2188 struct net_device *dev = pci_get_drvdata(pdev);
2192 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2195 bp = netdev_priv(dev);
2199 pci_save_state(pdev);
2201 if (!netif_running(dev)) {
2206 netif_device_detach(dev);
2208 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2210 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2217 int bnx2x_resume(struct pci_dev *pdev)
2219 struct net_device *dev = pci_get_drvdata(pdev);
2224 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2227 bp = netdev_priv(dev);
2229 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2230 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2236 pci_restore_state(pdev);
2238 if (!netif_running(dev)) {
2243 bnx2x_set_power_state(bp, PCI_D0);
2244 netif_device_attach(dev);
2246 rc = bnx2x_nic_load(bp, LOAD_OPEN);