1 /* bnx2x_cmn.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2010 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/etherdevice.h>
21 #include <net/ip6_checksum.h>
22 #include <linux/firmware.h>
23 #include "bnx2x_cmn.h"
26 #include <linux/if_vlan.h>
29 #include "bnx2x_init.h"
32 /* free skb in the packet ring at pos idx
33 * return idx of last bd freed
35 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
38 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
39 struct eth_tx_start_bd *tx_start_bd;
40 struct eth_tx_bd *tx_data_bd;
41 struct sk_buff *skb = tx_buf->skb;
42 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
45 /* prefetch skb end pointer to speed up dev_kfree_skb() */
48 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
52 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
53 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
54 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
55 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
57 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
58 #ifdef BNX2X_STOP_ON_ERROR
59 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
60 BNX2X_ERR("BAD nbd!\n");
64 new_cons = nbd + tx_buf->first_bd;
67 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
69 /* Skip a parse bd... */
71 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
73 /* ...and the TSO split header bd since they have no mapping */
74 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
76 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
82 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
83 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
84 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
85 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
87 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
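/* Illustrative aside (not driver code): TX_BD() masks a free-running
 * 16-bit index down to a ring slot, and NEXT_TX_IDX() advances it while
 * skipping the "next page" descriptors at the end of each BD page.
 * A minimal sketch of the same masked-ring pattern, with a hypothetical
 * power-of-two RING_SIZE standing in for the driver's constants:
 *
 *     #define RING_SIZE 512                 // must be a power of two
 *     #define RING_MASK (RING_SIZE - 1)
 *     u16 prod = 0;                         // free-running producer
 *     u16 slot = prod & RING_MASK;          // TX_BD() analogue
 *     prod++;                               // NEXT_TX_IDX(), minus page skip
 *
 * Free-running producer/consumer indices keep "full" and "empty"
 * distinguishable: entries in use = (u16)(prod - cons).
 */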
99 int bnx2x_tx_int(struct bnx2x_fastpath *fp)
101 struct bnx2x *bp = fp->bp;
102 struct netdev_queue *txq;
103 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
105 #ifdef BNX2X_STOP_ON_ERROR
106 if (unlikely(bp->panic))
110 txq = netdev_get_tx_queue(bp->dev, fp->index);
111 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
112 sw_cons = fp->tx_pkt_cons;
114 while (sw_cons != hw_cons) {
117 pkt_cons = TX_BD(sw_cons);
119 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
121 fp->index, hw_cons, sw_cons, pkt_cons);
123 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
127 fp->tx_pkt_cons = sw_cons;
128 fp->tx_bd_cons = bd_cons;
130 /* Need to make the tx_bd_cons update visible to start_xmit()
131 * before checking for netif_tx_queue_stopped(). Without the
132 * memory barrier, there is a small possibility that
133 * start_xmit() will miss it and cause the queue to be stopped forever.
138 if (unlikely(netif_tx_queue_stopped(txq))) {
139 /* Taking tx_lock() is needed to prevent reenabling the queue
140 * while it's empty. This could happen if rx_action() gets
141 * suspended in bnx2x_tx_int() after the condition before
142 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
144 * stops the queue->sees fresh tx_bd_cons->releases the queue->
145 * sends some packets consuming the whole queue again->
149 __netif_tx_lock(txq, smp_processor_id());
151 if ((netif_tx_queue_stopped(txq)) &&
152 (bp->state == BNX2X_STATE_OPEN) &&
153 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
154 netif_tx_wake_queue(txq);
156 __netif_tx_unlock(txq);
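/* Illustrative aside (not driver code): the stop/wake logic above relies
 * on two memory barriers pairing up, one in start_xmit() and one here.
 * A minimal sketch of the pattern using C11 atomics in place of the
 * kernel primitives (ring_full(), wake_queue() etc. are hypothetical):
 *
 *     // producer (start_xmit):
 *     stop_queue();                                    // set_bit()
 *     atomic_thread_fence(memory_order_seq_cst);       // smp_mb()
 *     if (!ring_full())                                // re-check
 *         wake_queue();
 *
 *     // consumer (tx_int):
 *     publish_tx_bd_cons();
 *     atomic_thread_fence(memory_order_seq_cst);       // smp_mb()
 *     if (queue_stopped() && ring_has_room())
 *         wake_queue();                                // under __netif_tx_lock()
 *
 * Each side writes, fences, then reads the other side's state, so at
 * least one of them must observe the other's update and the queue can
 * never stay stopped while room is available.
 */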
161 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
164 u16 last_max = fp->last_max_sge;
166 if (SUB_S16(idx, last_max) > 0)
167 fp->last_max_sge = idx;
170 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
171 struct eth_fast_path_rx_cqe *fp_cqe)
173 struct bnx2x *bp = fp->bp;
174 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
175 le16_to_cpu(fp_cqe->len_on_bd)) >>
177 u16 last_max, last_elem, first_elem;
184 /* First mark all used pages */
185 for (i = 0; i < sge_len; i++)
186 SGE_MASK_CLEAR_BIT(fp,
187 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
189 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
190 sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
192 /* Here we assume that the last SGE index is the biggest */
193 prefetch((void *)(fp->sge_mask));
194 bnx2x_update_last_max_sge(fp,
195 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
197 last_max = RX_SGE(fp->last_max_sge);
198 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
199 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
201 /* If ring is not full */
202 if (last_elem + 1 != first_elem)
205 /* Now update the prod */
206 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
207 if (likely(fp->sge_mask[i]))
210 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
211 delta += RX_SGE_MASK_ELEM_SZ;
215 fp->rx_sge_prod += delta;
216 /* clear page-end entries */
217 bnx2x_clear_sge_mask_next_elems(fp);
220 DP(NETIF_MSG_RX_STATUS,
221 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
222 fp->last_max_sge, fp->rx_sge_prod);
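/* Illustrative aside: the producer bookkeeping above is a bitmap
 * run-length scan. Each sge_mask[] element tracks RX_SGE_MASK_ELEM_SZ
 * ring entries, one bit per entry, and consuming an entry clears its
 * bit. The producer advances only over elements whose mask has reached
 * zero (every entry handed back); each such element is refilled to the
 * all-ones pattern and contributes a full element's worth of entries to
 * rx_sge_prod. This batches producer updates and tolerates the FW
 * completing SGEs out of order within the first_elem..last_elem window.
 */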
225 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
226 struct sk_buff *skb, u16 cons, u16 prod)
228 struct bnx2x *bp = fp->bp;
229 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
230 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
231 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
234 /* move empty skb from pool to prod and map it */
235 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
236 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
237 bp->rx_buf_size, DMA_FROM_DEVICE);
238 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
240 /* move partial skb from cons to pool (don't unmap yet) */
241 fp->tpa_pool[queue] = *cons_rx_buf;
243 /* mark bin state as start - print error if current state != stop */
244 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
245 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
247 fp->tpa_state[queue] = BNX2X_TPA_START;
249 /* point prod_bd to new skb */
250 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
251 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
253 #ifdef BNX2X_STOP_ON_ERROR
254 fp->tpa_queue_used |= (1 << queue);
255 #ifdef _ASM_GENERIC_INT_L64_H
256 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
258 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
264 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
266 struct eth_fast_path_rx_cqe *fp_cqe,
269 struct sw_rx_page *rx_pg, old_rx_pg;
270 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
271 u32 i, frag_len, frag_size, pages;
275 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
276 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
278 /* This is needed in order to enable forwarding support */
280 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
281 max(frag_size, (u32)len_on_bd));
283 #ifdef BNX2X_STOP_ON_ERROR
284 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
285 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
287 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
288 fp_cqe->pkt_len, len_on_bd);
294 /* Run through the SGL and compose the fragmented skb */
295 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
297 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
299 /* FW gives the indices of the SGE as if the ring is an array
300 (meaning that "next" element will consume 2 indices) */
301 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
302 rx_pg = &fp->rx_page_ring[sge_idx];
305 /* If we fail to allocate a substitute page, we simply stop
306 where we are and drop the whole packet */
307 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
309 fp->eth_q_stats.rx_skb_alloc_failed++;
313 /* Unmap the page as we are going to pass it to the stack */
314 dma_unmap_page(&bp->pdev->dev,
315 dma_unmap_addr(&old_rx_pg, mapping),
316 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
318 /* Add one frag and update the appropriate fields in the skb */
319 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
321 skb->data_len += frag_len;
322 skb->truesize += frag_len;
323 skb->len += frag_len;
325 frag_size -= frag_len;
331 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
332 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
335 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
336 struct sk_buff *skb = rx_buf->skb;
338 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
340 /* Unmap skb in the pool anyway, as we are going to change
341 pool entry status to BNX2X_TPA_STOP even if new skb allocation fails.
343 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
344 bp->rx_buf_size, DMA_FROM_DEVICE);
346 if (likely(new_skb)) {
347 /* fix ip xsum and give it to the stack */
348 /* (no need to map the new skb) */
351 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
353 int is_not_hwaccel_vlan_cqe =
354 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
358 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
360 #ifdef BNX2X_STOP_ON_ERROR
361 if (pad + len > bp->rx_buf_size) {
362 BNX2X_ERR("skb_put is about to fail... "
363 "pad %d len %d rx_buf_size %d\n",
364 pad, len, bp->rx_buf_size);
370 skb_reserve(skb, pad);
373 skb->protocol = eth_type_trans(skb, bp->dev);
374 skb->ip_summed = CHECKSUM_UNNECESSARY;
379 iph = (struct iphdr *)skb->data;
381 /* If there is no Rx VLAN offloading -
382 take the VLAN tag into account */
383 if (unlikely(is_not_hwaccel_vlan_cqe))
384 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
387 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
390 if (!bnx2x_fill_frag_skb(bp, fp, skb,
391 &cqe->fast_path_cqe, cqe_idx)) {
393 if ((bp->vlgrp != NULL) &&
394 (le16_to_cpu(cqe->fast_path_cqe.
395 pars_flags.flags) & PARSING_FLAGS_VLAN))
396 vlan_gro_receive(&fp->napi, bp->vlgrp,
397 le16_to_cpu(cqe->fast_path_cqe.
401 napi_gro_receive(&fp->napi, skb);
403 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
404 " - dropping packet!\n");
409 /* put new skb in bin */
410 fp->tpa_pool[queue].skb = new_skb;
413 /* else drop the packet and keep the buffer in the bin */
414 DP(NETIF_MSG_RX_STATUS,
415 "Failed to allocate new skb - dropping packet!\n");
416 fp->eth_q_stats.rx_skb_alloc_failed++;
419 fp->tpa_state[queue] = BNX2X_TPA_STOP;
422 /* Set Toeplitz hash value in the skb using the value from the
423 * CQE (calculated by HW).
425 static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
428 /* Set Toeplitz hash from CQE */
429 if ((bp->dev->features & NETIF_F_RXHASH) &&
430 (cqe->fast_path_cqe.status_flags &
431 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
433 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
436 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
438 struct bnx2x *bp = fp->bp;
439 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
440 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
443 #ifdef BNX2X_STOP_ON_ERROR
444 if (unlikely(bp->panic))
448 /* CQ "next element" is the size of a regular element,
449 that's why it's ok here */
450 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
451 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
454 bd_cons = fp->rx_bd_cons;
455 bd_prod = fp->rx_bd_prod;
456 bd_prod_fw = bd_prod;
457 sw_comp_cons = fp->rx_comp_cons;
458 sw_comp_prod = fp->rx_comp_prod;
460 /* Memory barrier necessary as speculative reads of the rx
461 * buffer can be ahead of the index in the status block
465 DP(NETIF_MSG_RX_STATUS,
466 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
467 fp->index, hw_comp_cons, sw_comp_cons);
469 while (sw_comp_cons != hw_comp_cons) {
470 struct sw_rx_bd *rx_buf = NULL;
472 union eth_rx_cqe *cqe;
476 comp_ring_cons = RCQ_BD(sw_comp_cons);
477 bd_prod = RX_BD(bd_prod);
478 bd_cons = RX_BD(bd_cons);
480 /* Prefetch the page containing the BD descriptor
481 at the producer's index. It will be needed when a new skb is allocated
483 prefetch((void *)(PAGE_ALIGN((unsigned long)
484 (&fp->rx_desc_ring[bd_prod])) -
487 cqe = &fp->rx_comp_ring[comp_ring_cons];
488 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
490 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
491 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
492 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
493 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
494 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
495 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
497 /* is this a slowpath msg? */
498 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
499 bnx2x_sp_event(fp, cqe);
502 /* this is an rx packet */
504 rx_buf = &fp->rx_buf_ring[bd_cons];
507 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
508 pad = cqe->fast_path_cqe.placement_offset;
510 /* If CQE is marked both TPA_START and TPA_END
511 it is a non-TPA CQE */
512 if ((!fp->disable_tpa) &&
513 (TPA_TYPE(cqe_fp_flags) !=
514 (TPA_TYPE_START | TPA_TYPE_END))) {
515 u16 queue = cqe->fast_path_cqe.queue_index;
517 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
518 DP(NETIF_MSG_RX_STATUS,
519 "calling tpa_start on queue %d\n",
522 bnx2x_tpa_start(fp, queue, skb,
525 /* Set Toeplitz hash for an LRO skb */
526 bnx2x_set_skb_rxhash(bp, cqe, skb);
531 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
532 DP(NETIF_MSG_RX_STATUS,
533 "calling tpa_stop on queue %d\n",
536 if (!BNX2X_RX_SUM_FIX(cqe))
537 BNX2X_ERR("STOP on non-TCP "
540 /* This is the size of the linear data
542 len = le16_to_cpu(cqe->fast_path_cqe.
544 bnx2x_tpa_stop(bp, fp, queue, pad,
545 len, cqe, comp_ring_cons);
546 #ifdef BNX2X_STOP_ON_ERROR
551 bnx2x_update_sge_prod(fp,
552 &cqe->fast_path_cqe);
557 dma_sync_single_for_device(&bp->pdev->dev,
558 dma_unmap_addr(rx_buf, mapping),
559 pad + RX_COPY_THRESH,
561 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
563 /* is this an error packet? */
564 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
566 "ERROR flags %x rx packet %u\n",
567 cqe_fp_flags, sw_comp_cons);
568 fp->eth_q_stats.rx_err_discard_pkt++;
572 /* Since we don't have a jumbo ring,
573 * copy small packets if mtu > 1500
575 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
576 (len <= RX_COPY_THRESH)) {
577 struct sk_buff *new_skb;
579 new_skb = netdev_alloc_skb(bp->dev,
581 if (new_skb == NULL) {
583 "ERROR packet dropped "
584 "because of alloc failure\n");
585 fp->eth_q_stats.rx_skb_alloc_failed++;
590 skb_copy_from_linear_data_offset(skb, pad,
591 new_skb->data + pad, len);
592 skb_reserve(new_skb, pad);
593 skb_put(new_skb, len);
595 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
600 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
601 dma_unmap_single(&bp->pdev->dev,
602 dma_unmap_addr(rx_buf, mapping),
605 skb_reserve(skb, pad);
610 "ERROR packet dropped because "
611 "of alloc failure\n");
612 fp->eth_q_stats.rx_skb_alloc_failed++;
614 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
618 skb->protocol = eth_type_trans(skb, bp->dev);
620 /* Set Toeplitz hash for a non-LRO skb */
621 bnx2x_set_skb_rxhash(bp, cqe, skb);
623 skb_checksum_none_assert(skb);
626 if (likely(BNX2X_RX_CSUM_OK(cqe)))
627 skb->ip_summed = CHECKSUM_UNNECESSARY;
629 fp->eth_q_stats.hw_csum_err++;
633 skb_record_rx_queue(skb, fp->index);
636 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
637 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
639 vlan_gro_receive(&fp->napi, bp->vlgrp,
640 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
643 napi_gro_receive(&fp->napi, skb);
649 bd_cons = NEXT_RX_IDX(bd_cons);
650 bd_prod = NEXT_RX_IDX(bd_prod);
651 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
654 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
655 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
657 if (rx_pkt == budget)
661 fp->rx_bd_cons = bd_cons;
662 fp->rx_bd_prod = bd_prod_fw;
663 fp->rx_comp_cons = sw_comp_cons;
664 fp->rx_comp_prod = sw_comp_prod;
666 /* Update producers */
667 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
670 fp->rx_pkt += rx_pkt;
676 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
678 struct bnx2x_fastpath *fp = fp_cookie;
679 struct bnx2x *bp = fp->bp;
681 /* Return here if interrupt is disabled */
682 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
683 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
687 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
688 "[fp %d fw_sd %d igusb %d]\n",
689 fp->index, fp->fw_sb_id, fp->igu_sb_id);
690 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
692 #ifdef BNX2X_STOP_ON_ERROR
693 if (unlikely(bp->panic))
697 /* Handle Rx and Tx according to MSI-X vector */
698 prefetch(fp->rx_cons_sb);
699 prefetch(fp->tx_cons_sb);
700 prefetch(&fp->sb_running_index[SM_RX_ID]);
701 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
706 /* HW Lock for shared dual port PHYs */
707 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
709 mutex_lock(&bp->port.phy_mutex);
711 if (bp->port.need_hw_lock)
712 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
715 void bnx2x_release_phy_lock(struct bnx2x *bp)
717 if (bp->port.need_hw_lock)
718 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
720 mutex_unlock(&bp->port.phy_mutex);
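/* Illustrative aside: note the lock ordering in the pair above - the
 * PHY mutex is taken before the MDIO HW lock and released after it, so
 * the software and hardware locks always nest the same way and two
 * callers using this acquire/release pair cannot deadlock on them.
 */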
723 void bnx2x_link_report(struct bnx2x *bp)
725 if (bp->flags & MF_FUNC_DIS) {
726 netif_carrier_off(bp->dev);
727 netdev_err(bp->dev, "NIC Link is Down\n");
731 if (bp->link_vars.link_up) {
734 if (bp->state == BNX2X_STATE_OPEN)
735 netif_carrier_on(bp->dev);
736 netdev_info(bp->dev, "NIC Link is Up, ");
738 line_speed = bp->link_vars.line_speed;
743 ((bp->mf_config[BP_VN(bp)] &
744 FUNC_MF_CFG_MAX_BW_MASK) >>
745 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
746 if (vn_max_rate < line_speed)
747 line_speed = vn_max_rate;
749 pr_cont("%d Mbps ", line_speed);
751 if (bp->link_vars.duplex == DUPLEX_FULL)
752 pr_cont("full duplex");
754 pr_cont("half duplex");
756 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
757 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
758 pr_cont(", receive ");
759 if (bp->link_vars.flow_ctrl &
761 pr_cont("& transmit ");
763 pr_cont(", transmit ");
765 pr_cont("flow control ON");
769 } else { /* link_down */
770 netif_carrier_off(bp->dev);
771 netdev_err(bp->dev, "NIC Link is Down\n");
775 /* Returns the number of actually allocated BDs */
776 static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
779 struct bnx2x *bp = fp->bp;
780 u16 ring_prod, cqe_ring_prod;
783 fp->rx_comp_cons = 0;
784 cqe_ring_prod = ring_prod = 0;
785 for (i = 0; i < rx_ring_size; i++) {
786 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
787 BNX2X_ERR("was only able to allocate "
788 "%d rx skbs on queue[%d]\n", i, fp->index);
789 fp->eth_q_stats.rx_skb_alloc_failed++;
792 ring_prod = NEXT_RX_IDX(ring_prod);
793 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
794 WARN_ON(ring_prod <= i);
797 fp->rx_bd_prod = ring_prod;
798 /* Limit the CQE producer by the CQE ring size */
799 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
801 fp->rx_pkt = fp->rx_calls = 0;
806 static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
808 struct bnx2x *bp = fp->bp;
809 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
810 MAX_RX_AVAIL/bp->num_queues;
812 rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
814 bnx2x_alloc_rx_bds(fp, rx_ring_size);
817 * this will generate an interrupt (to the TSTORM)
818 * must only be done after chip is initialized
820 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
824 void bnx2x_init_rx_rings(struct bnx2x *bp)
826 int func = BP_FUNC(bp);
827 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
828 ETH_MAX_AGGREGATION_QUEUES_E1H;
832 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
833 BNX2X_FW_IP_HDR_ALIGN_PAD;
836 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
838 for_each_queue(bp, j) {
839 struct bnx2x_fastpath *fp = &bp->fp[j];
841 if (!fp->disable_tpa) {
842 for (i = 0; i < max_agg_queues; i++) {
843 fp->tpa_pool[i].skb =
844 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
845 if (!fp->tpa_pool[i].skb) {
846 BNX2X_ERR("Failed to allocate TPA "
847 "skb pool for queue[%d] - "
848 "disabling TPA on this "
850 bnx2x_free_tpa_pool(bp, fp, i);
854 dma_unmap_addr_set((struct sw_rx_bd *)
855 &bp->fp->tpa_pool[i],
857 fp->tpa_state[i] = BNX2X_TPA_STOP;
860 /* "next page" elements initialization */
861 bnx2x_set_next_page_sgl(fp);
863 /* set SGEs bit mask */
864 bnx2x_init_sge_ring_bit_mask(fp);
866 /* Allocate SGEs and initialize the ring elements */
867 for (i = 0, ring_prod = 0;
868 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
870 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
871 BNX2X_ERR("was only able to allocate "
873 BNX2X_ERR("disabling TPA for"
875 /* Cleanup already allocated elements */
876 bnx2x_free_rx_sge_range(bp,
878 bnx2x_free_tpa_pool(bp,
884 ring_prod = NEXT_SGE_IDX(ring_prod);
887 fp->rx_sge_prod = ring_prod;
891 for_each_queue(bp, j) {
892 struct bnx2x_fastpath *fp = &bp->fp[j];
896 bnx2x_set_next_page_rx_bd(fp);
899 bnx2x_set_next_page_rx_cq(fp);
901 /* Allocate BDs and initialize BD ring */
902 bnx2x_alloc_rx_bd_ring(fp);
907 if (!CHIP_IS_E2(bp)) {
908 REG_WR(bp, BAR_USTRORM_INTMEM +
909 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
910 U64_LO(fp->rx_comp_mapping));
911 REG_WR(bp, BAR_USTRORM_INTMEM +
912 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
913 U64_HI(fp->rx_comp_mapping));
918 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
922 for_each_queue(bp, i) {
923 struct bnx2x_fastpath *fp = &bp->fp[i];
925 u16 bd_cons = fp->tx_bd_cons;
926 u16 sw_prod = fp->tx_pkt_prod;
927 u16 sw_cons = fp->tx_pkt_cons;
929 while (sw_cons != sw_prod) {
930 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
936 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
940 for_each_queue(bp, j) {
941 struct bnx2x_fastpath *fp = &bp->fp[j];
943 for (i = 0; i < NUM_RX_BD; i++) {
944 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
945 struct sk_buff *skb = rx_buf->skb;
950 dma_unmap_single(&bp->pdev->dev,
951 dma_unmap_addr(rx_buf, mapping),
952 bp->rx_buf_size, DMA_FROM_DEVICE);
957 if (!fp->disable_tpa)
958 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
959 ETH_MAX_AGGREGATION_QUEUES_E1 :
960 ETH_MAX_AGGREGATION_QUEUES_E1H);
964 void bnx2x_free_skbs(struct bnx2x *bp)
966 bnx2x_free_tx_skbs(bp);
967 bnx2x_free_rx_skbs(bp);
970 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
974 free_irq(bp->msix_table[0].vector, bp->dev);
975 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
976 bp->msix_table[0].vector);
981 for_each_queue(bp, i) {
982 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
983 "state %x\n", i, bp->msix_table[i + offset].vector,
984 bnx2x_fp(bp, i, state));
986 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
990 void bnx2x_free_irq(struct bnx2x *bp)
992 if (bp->flags & USING_MSIX_FLAG)
993 bnx2x_free_msix_irqs(bp);
994 else if (bp->flags & USING_MSI_FLAG)
995 free_irq(bp->pdev->irq, bp->dev);
997 free_irq(bp->pdev->irq, bp->dev);
1000 int bnx2x_enable_msix(struct bnx2x *bp)
1002 int msix_vec = 0, i, rc, req_cnt;
1004 bp->msix_table[msix_vec].entry = msix_vec;
1005 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1006 bp->msix_table[0].entry);
1010 bp->msix_table[msix_vec].entry = msix_vec;
1011 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1012 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1015 for_each_queue(bp, i) {
1016 bp->msix_table[msix_vec].entry = msix_vec;
1017 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1018 "(fastpath #%u)\n", msix_vec, msix_vec, i);
1022 req_cnt = BNX2X_NUM_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
1024 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1027 * reconfigure number of tx/rx queues according to available
1030 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1031 /* how many fewer vectors will we have? */
1032 int diff = req_cnt - rc;
1035 "Trying to use less MSI-X vectors: %d\n", rc);
1037 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1041 "MSI-X is not attainable rc %d\n", rc);
1045 * decrease number of queues by number of unallocated entries
1047 bp->num_queues -= diff;
1049 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1052 /* fall back to INTx if not enough memory */
1054 bp->flags |= DISABLE_MSI_FLAG;
1055 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1059 bp->flags |= USING_MSIX_FLAG;
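/* Illustrative aside (not driver code): the retry above follows the
 * contract of the (old) pci_enable_msix() API: 0 on success, a negative
 * errno on failure, and a positive value meaning "only this many
 * vectors are available". A minimal caller sketch (fall_back() is a
 * hypothetical helper):
 *
 *     rc = pci_enable_msix(pdev, tbl, want);
 *     if (rc > 0 && rc >= minimum)
 *         rc = pci_enable_msix(pdev, tbl, rc);   // retry with fewer
 *     if (rc)
 *         fall_back();                           // MSI or legacy INTx
 */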
1064 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1066 int i, rc, offset = 1;
1068 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
1069 bp->dev->name, bp->dev);
1071 BNX2X_ERR("request sp irq failed\n");
1078 for_each_queue(bp, i) {
1079 struct bnx2x_fastpath *fp = &bp->fp[i];
1080 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1083 rc = request_irq(bp->msix_table[offset].vector,
1084 bnx2x_msix_fp_int, 0, fp->name, fp);
1086 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
1087 bnx2x_free_msix_irqs(bp);
1092 fp->state = BNX2X_FP_STATE_IRQ;
1095 i = BNX2X_NUM_QUEUES(bp);
1096 offset = 1 + CNIC_CONTEXT_USE;
1097 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1099 bp->msix_table[0].vector,
1100 0, bp->msix_table[offset].vector,
1101 i - 1, bp->msix_table[offset + i - 1].vector);
1106 int bnx2x_enable_msi(struct bnx2x *bp)
1110 rc = pci_enable_msi(bp->pdev);
1112 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1115 bp->flags |= USING_MSI_FLAG;
1120 static int bnx2x_req_irq(struct bnx2x *bp)
1122 unsigned long flags;
1125 if (bp->flags & USING_MSI_FLAG)
1128 flags = IRQF_SHARED;
1130 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1131 bp->dev->name, bp->dev);
1133 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1138 static void bnx2x_napi_enable(struct bnx2x *bp)
1142 for_each_queue(bp, i)
1143 napi_enable(&bnx2x_fp(bp, i, napi));
1146 static void bnx2x_napi_disable(struct bnx2x *bp)
1150 for_each_queue(bp, i)
1151 napi_disable(&bnx2x_fp(bp, i, napi));
1154 void bnx2x_netif_start(struct bnx2x *bp)
1158 intr_sem = atomic_dec_and_test(&bp->intr_sem);
1159 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1162 if (netif_running(bp->dev)) {
1163 bnx2x_napi_enable(bp);
1164 bnx2x_int_enable(bp);
1165 if (bp->state == BNX2X_STATE_OPEN)
1166 netif_tx_wake_all_queues(bp->dev);
1171 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1173 bnx2x_int_disable_sync(bp, disable_hw);
1174 bnx2x_napi_disable(bp);
1175 netif_tx_disable(bp->dev);
1178 void bnx2x_set_num_queues(struct bnx2x *bp)
1180 switch (bp->multi_mode) {
1181 case ETH_RSS_MODE_DISABLED:
1184 case ETH_RSS_MODE_REGULAR:
1185 bp->num_queues = bnx2x_calc_num_queues(bp);
1194 static void bnx2x_release_firmware(struct bnx2x *bp)
1196 kfree(bp->init_ops_offsets);
1197 kfree(bp->init_ops);
1198 kfree(bp->init_data);
1199 release_firmware(bp->firmware);
1202 /* must be called with rtnl_lock */
1203 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1208 /* Set init arrays */
1209 rc = bnx2x_init_firmware(bp);
1211 BNX2X_ERR("Error loading firmware\n");
1215 #ifdef BNX2X_STOP_ON_ERROR
1216 if (unlikely(bp->panic))
1220 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1222 /* must be called before memory allocation and HW init */
1223 bnx2x_ilt_set_info(bp);
1225 if (bnx2x_alloc_mem(bp))
1228 netif_set_real_num_tx_queues(bp->dev, bp->num_queues);
1229 rc = netif_set_real_num_rx_queues(bp->dev, bp->num_queues);
1231 BNX2X_ERR("Unable to update real_num_rx_queues\n");
1235 for_each_queue(bp, i)
1236 bnx2x_fp(bp, i, disable_tpa) =
1237 ((bp->flags & TPA_ENABLE_FLAG) == 0);
1239 bnx2x_napi_enable(bp);
1241 /* Send LOAD_REQUEST command to MCP
1242 Returns the type of LOAD command:
1243 if it is the first port to be initialized,
1244 common blocks should be initialized, otherwise not
1246 if (!BP_NOMCP(bp)) {
1247 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1249 BNX2X_ERR("MCP response failure, aborting\n");
1253 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1254 rc = -EBUSY; /* other port in diagnostic mode */
1259 int path = BP_PATH(bp);
1260 int port = BP_PORT(bp);
1262 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1263 path, load_count[path][0], load_count[path][1],
1264 load_count[path][2]);
1265 load_count[path][0]++;
1266 load_count[path][1 + port]++;
1267 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1268 path, load_count[path][0], load_count[path][1],
1269 load_count[path][2]);
1270 if (load_count[path][0] == 1)
1271 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1272 else if (load_count[path][1 + port] == 1)
1273 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1275 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1278 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1279 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
1280 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1284 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1287 rc = bnx2x_init_hw(bp, load_code);
1289 BNX2X_ERR("HW init failed, aborting\n");
1290 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1291 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1292 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1296 /* Connect to IRQs */
1297 rc = bnx2x_setup_irqs(bp);
1299 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1303 /* Setup NIC internals and enable interrupts */
1304 bnx2x_nic_init(bp, load_code);
1306 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1307 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
1308 (bp->common.shmem2_base))
1309 SHMEM2_WR(bp, dcc_support,
1310 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1311 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1313 /* Send LOAD_DONE command to MCP */
1314 if (!BP_NOMCP(bp)) {
1315 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1317 BNX2X_ERR("MCP response failure, aborting\n");
1323 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1325 rc = bnx2x_func_start(bp);
1327 BNX2X_ERR("Function start failed!\n");
1328 #ifndef BNX2X_STOP_ON_ERROR
1336 rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
1338 BNX2X_ERR("Setup leading failed!\n");
1339 #ifndef BNX2X_STOP_ON_ERROR
1347 if (!CHIP_IS_E1(bp) &&
1348 (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
1349 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1350 bp->flags |= MF_FUNC_DIS;
1354 /* Enable Timer scan */
1355 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
1358 for_each_nondefault_queue(bp, i) {
1359 rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
1368 /* Now that clients are configured we are ready to work */
1369 bp->state = BNX2X_STATE_OPEN;
1371 bnx2x_set_eth_mac(bp, 1);
1374 bnx2x_initial_phy_init(bp, load_mode);
1376 /* Start fast path */
1377 switch (load_mode) {
1379 /* Tx queue should only be re-enabled */
1380 netif_tx_wake_all_queues(bp->dev);
1381 /* Initialize the receive filter. */
1382 bnx2x_set_rx_mode(bp->dev);
1386 netif_tx_start_all_queues(bp->dev);
1387 smp_mb__after_clear_bit();
1388 /* Initialize the receive filter. */
1389 bnx2x_set_rx_mode(bp->dev);
1393 /* Initialize the receive filter. */
1394 bnx2x_set_rx_mode(bp->dev);
1395 bp->state = BNX2X_STATE_DIAG;
1403 bnx2x__link_status_update(bp);
1405 /* start the timer */
1406 mod_timer(&bp->timer, jiffies + bp->current_interval);
1409 bnx2x_setup_cnic_irq_info(bp);
1410 if (bp->state == BNX2X_STATE_OPEN)
1411 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1413 bnx2x_inc_load_cnt(bp);
1415 bnx2x_release_firmware(bp);
1421 /* Disable Timer scan */
1422 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
1425 bnx2x_int_disable_sync(bp, 1);
1427 /* Free SKBs, SGEs, TPA pool and driver internals */
1428 bnx2x_free_skbs(bp);
1429 for_each_queue(bp, i)
1430 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1435 if (!BP_NOMCP(bp)) {
1436 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1437 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1442 bnx2x_napi_disable(bp);
1446 bnx2x_release_firmware(bp);
1451 /* must be called with rtnl_lock */
1452 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1456 if (bp->state == BNX2X_STATE_CLOSED) {
1457 /* Interface has been removed - nothing to recover */
1458 bp->recovery_state = BNX2X_RECOVERY_DONE;
1460 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1467 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1469 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1471 /* Set "drop all" */
1472 bp->rx_mode = BNX2X_RX_MODE_NONE;
1473 bnx2x_set_storm_rx_mode(bp);
1476 bnx2x_tx_disable(bp);
1478 del_timer_sync(&bp->timer);
1480 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
1481 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
1483 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1485 /* Cleanup the chip if needed */
1486 if (unload_mode != UNLOAD_RECOVERY)
1487 bnx2x_chip_cleanup(bp, unload_mode);
1489 /* Disable HW interrupts, NAPI and Tx */
1490 bnx2x_netif_stop(bp, 1);
1498 /* Free SKBs, SGEs, TPA pool and driver internals */
1499 bnx2x_free_skbs(bp);
1500 for_each_queue(bp, i)
1501 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1505 bp->state = BNX2X_STATE_CLOSED;
1507 /* The last driver to unload must disable the "close the gate"
1508 * functionality if there is no parity attention or "process kill" pending.
1510 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1511 bnx2x_reset_is_done(bp))
1512 bnx2x_disable_close_the_gate(bp);
1514 /* Reset the MCP mailbox sequence if there is ongoing recovery */
1515 if (unload_mode == UNLOAD_RECOVERY)
1521 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1525 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1529 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1530 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
1531 PCI_PM_CTRL_PME_STATUS));
1533 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
1534 /* delay required during transition out of D3hot */
1539 /* If there are other clients above, don't
1540 shut down the power */
1541 if (atomic_read(&bp->pdev->enable_cnt) != 1)
1543 /* Don't shut down the power for emulation and FPGA */
1544 if (CHIP_REV_IS_SLOW(bp))
1547 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1551 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1553 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1556 /* No more memory access after this point until
1557 * device is brought back to D0.
1568 * net_device service functions
1570 int bnx2x_poll(struct napi_struct *napi, int budget)
1573 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1575 struct bnx2x *bp = fp->bp;
1578 #ifdef BNX2X_STOP_ON_ERROR
1579 if (unlikely(bp->panic)) {
1580 napi_complete(napi);
1585 if (bnx2x_has_tx_work(fp))
1588 if (bnx2x_has_rx_work(fp)) {
1589 work_done += bnx2x_rx_int(fp, budget - work_done);
1591 /* must not complete if we consumed the full budget */
1592 if (work_done >= budget)
1596 /* Fall out of the NAPI loop if needed */
1597 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1598 bnx2x_update_fpsb_idx(fp);
1599 /* bnx2x_has_rx_work() reads the status block,
1600 * thus we need to ensure that status block indices
1601 * have actually been read (bnx2x_update_fpsb_idx)
1602 * prior to this check (bnx2x_has_rx_work) so that
1603 * we won't write the "newer" value of the status block
1604 * to IGU (if there was a DMA right after
1605 * bnx2x_has_rx_work and if there is no rmb, the memory
1606 * reading (bnx2x_update_fpsb_idx) may be postponed
1607 * to right before bnx2x_ack_sb). In this case there
1608 * will never be another interrupt until there is
1609 * another update of the status block, while there
1610 * is still unhandled work.
1614 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1615 napi_complete(napi);
1616 /* Re-enable interrupts */
1618 "Update index to %d\n", fp->fp_hc_idx);
1619 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
1620 le16_to_cpu(fp->fp_hc_idx),
1630 /* We split the first BD into header and data BDs
1631 * to ease the pain of our fellow microcode engineers;
1632 * we use one mapping for both BDs.
1633 * So far this has only been observed to happen
1634 * in Other Operating Systems(TM).
1636 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1637 struct bnx2x_fastpath *fp,
1638 struct sw_tx_bd *tx_buf,
1639 struct eth_tx_start_bd **tx_bd, u16 hlen,
1640 u16 bd_prod, int nbd)
1642 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1643 struct eth_tx_bd *d_tx_bd;
1645 int old_len = le16_to_cpu(h_tx_bd->nbytes);
1647 /* first fix first BD */
1648 h_tx_bd->nbd = cpu_to_le16(nbd);
1649 h_tx_bd->nbytes = cpu_to_le16(hlen);
1651 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1652 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1653 h_tx_bd->addr_lo, h_tx_bd->nbd);
1655 /* now get a new data BD
1656 * (after the pbd) and fill it */
1657 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1658 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1660 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1661 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1663 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1664 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1665 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1667 /* this marks the BD as one that has no individual mapping */
1668 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1670 DP(NETIF_MSG_TX_QUEUED,
1671 "TSO split data size is %d (%x:%x)\n",
1672 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1675 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
1680 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1683 csum = (u16) ~csum_fold(csum_sub(csum,
1684 csum_partial(t_header - fix, fix, 0)));
1687 csum = (u16) ~csum_fold(csum_add(csum,
1688 csum_partial(t_header, -fix, 0)));
1690 return swab16(csum);
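/* Illustrative aside: bnx2x_csum_fix() works because the Internet
 * checksum is a one's-complement sum and therefore decomposable: the
 * sum over bytes [a, c) equals the sums over [a, b) plus [b, c). If HW
 * summed "fix" extra bytes before the transport header (fix > 0),
 * csum_sub() of csum_partial() over those bytes leaves the checksum of
 * the transport data alone; a negative fix adds the missing span back
 * with csum_add(). swab16() then restores network byte order after
 * csum_fold().
 */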
1693 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1697 if (skb->ip_summed != CHECKSUM_PARTIAL)
1701 if (skb->protocol == htons(ETH_P_IPV6)) {
1703 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1704 rc |= XMIT_CSUM_TCP;
1708 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1709 rc |= XMIT_CSUM_TCP;
1713 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
1714 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
1716 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
1717 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
1722 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1723 /* check if packet requires linearization (packet is too fragmented)
1724 no need to check fragmentation if page size > 8K (there will be no
1725 violation of FW restrictions) */
1726 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
1731 int first_bd_sz = 0;
1733 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
1734 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
1736 if (xmit_type & XMIT_GSO) {
1737 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
1738 /* Check if LSO packet needs to be copied:
1739 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
1740 int wnd_size = MAX_FETCH_BD - 3;
1741 /* Number of windows to check */
1742 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
1747 /* Headers length */
1748 hlen = (int)(skb_transport_header(skb) - skb->data) +
1751 /* Amount of data (w/o headers) on the linear part of the SKB */
1752 first_bd_sz = skb_headlen(skb) - hlen;
1754 wnd_sum = first_bd_sz;
1756 /* Calculate the first sum - it's special */
1757 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
1759 skb_shinfo(skb)->frags[frag_idx].size;
1761 /* If there was data in the linear part of the skb - check it */
1762 if (first_bd_sz > 0) {
1763 if (unlikely(wnd_sum < lso_mss)) {
1768 wnd_sum -= first_bd_sz;
1771 /* Others are easier: run through the frag list and
1772 check all windows */
1773 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
1775 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
1777 if (unlikely(wnd_sum < lso_mss)) {
1782 skb_shinfo(skb)->frags[wnd_idx].size;
1785 /* in the non-LSO case, a too fragmented packet should always
1792 if (unlikely(to_copy))
1793 DP(NETIF_MSG_TX_QUEUED,
1794 "Linearization IS REQUIRED for %s packet. "
1795 "num_frags %d hlen %d first_bd_sz %d\n",
1796 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
1797 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
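/* Illustrative aside: the sliding-window test above guards the FW
 * limit of MAX_FETCH_BD BDs fetched per packet. For LSO, every run of
 * wnd_size = MAX_FETCH_BD - 3 consecutive frags must carry at least one
 * full MSS of payload; otherwise a single segment could span more BDs
 * than the FW will fetch, so the skb is linearized instead. E.g. with a
 * 10-frag window and lso_mss = 1460, ten consecutive 100-byte frags
 * (window sum 1000 < 1460) would force linearization.
 */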
1803 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb,
1804 struct eth_tx_parse_bd_e2 *pbd,
1807 pbd->parsing_data |= cpu_to_le16(skb_shinfo(skb)->gso_size) <<
1808 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT;
1809 if ((xmit_type & XMIT_GSO_V6) &&
1810 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
1811 pbd->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
1815 * Update PBD in GSO case.
1818 * @param tx_start_bd
1822 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
1823 struct eth_tx_parse_bd_e1x *pbd,
1826 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1827 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
1828 pbd->tcp_flags = pbd_tcp_flags(skb);
1830 if (xmit_type & XMIT_GSO_V4) {
1831 pbd->ip_id = swab16(ip_hdr(skb)->id);
1832 pbd->tcp_pseudo_csum =
1833 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1835 0, IPPROTO_TCP, 0));
1838 pbd->tcp_pseudo_csum =
1839 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1840 &ipv6_hdr(skb)->daddr,
1841 0, IPPROTO_TCP, 0));
1843 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
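/* Illustrative aside: passing len = 0 to csum_tcpudp_magic() /
 * csum_ipv6_magic() above seeds the pseudo-header checksum *without*
 * the length field, which is what the ...PSEUDO_CS_WITHOUT_LEN flag
 * advertises: the HW recomputes the per-segment length while
 * segmenting, so the original super-packet length must not be baked
 * into the seed.
 */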
1849 * @param tx_start_bd
1853 * @return header len
1855 static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
1856 struct eth_tx_parse_bd_e2 *pbd,
1859 pbd->parsing_data |= cpu_to_le16(tcp_hdrlen(skb)/4) <<
1860 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT;
1862 pbd->parsing_data |= cpu_to_le16(((unsigned char *)tcp_hdr(skb) -
1864 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT;
1866 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
1872 * @param tx_start_bd
1876 * @return Header length
1878 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
1879 struct eth_tx_parse_bd_e1x *pbd,
1882 u8 hlen = (skb_network_header(skb) - skb->data) / 2;
1884 /* for now NS flag is not used in Linux */
1886 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
1887 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
1889 pbd->ip_hlen_w = (skb_transport_header(skb) -
1890 skb_network_header(skb)) / 2;
1892 hlen += pbd->ip_hlen_w + tcp_hdrlen(skb) / 2;
1894 pbd->total_hlen_w = cpu_to_le16(hlen);
1897 if (xmit_type & XMIT_CSUM_TCP) {
1898 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
1901 s8 fix = SKB_CS_OFF(skb); /* signed! */
1903 DP(NETIF_MSG_TX_QUEUED,
1904 "hlen %d fix %d csum before fix %x\n",
1905 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
1907 /* HW bug: fixup the CSUM */
1908 pbd->tcp_pseudo_csum =
1909 bnx2x_csum_fix(skb_transport_header(skb),
1912 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
1913 pbd->tcp_pseudo_csum);
1919 /* called with netif_tx_lock
1920 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
1921 * netif_wake_queue()
1923 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1925 struct bnx2x *bp = netdev_priv(dev);
1926 struct bnx2x_fastpath *fp;
1927 struct netdev_queue *txq;
1928 struct sw_tx_bd *tx_buf;
1929 struct eth_tx_start_bd *tx_start_bd;
1930 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
1931 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
1932 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
1933 u16 pkt_prod, bd_prod;
1936 u32 xmit_type = bnx2x_xmit_type(bp, skb);
1939 __le16 pkt_size = 0;
1941 u8 mac_type = UNICAST_ADDRESS;
1943 #ifdef BNX2X_STOP_ON_ERROR
1944 if (unlikely(bp->panic))
1945 return NETDEV_TX_BUSY;
1948 fp_index = skb_get_queue_mapping(skb);
1949 txq = netdev_get_tx_queue(dev, fp_index);
1951 fp = &bp->fp[fp_index];
1953 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
1954 fp->eth_q_stats.driver_xoff++;
1955 netif_tx_stop_queue(txq);
1956 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
1957 return NETDEV_TX_BUSY;
1960 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
1961 "protocol(%x,%x) gso type %x xmit_type %x\n",
1962 fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
1963 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
1965 eth = (struct ethhdr *)skb->data;
1967 /* set flag according to packet type (UNICAST_ADDRESS is default) */
1968 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
1969 if (is_broadcast_ether_addr(eth->h_dest))
1970 mac_type = BROADCAST_ADDRESS;
1972 mac_type = MULTICAST_ADDRESS;
1975 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1976 /* First, check if we need to linearize the skb (due to FW
1977 restrictions). No need to check fragmentation if page size > 8K
1978 (there will be no violation of FW restrictions) */
1979 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
1980 /* Statistics of linearization */
1982 if (skb_linearize(skb) != 0) {
1983 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
1984 "silently dropping this SKB\n");
1985 dev_kfree_skb_any(skb);
1986 return NETDEV_TX_OK;
1992 Please read carefully. First we use one BD which we mark as start,
1993 then we have a parsing info BD (used for TSO or xsum),
1994 and only then we have the rest of the TSO BDs.
1995 (don't forget to mark the last one as last,
1996 and to unmap only AFTER you write to the BD ...)
1997 And above all, all PBD sizes are in words - NOT DWORDS!
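/* Illustrative aside: for a hypothetical 2-frag TSO packet the
 * resulting BD chain looks like:
 *
 *     start BD (headers) -> parse BD (e1x or e2)
 *         -> [data BD from the TSO header split]
 *         -> frag data BD -> frag data BD (marked last)
 *
 * nbd counts all of these; the parse BD is counted even though it
 * carries no DMA mapping, which is why bnx2x_free_tx_pkt() skips one
 * BD unconditionally when unmapping.
 */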
2000 pkt_prod = fp->tx_pkt_prod++;
2001 bd_prod = TX_BD(fp->tx_bd_prod);
2003 /* get a tx_buf and first BD */
2004 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
2005 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
2007 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2008 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2012 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
2014 /* remember the first BD of the packet */
2015 tx_buf->first_bd = fp->tx_bd_prod;
2019 DP(NETIF_MSG_TX_QUEUED,
2020 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2021 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
2024 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
2025 (bp->flags & HW_VLAN_TX_FLAG)) {
2026 tx_start_bd->vlan_or_ethertype =
2027 cpu_to_le16(vlan_tx_tag_get(skb));
2028 tx_start_bd->bd_flags.as_bitfield |=
2029 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
2032 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2034 /* turn on parsing and get a BD */
2035 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2037 if (xmit_type & XMIT_CSUM) {
2038 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2040 if (xmit_type & XMIT_CSUM_V4)
2041 tx_start_bd->bd_flags.as_bitfield |=
2042 ETH_TX_BD_FLAGS_IP_CSUM;
2044 tx_start_bd->bd_flags.as_bitfield |=
2045 ETH_TX_BD_FLAGS_IPV6;
2047 if (!(xmit_type & XMIT_CSUM_TCP))
2048 tx_start_bd->bd_flags.as_bitfield |=
2049 ETH_TX_BD_FLAGS_IS_UDP;
2052 if (CHIP_IS_E2(bp)) {
2053 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
2054 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2055 /* Set PBD in checksum offload case */
2056 if (xmit_type & XMIT_CSUM)
2057 hlen = bnx2x_set_pbd_csum_e2(bp,
2058 skb, pbd_e2, xmit_type);
2060 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2061 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2062 /* Set PBD in checksum offload case */
2063 if (xmit_type & XMIT_CSUM)
2064 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
2068 /* Map skb linear data for DMA */
2069 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2070 skb_headlen(skb), DMA_TO_DEVICE);
2072 /* Setup the data pointer of the first BD of the packet */
2073 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2074 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2075 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
2076 tx_start_bd->nbd = cpu_to_le16(nbd);
2077 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2078 pkt_size = tx_start_bd->nbytes;
2080 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2081 " nbytes %d flags %x vlan %x\n",
2082 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2083 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2084 tx_start_bd->bd_flags.as_bitfield,
2085 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
2087 if (xmit_type & XMIT_GSO) {
2089 DP(NETIF_MSG_TX_QUEUED,
2090 "TSO packet len %d hlen %d total len %d tso size %d\n",
2091 skb->len, hlen, skb_headlen(skb),
2092 skb_shinfo(skb)->gso_size);
2094 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2096 if (unlikely(skb_headlen(skb) > hlen))
2097 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2098 hlen, bd_prod, ++nbd);
2100 bnx2x_set_pbd_gso_e2(skb, pbd_e2, xmit_type);
2102 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
2104 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2106 /* Handle fragmented skb */
2107 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2108 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2110 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2111 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2112 if (total_pkt_bd == NULL)
2113 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2115 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2117 frag->size, DMA_TO_DEVICE);
2119 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2120 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2121 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2122 le16_add_cpu(&pkt_size, frag->size);
2124 DP(NETIF_MSG_TX_QUEUED,
2125 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2126 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2127 le16_to_cpu(tx_data_bd->nbytes));
2130 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2132 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2134 /* now send a tx doorbell, counting the next BD
2135 * if the packet contains or ends with it
2137 if (TX_BD_POFF(bd_prod) < nbd)
2140 if (total_pkt_bd != NULL)
2141 total_pkt_bd->total_pkt_bytes = pkt_size;
2144 DP(NETIF_MSG_TX_QUEUED,
2145 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
2146 " tcp_flags %x xsum %x seq %u hlen %u\n",
2147 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2148 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2149 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2150 le16_to_cpu(pbd_e1x->total_hlen_w));
2152 DP(NETIF_MSG_TX_QUEUED,
2153 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2154 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2155 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2156 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2157 pbd_e2->parsing_data);
2158 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2161 * Make sure that the BD data is updated before updating the producer
2162 * since FW might read the BD right after the producer is updated.
2163 * This is only applicable for weak-ordered memory model archs such
2164 * as IA-64. The following barrier is also mandatory since FW
2165 * assumes packets must have BDs.
2169 fp->tx_db.data.prod += nbd;
2172 DOORBELL(bp, fp->cid, fp->tx_db.raw);
2176 fp->tx_bd_prod += nbd;
2178 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2179 netif_tx_stop_queue(txq);
2181 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
2182 * ordering of set_bit() in netif_tx_stop_queue() and read of
2186 fp->eth_q_stats.driver_xoff++;
2187 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2188 netif_tx_wake_queue(txq);
2192 return NETDEV_TX_OK;
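/* Illustrative aside: the "MAX_SKB_FRAGS + 3" headroom test above
 * reserves worst-case room: one BD per fragment plus the start BD, the
 * parse BD, and one extra data BD for a possible TSO header split
 * (the ++nbd above). Stopping the queue while that much room is still
 * free guarantees the next start_xmit() cannot overrun the ring.
 */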
2195 /* called with rtnl_lock */
2196 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2198 struct sockaddr *addr = p;
2199 struct bnx2x *bp = netdev_priv(dev);
2201 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2204 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2205 if (netif_running(dev))
2206 bnx2x_set_eth_mac(bp, 1);
2212 int bnx2x_setup_irqs(struct bnx2x *bp)
2215 if (bp->flags & USING_MSIX_FLAG) {
2216 rc = bnx2x_req_msix_irqs(bp);
2221 rc = bnx2x_req_irq(bp);
2223 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
2226 if (bp->flags & USING_MSI_FLAG) {
2227 bp->dev->irq = bp->pdev->irq;
2228 netdev_info(bp->dev, "using MSI IRQ %d\n",
2236 void bnx2x_free_mem_bp(struct bnx2x *bp)
2239 kfree(bp->msix_table);
2243 int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2245 struct bnx2x_fastpath *fp;
2246 struct msix_entry *tbl;
2247 struct bnx2x_ilt *ilt;
2250 fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
2256 tbl = kzalloc((bp->l2_cid_count + 1) * sizeof(*tbl),
2260 bp->msix_table = tbl;
2263 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
2270 bnx2x_free_mem_bp(bp);
2275 /* called with rtnl_lock */
2276 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2278 struct bnx2x *bp = netdev_priv(dev);
2281 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2282 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2286 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2287 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2290 /* This does not race with packet allocation
2291 * because the actual alloc size is
2292 * only updated as part of load
2296 if (netif_running(dev)) {
2297 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2298 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2304 void bnx2x_tx_timeout(struct net_device *dev)
2306 struct bnx2x *bp = netdev_priv(dev);
2308 #ifdef BNX2X_STOP_ON_ERROR
2312 /* This allows the netif to be shut down gracefully before resetting */
2313 schedule_delayed_work(&bp->reset_task, 0);
2317 /* called with rtnl_lock */
2318 void bnx2x_vlan_rx_register(struct net_device *dev,
2319 struct vlan_group *vlgrp)
2321 struct bnx2x *bp = netdev_priv(dev);
2328 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2330 struct net_device *dev = pci_get_drvdata(pdev);
2334 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2337 bp = netdev_priv(dev);
2341 pci_save_state(pdev);
2343 if (!netif_running(dev)) {
2348 netif_device_detach(dev);
2350 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2352 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2359 int bnx2x_resume(struct pci_dev *pdev)
2361 struct net_device *dev = pci_get_drvdata(pdev);
2366 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2369 bp = netdev_priv(dev);
2371 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2372 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2378 pci_restore_state(pdev);
2380 if (!netif_running(dev)) {
2385 bnx2x_set_power_state(bp, PCI_D0);
2386 netif_device_attach(dev);
2388 /* Since the chip was reset, clear the FW sequence number */
2390 rc = bnx2x_nic_load(bp, LOAD_OPEN);