1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
18#include <linux/etherdevice.h>
19#include <linux/ip.h>
20#include <net/ipv6.h>
21#include <net/ip6_checksum.h>
22#include <linux/firmware.h>
23#include "bnx2x_cmn.h"
24
25#ifdef BCM_VLAN
26#include <linux/if_vlan.h>
27#endif
28
29#include "bnx2x_init.h"
30
31
32/* free skb in the packet ring at pos idx
33 * return idx of last bd freed
34 */
35static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
36 u16 idx)
37{
38 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
39 struct eth_tx_start_bd *tx_start_bd;
40 struct eth_tx_bd *tx_data_bd;
41 struct sk_buff *skb = tx_buf->skb;
42 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
43 int nbd;
44
45 /* prefetch skb end pointer to speedup dev_kfree_skb() */
46 prefetch(&skb->end);
47
48 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
49 idx, tx_buf, skb);
50
51 /* unmap first bd */
52 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
53 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
54 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
55			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
56
57 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
58#ifdef BNX2X_STOP_ON_ERROR
59 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
60 BNX2X_ERR("BAD nbd!\n");
61 bnx2x_panic();
62 }
63#endif
64 new_cons = nbd + tx_buf->first_bd;
65
66 /* Get the next bd */
67 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
68
69 /* Skip a parse bd... */
70 --nbd;
71 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
72
73 /* ...and the TSO split header bd since they have no mapping */
74 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
75 --nbd;
76 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
77 }
78
79 /* now free frags */
80 while (nbd > 0) {
81
82 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
83 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
84 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
85 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
86 if (--nbd)
87 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
88 }
89
90 /* release skb */
91 WARN_ON(!skb);
92 dev_kfree_skb(skb);
93 tx_buf->first_bd = 0;
94 tx_buf->skb = NULL;
95
96 return new_cons;
97}
98
99int bnx2x_tx_int(struct bnx2x_fastpath *fp)
100{
101 struct bnx2x *bp = fp->bp;
102 struct netdev_queue *txq;
103 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
104
105#ifdef BNX2X_STOP_ON_ERROR
106 if (unlikely(bp->panic))
107 return -1;
108#endif
109
110 txq = netdev_get_tx_queue(bp->dev, fp->index);
111 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
112 sw_cons = fp->tx_pkt_cons;
113
114 while (sw_cons != hw_cons) {
115 u16 pkt_cons;
116
117 pkt_cons = TX_BD(sw_cons);
118
119 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
120 " pkt_cons %u\n",
121 fp->index, hw_cons, sw_cons, pkt_cons);
122
123 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
124 sw_cons++;
125 }
126
127 fp->tx_pkt_cons = sw_cons;
128 fp->tx_bd_cons = bd_cons;
129
130 /* Need to make the tx_bd_cons update visible to start_xmit()
131 * before checking for netif_tx_queue_stopped(). Without the
132 * memory barrier, there is a small possibility that
133 * start_xmit() will miss it and cause the queue to be stopped
134 * forever.
135 */
136 smp_mb();
137
138 if (unlikely(netif_tx_queue_stopped(txq))) {
139 /* Taking tx_lock() is needed to prevent reenabling the queue
140		 * while it's empty. This could have happened if rx_action() gets
141 * suspended in bnx2x_tx_int() after the condition before
142 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
143 *
144 * stops the queue->sees fresh tx_bd_cons->releases the queue->
145 * sends some packets consuming the whole queue again->
146 * stops the queue
147 */
148
149 __netif_tx_lock(txq, smp_processor_id());
150
151 if ((netif_tx_queue_stopped(txq)) &&
152 (bp->state == BNX2X_STATE_OPEN) &&
153 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
154 netif_tx_wake_queue(txq);
155
156 __netif_tx_unlock(txq);
157 }
158 return 0;
159}
160
161static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
162 u16 idx)
163{
164 u16 last_max = fp->last_max_sge;
165
166 if (SUB_S16(idx, last_max) > 0)
167 fp->last_max_sge = idx;
168}
169
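/* Walk the SGL reported in the fast-path CQE: clear the mask bit for each
 * SGE page the FW consumed, then advance rx_sge_prod past every mask
 * element that is now fully consumed.
 */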
170static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
171 struct eth_fast_path_rx_cqe *fp_cqe)
172{
173 struct bnx2x *bp = fp->bp;
174 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
175 le16_to_cpu(fp_cqe->len_on_bd)) >>
176 SGE_PAGE_SHIFT;
177 u16 last_max, last_elem, first_elem;
178 u16 delta = 0;
179 u16 i;
180
181 if (!sge_len)
182 return;
183
184 /* First mark all used pages */
185 for (i = 0; i < sge_len; i++)
186 SGE_MASK_CLEAR_BIT(fp,
187 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
188
189 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
190	   sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
191
192 /* Here we assume that the last SGE index is the biggest */
193 prefetch((void *)(fp->sge_mask));
194 bnx2x_update_last_max_sge(fp,
195 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
196
197 last_max = RX_SGE(fp->last_max_sge);
198 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
199 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
200
201 /* If ring is not full */
202 if (last_elem + 1 != first_elem)
203 last_elem++;
204
205 /* Now update the prod */
206 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
207 if (likely(fp->sge_mask[i]))
208 break;
209
210 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
211 delta += RX_SGE_MASK_ELEM_SZ;
212 }
213
214 if (delta > 0) {
215 fp->rx_sge_prod += delta;
216 /* clear page-end entries */
217 bnx2x_clear_sge_mask_next_elems(fp);
218 }
219
220 DP(NETIF_MSG_RX_STATUS,
221 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
222 fp->last_max_sge, fp->rx_sge_prod);
223}
224
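/* Begin a TPA aggregation: hand the spare skb from the per-queue TPA pool
 * to the producer slot and park the partially-received skb from the
 * consumer slot in the pool until the TPA stop completion arrives.
 */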
225static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
226 struct sk_buff *skb, u16 cons, u16 prod)
227{
228 struct bnx2x *bp = fp->bp;
229 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
230 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
231 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
232 dma_addr_t mapping;
233
234 /* move empty skb from pool to prod and map it */
235 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
236 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
237 bp->rx_buf_size, DMA_FROM_DEVICE);
238 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
239
240 /* move partial skb from cons to pool (don't unmap yet) */
241 fp->tpa_pool[queue] = *cons_rx_buf;
242
243 /* mark bin state as start - print error if current state != stop */
244 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
245 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
246
247 fp->tpa_state[queue] = BNX2X_TPA_START;
248
249 /* point prod_bd to new skb */
250 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
251 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
252
253#ifdef BNX2X_STOP_ON_ERROR
254 fp->tpa_queue_used |= (1 << queue);
255#ifdef _ASM_GENERIC_INT_L64_H
256 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
257#else
258 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
259#endif
260 fp->tpa_queue_used);
261#endif
262}
263
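/* Attach the SGE pages listed in the CQE to the aggregated skb as page
 * fragments, replacing each consumed page in the SGE ring as we go.
 */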
264static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
265 struct sk_buff *skb,
266 struct eth_fast_path_rx_cqe *fp_cqe,
267 u16 cqe_idx)
268{
269 struct sw_rx_page *rx_pg, old_rx_pg;
270 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
271 u32 i, frag_len, frag_size, pages;
272 int err;
273 int j;
274
275 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
276 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
277
278 /* This is needed in order to enable forwarding support */
279 if (frag_size)
280 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
281 max(frag_size, (u32)len_on_bd));
282
283#ifdef BNX2X_STOP_ON_ERROR
284 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
285 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
286 pages, cqe_idx);
287 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
288 fp_cqe->pkt_len, len_on_bd);
289 bnx2x_panic();
290 return -EINVAL;
291 }
292#endif
293
294 /* Run through the SGL and compose the fragmented skb */
295 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
296 u16 sge_idx =
297 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
298
299 /* FW gives the indices of the SGE as if the ring is an array
300 (meaning that "next" element will consume 2 indices) */
301 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
302 rx_pg = &fp->rx_page_ring[sge_idx];
303 old_rx_pg = *rx_pg;
304
305 /* If we fail to allocate a substitute page, we simply stop
306 where we are and drop the whole packet */
307 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
308 if (unlikely(err)) {
309 fp->eth_q_stats.rx_skb_alloc_failed++;
310 return err;
311 }
312
313		/* Unmap the page as we are going to pass it to the stack */
314 dma_unmap_page(&bp->pdev->dev,
315 dma_unmap_addr(&old_rx_pg, mapping),
316 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
317
318 /* Add one frag and update the appropriate fields in the skb */
319 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
320
321 skb->data_len += frag_len;
322 skb->truesize += frag_len;
323 skb->len += frag_len;
324
325 frag_size -= frag_len;
326 }
327
328 return 0;
329}
330
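/* Finish a TPA aggregation: unmap the pooled skb, fix the IP checksum,
 * attach the SGE fragments and pass the skb up the stack. The bin returns
 * to BNX2X_TPA_STOP whether or not a replacement skb could be allocated.
 */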
331static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
332 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
333 u16 cqe_idx)
334{
335 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
336 struct sk_buff *skb = rx_buf->skb;
337 /* alloc new skb */
338 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
339
340 /* Unmap skb in the pool anyway, as we are going to change
341 pool entry status to BNX2X_TPA_STOP even if new skb allocation
342 fails. */
343 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
344 bp->rx_buf_size, DMA_FROM_DEVICE);
345
346 if (likely(new_skb)) {
347 /* fix ip xsum and give it to the stack */
348 /* (no need to map the new skb) */
349#ifdef BCM_VLAN
350 int is_vlan_cqe =
351 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
352 PARSING_FLAGS_VLAN);
353 int is_not_hwaccel_vlan_cqe =
354 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
355#endif
356
357 prefetch(skb);
358		prefetch(((char *)(skb)) + L1_CACHE_BYTES);
359
360#ifdef BNX2X_STOP_ON_ERROR
361 if (pad + len > bp->rx_buf_size) {
362 BNX2X_ERR("skb_put is about to fail... "
363 "pad %d len %d rx_buf_size %d\n",
364 pad, len, bp->rx_buf_size);
365 bnx2x_panic();
366 return;
367 }
368#endif
369
370 skb_reserve(skb, pad);
371 skb_put(skb, len);
372
373 skb->protocol = eth_type_trans(skb, bp->dev);
374 skb->ip_summed = CHECKSUM_UNNECESSARY;
375
376 {
377 struct iphdr *iph;
378
379 iph = (struct iphdr *)skb->data;
380#ifdef BCM_VLAN
381 /* If there is no Rx VLAN offloading -
382			   take the VLAN tag into account */
383 if (unlikely(is_not_hwaccel_vlan_cqe))
384 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
385#endif
386 iph->check = 0;
387 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
388 }
389
390 if (!bnx2x_fill_frag_skb(bp, fp, skb,
391 &cqe->fast_path_cqe, cqe_idx)) {
392#ifdef BCM_VLAN
393 if ((bp->vlgrp != NULL) &&
394 (le16_to_cpu(cqe->fast_path_cqe.
395 pars_flags.flags) & PARSING_FLAGS_VLAN))
396 vlan_gro_receive(&fp->napi, bp->vlgrp,
397 le16_to_cpu(cqe->fast_path_cqe.
398 vlan_tag), skb);
399 else
400#endif
401 napi_gro_receive(&fp->napi, skb);
402 } else {
403 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
404 " - dropping packet!\n");
405 dev_kfree_skb(skb);
406 }
407
408
409 /* put new skb in bin */
410 fp->tpa_pool[queue].skb = new_skb;
411
412 } else {
413 /* else drop the packet and keep the buffer in the bin */
414 DP(NETIF_MSG_RX_STATUS,
415 "Failed to allocate new skb - dropping packet!\n");
416 fp->eth_q_stats.rx_skb_alloc_failed++;
417 }
418
419 fp->tpa_state[queue] = BNX2X_TPA_STOP;
420}
421
422/* Set Toeplitz hash value in the skb using the value from the
423 * CQE (calculated by HW).
424 */
425static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
426 struct sk_buff *skb)
427{
428 /* Set Toeplitz hash from CQE */
429 if ((bp->dev->features & NETIF_F_RXHASH) &&
430 (cqe->fast_path_cqe.status_flags &
431 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
432 skb->rxhash =
433 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
434}
435
436int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
437{
438 struct bnx2x *bp = fp->bp;
439 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
440 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
441 int rx_pkt = 0;
442
443#ifdef BNX2X_STOP_ON_ERROR
444 if (unlikely(bp->panic))
445 return 0;
446#endif
447
448 /* CQ "next element" is of the size of the regular element,
449 that's why it's ok here */
450 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
451 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
452 hw_comp_cons++;
453
454 bd_cons = fp->rx_bd_cons;
455 bd_prod = fp->rx_bd_prod;
456 bd_prod_fw = bd_prod;
457 sw_comp_cons = fp->rx_comp_cons;
458 sw_comp_prod = fp->rx_comp_prod;
459
460 /* Memory barrier necessary as speculative reads of the rx
461 * buffer can be ahead of the index in the status block
462 */
463 rmb();
464
465 DP(NETIF_MSG_RX_STATUS,
466 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
467 fp->index, hw_comp_cons, sw_comp_cons);
468
469 while (sw_comp_cons != hw_comp_cons) {
470 struct sw_rx_bd *rx_buf = NULL;
471 struct sk_buff *skb;
472 union eth_rx_cqe *cqe;
473 u8 cqe_fp_flags;
474 u16 len, pad;
475
476 comp_ring_cons = RCQ_BD(sw_comp_cons);
477 bd_prod = RX_BD(bd_prod);
478 bd_cons = RX_BD(bd_cons);
479
480 /* Prefetch the page containing the BD descriptor
481 at producer's index. It will be needed when new skb is
482 allocated */
483 prefetch((void *)(PAGE_ALIGN((unsigned long)
484 (&fp->rx_desc_ring[bd_prod])) -
485 PAGE_SIZE + 1));
486
487 cqe = &fp->rx_comp_ring[comp_ring_cons];
488 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
489
490 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
491 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
492 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
493 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
494 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
495 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
496
497 /* is this a slowpath msg? */
498 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
499 bnx2x_sp_event(fp, cqe);
500 goto next_cqe;
501
502 /* this is an rx packet */
503 } else {
504 rx_buf = &fp->rx_buf_ring[bd_cons];
505 skb = rx_buf->skb;
506 prefetch(skb);
507 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
508 pad = cqe->fast_path_cqe.placement_offset;
509
510 /* If CQE is marked both TPA_START and TPA_END
511 it is a non-TPA CQE */
512 if ((!fp->disable_tpa) &&
513 (TPA_TYPE(cqe_fp_flags) !=
514 (TPA_TYPE_START | TPA_TYPE_END))) {
515 u16 queue = cqe->fast_path_cqe.queue_index;
516
517 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
518 DP(NETIF_MSG_RX_STATUS,
519 "calling tpa_start on queue %d\n",
520 queue);
521
522 bnx2x_tpa_start(fp, queue, skb,
523 bd_cons, bd_prod);
524
525 /* Set Toeplitz hash for an LRO skb */
526 bnx2x_set_skb_rxhash(bp, cqe, skb);
527
528 goto next_rx;
529 }
530
531 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
532 DP(NETIF_MSG_RX_STATUS,
533 "calling tpa_stop on queue %d\n",
534 queue);
535
536 if (!BNX2X_RX_SUM_FIX(cqe))
537					BNX2X_ERR("STOP on non-TCP "
538 "data\n");
539
540 /* This is a size of the linear data
541 on this skb */
542 len = le16_to_cpu(cqe->fast_path_cqe.
543 len_on_bd);
544 bnx2x_tpa_stop(bp, fp, queue, pad,
545 len, cqe, comp_ring_cons);
546#ifdef BNX2X_STOP_ON_ERROR
547 if (bp->panic)
548 return 0;
549#endif
550
551 bnx2x_update_sge_prod(fp,
552 &cqe->fast_path_cqe);
553 goto next_cqe;
554 }
555 }
556
557 dma_sync_single_for_device(&bp->pdev->dev,
558 dma_unmap_addr(rx_buf, mapping),
559 pad + RX_COPY_THRESH,
560 DMA_FROM_DEVICE);
561			prefetch(((char *)(skb)) + L1_CACHE_BYTES);
562
563 /* is this an error packet? */
564 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
565 DP(NETIF_MSG_RX_ERR,
566 "ERROR flags %x rx packet %u\n",
567 cqe_fp_flags, sw_comp_cons);
568 fp->eth_q_stats.rx_err_discard_pkt++;
569 goto reuse_rx;
570 }
571
572 /* Since we don't have a jumbo ring
573 * copy small packets if mtu > 1500
574 */
575 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
576 (len <= RX_COPY_THRESH)) {
577 struct sk_buff *new_skb;
578
579 new_skb = netdev_alloc_skb(bp->dev,
580 len + pad);
581 if (new_skb == NULL) {
582 DP(NETIF_MSG_RX_ERR,
583 "ERROR packet dropped "
584 "because of alloc failure\n");
585 fp->eth_q_stats.rx_skb_alloc_failed++;
586 goto reuse_rx;
587 }
588
589 /* aligned copy */
590 skb_copy_from_linear_data_offset(skb, pad,
591 new_skb->data + pad, len);
592 skb_reserve(new_skb, pad);
593 skb_put(new_skb, len);
594
595				bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
596
597 skb = new_skb;
598
599 } else
600 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
601 dma_unmap_single(&bp->pdev->dev,
602 dma_unmap_addr(rx_buf, mapping),
603 bp->rx_buf_size,
604 DMA_FROM_DEVICE);
605 skb_reserve(skb, pad);
606 skb_put(skb, len);
607
608 } else {
609 DP(NETIF_MSG_RX_ERR,
610 "ERROR packet dropped because "
611 "of alloc failure\n");
612 fp->eth_q_stats.rx_skb_alloc_failed++;
613reuse_rx:
614				bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
615 goto next_rx;
616 }
617
618 skb->protocol = eth_type_trans(skb, bp->dev);
619
620			/* Set Toeplitz hash for a non-LRO skb */
621 bnx2x_set_skb_rxhash(bp, cqe, skb);
622
623			skb_checksum_none_assert(skb);
624
625 if (bp->rx_csum) {
626 if (likely(BNX2X_RX_CSUM_OK(cqe)))
627 skb->ip_summed = CHECKSUM_UNNECESSARY;
628 else
629 fp->eth_q_stats.hw_csum_err++;
630 }
631 }
632
633 skb_record_rx_queue(skb, fp->index);
634
635#ifdef BCM_VLAN
636 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
637 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
638 PARSING_FLAGS_VLAN))
639 vlan_gro_receive(&fp->napi, bp->vlgrp,
640 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
641 else
642#endif
643 napi_gro_receive(&fp->napi, skb);
644
645
646next_rx:
647 rx_buf->skb = NULL;
648
649 bd_cons = NEXT_RX_IDX(bd_cons);
650 bd_prod = NEXT_RX_IDX(bd_prod);
651 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
652 rx_pkt++;
653next_cqe:
654 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
655 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
656
657 if (rx_pkt == budget)
658 break;
659 } /* while */
660
661 fp->rx_bd_cons = bd_cons;
662 fp->rx_bd_prod = bd_prod_fw;
663 fp->rx_comp_cons = sw_comp_cons;
664 fp->rx_comp_prod = sw_comp_prod;
665
666 /* Update producers */
667 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
668 fp->rx_sge_prod);
669
670 fp->rx_pkt += rx_pkt;
671 fp->rx_calls++;
672
673 return rx_pkt;
674}
675
676static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
677{
678 struct bnx2x_fastpath *fp = fp_cookie;
679 struct bnx2x *bp = fp->bp;
680
681 /* Return here if interrupt is disabled */
682 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
683 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
684 return IRQ_HANDLED;
685 }
686
687 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
688 "[fp %d fw_sd %d igusb %d]\n",
689 fp->index, fp->fw_sb_id, fp->igu_sb_id);
690 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
691
692#ifdef BNX2X_STOP_ON_ERROR
693 if (unlikely(bp->panic))
694 return IRQ_HANDLED;
695#endif
696
697 /* Handle Rx and Tx according to MSI-X vector */
698 prefetch(fp->rx_cons_sb);
699 prefetch(fp->tx_cons_sb);
700	prefetch(&fp->sb_running_index[SM_RX_ID]);
701 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
702
703 return IRQ_HANDLED;
704}
705
706/* HW Lock for shared dual port PHYs */
707void bnx2x_acquire_phy_lock(struct bnx2x *bp)
708{
709 mutex_lock(&bp->port.phy_mutex);
710
711 if (bp->port.need_hw_lock)
712 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
713}
714
715void bnx2x_release_phy_lock(struct bnx2x *bp)
716{
717 if (bp->port.need_hw_lock)
718 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
719
720 mutex_unlock(&bp->port.phy_mutex);
721}
722
723void bnx2x_link_report(struct bnx2x *bp)
724{
725 if (bp->flags & MF_FUNC_DIS) {
726 netif_carrier_off(bp->dev);
727 netdev_err(bp->dev, "NIC Link is Down\n");
728 return;
729 }
730
731 if (bp->link_vars.link_up) {
732 u16 line_speed;
733
734 if (bp->state == BNX2X_STATE_OPEN)
735 netif_carrier_on(bp->dev);
736 netdev_info(bp->dev, "NIC Link is Up, ");
737
738 line_speed = bp->link_vars.line_speed;
739		if (IS_MF(bp)) {
740 u16 vn_max_rate;
741
742 vn_max_rate =
743 ((bp->mf_config[BP_VN(bp)] &
744 FUNC_MF_CFG_MAX_BW_MASK) >>
745 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
746 if (vn_max_rate < line_speed)
747 line_speed = vn_max_rate;
748 }
749 pr_cont("%d Mbps ", line_speed);
750
751 if (bp->link_vars.duplex == DUPLEX_FULL)
752 pr_cont("full duplex");
753 else
754 pr_cont("half duplex");
755
756 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
757 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
758 pr_cont(", receive ");
759 if (bp->link_vars.flow_ctrl &
760 BNX2X_FLOW_CTRL_TX)
761 pr_cont("& transmit ");
762 } else {
763 pr_cont(", transmit ");
764 }
765 pr_cont("flow control ON");
766 }
767 pr_cont("\n");
768
769 } else { /* link_down */
770 netif_carrier_off(bp->dev);
771 netdev_err(bp->dev, "NIC Link is Down\n");
772 }
773}
774
775/* Returns the number of actually allocated BDs */
776static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
777 int rx_ring_size)
778{
779 struct bnx2x *bp = fp->bp;
780 u16 ring_prod, cqe_ring_prod;
781 int i;
782
783 fp->rx_comp_cons = 0;
784 cqe_ring_prod = ring_prod = 0;
785 for (i = 0; i < rx_ring_size; i++) {
786 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
787 BNX2X_ERR("was only able to allocate "
788 "%d rx skbs on queue[%d]\n", i, fp->index);
789 fp->eth_q_stats.rx_skb_alloc_failed++;
790 break;
791 }
792 ring_prod = NEXT_RX_IDX(ring_prod);
793 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
794 WARN_ON(ring_prod <= i);
795 }
796
797 fp->rx_bd_prod = ring_prod;
798 /* Limit the CQE producer by the CQE ring size */
799 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
800 cqe_ring_prod);
801 fp->rx_pkt = fp->rx_calls = 0;
802
803 return i;
804}
805
806static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
807{
808 struct bnx2x *bp = fp->bp;
809 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
810 MAX_RX_AVAIL/bp->num_queues;
811
812 rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
813
814 bnx2x_alloc_rx_bds(fp, rx_ring_size);
815
816 /* Warning!
817 * this will generate an interrupt (to the TSTORM)
818 * must only be done after chip is initialized
819 */
820 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
821 fp->rx_sge_prod);
822}
823
824void bnx2x_init_rx_rings(struct bnx2x *bp)
825{
826 int func = BP_FUNC(bp);
827 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
828 ETH_MAX_AGGREGATION_QUEUES_E1H;
829	u16 ring_prod;
830	int i, j;
831
832 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
833 BNX2X_FW_IP_HDR_ALIGN_PAD;
834
835 DP(NETIF_MSG_IFUP,
836 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
837
838 for_each_queue(bp, j) {
839 struct bnx2x_fastpath *fp = &bp->fp[j];
840
841		if (!fp->disable_tpa) {
842 for (i = 0; i < max_agg_queues; i++) {
843 fp->tpa_pool[i].skb =
844 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
845 if (!fp->tpa_pool[i].skb) {
846 BNX2X_ERR("Failed to allocate TPA "
847 "skb pool for queue[%d] - "
848 "disabling TPA on this "
849 "queue!\n", j);
850 bnx2x_free_tpa_pool(bp, fp, i);
851 fp->disable_tpa = 1;
852 break;
853 }
854 dma_unmap_addr_set((struct sw_rx_bd *)
855 &bp->fp->tpa_pool[i],
856 mapping, 0);
857 fp->tpa_state[i] = BNX2X_TPA_STOP;
858 }
859
860 /* "next page" elements initialization */
861 bnx2x_set_next_page_sgl(fp);
862
863 /* set SGEs bit mask */
864 bnx2x_init_sge_ring_bit_mask(fp);
865
866 /* Allocate SGEs and initialize the ring elements */
867 for (i = 0, ring_prod = 0;
868 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
869
870 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
871 BNX2X_ERR("was only able to allocate "
872 "%d rx sges\n", i);
873 BNX2X_ERR("disabling TPA for"
874 " queue[%d]\n", j);
875 /* Cleanup already allocated elements */
876 bnx2x_free_rx_sge_range(bp,
877 fp, ring_prod);
878 bnx2x_free_tpa_pool(bp,
879 fp, max_agg_queues);
880 fp->disable_tpa = 1;
881 ring_prod = 0;
882 break;
883 }
884 ring_prod = NEXT_SGE_IDX(ring_prod);
885 }
886
887 fp->rx_sge_prod = ring_prod;
888 }
889 }
890
891 for_each_queue(bp, j) {
892 struct bnx2x_fastpath *fp = &bp->fp[j];
893
894 fp->rx_bd_cons = 0;
895
896		bnx2x_set_next_page_rx_bd(fp);
897
898 /* CQ ring */
899		bnx2x_set_next_page_rx_cq(fp);
900
901 /* Allocate BDs and initialize BD ring */
902		bnx2x_alloc_rx_bd_ring(fp);
903
904 if (j != 0)
905 continue;
906
907 if (!CHIP_IS_E2(bp)) {
908 REG_WR(bp, BAR_USTRORM_INTMEM +
909 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
910 U64_LO(fp->rx_comp_mapping));
911 REG_WR(bp, BAR_USTRORM_INTMEM +
912 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
913 U64_HI(fp->rx_comp_mapping));
914 }
915 }
916}
917
918static void bnx2x_free_tx_skbs(struct bnx2x *bp)
919{
920 int i;
921
922 for_each_queue(bp, i) {
923 struct bnx2x_fastpath *fp = &bp->fp[i];
924
925 u16 bd_cons = fp->tx_bd_cons;
926 u16 sw_prod = fp->tx_pkt_prod;
927 u16 sw_cons = fp->tx_pkt_cons;
928
929 while (sw_cons != sw_prod) {
930 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
931 sw_cons++;
932 }
933 }
934}
935
936static void bnx2x_free_rx_skbs(struct bnx2x *bp)
937{
938 int i, j;
939
940 for_each_queue(bp, j) {
941 struct bnx2x_fastpath *fp = &bp->fp[j];
942
943 for (i = 0; i < NUM_RX_BD; i++) {
944 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
945 struct sk_buff *skb = rx_buf->skb;
946
947 if (skb == NULL)
948 continue;
949
950 dma_unmap_single(&bp->pdev->dev,
951 dma_unmap_addr(rx_buf, mapping),
952 bp->rx_buf_size, DMA_FROM_DEVICE);
953
954 rx_buf->skb = NULL;
955 dev_kfree_skb(skb);
956 }
957 if (!fp->disable_tpa)
958 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
959 ETH_MAX_AGGREGATION_QUEUES_E1 :
960 ETH_MAX_AGGREGATION_QUEUES_E1H);
961 }
962}
963
964void bnx2x_free_skbs(struct bnx2x *bp)
965{
966 bnx2x_free_tx_skbs(bp);
967 bnx2x_free_rx_skbs(bp);
968}
969
970static void bnx2x_free_msix_irqs(struct bnx2x *bp)
971{
972 int i, offset = 1;
973
974 free_irq(bp->msix_table[0].vector, bp->dev);
975 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
976 bp->msix_table[0].vector);
977
978#ifdef BCM_CNIC
979 offset++;
980#endif
981 for_each_queue(bp, i) {
982 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
983 "state %x\n", i, bp->msix_table[i + offset].vector,
984 bnx2x_fp(bp, i, state));
985
986 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
987 }
988}
989
990void bnx2x_free_irq(struct bnx2x *bp)
991{
992 if (bp->flags & USING_MSIX_FLAG)
993 bnx2x_free_msix_irqs(bp);
994 else if (bp->flags & USING_MSI_FLAG)
995 free_irq(bp->pdev->irq, bp->dev);
996 else
997 free_irq(bp->pdev->irq, bp->dev);
998}
999
1000int bnx2x_enable_msix(struct bnx2x *bp)
1001{
1002	int msix_vec = 0, i, rc, req_cnt;
1003
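	/* MSI-X table layout: entry 0 is the slowpath vector, then an
	 * optional CNIC entry, then one entry per fastpath queue.
	 */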
1004 bp->msix_table[msix_vec].entry = msix_vec;
1005 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1006 bp->msix_table[0].entry);
1007 msix_vec++;
1008
1009#ifdef BCM_CNIC
1010 bp->msix_table[msix_vec].entry = msix_vec;
1011 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1012 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1013 msix_vec++;
1014#endif
1015 for_each_queue(bp, i) {
1016		bp->msix_table[msix_vec].entry = msix_vec;
1017		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1018 "(fastpath #%u)\n", msix_vec, msix_vec, i);
1019 msix_vec++;
1020 }
1021
1022 req_cnt = BNX2X_NUM_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
1023
1024 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1025
1026 /*
1027 * reconfigure number of tx/rx queues according to available
1028 * MSI-X vectors
1029 */
1030 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1031		/* how many fewer vectors will we have? */
1032 int diff = req_cnt - rc;
1033
1034 DP(NETIF_MSG_IFUP,
1035 "Trying to use less MSI-X vectors: %d\n", rc);
1036
1037 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1038
1039 if (rc) {
1040 DP(NETIF_MSG_IFUP,
1041 "MSI-X is not attainable rc %d\n", rc);
1042 return rc;
1043 }
1044 /*
1045 * decrease number of queues by number of unallocated entries
1046 */
1047 bp->num_queues -= diff;
1048
1049 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1050 bp->num_queues);
1051 } else if (rc) {
1052		/* fall back to INTx if not enough memory */
1053 if (rc == -ENOMEM)
1054 bp->flags |= DISABLE_MSI_FLAG;
1055 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1056 return rc;
1057 }
1058
1059 bp->flags |= USING_MSIX_FLAG;
1060
1061 return 0;
1062}
1063
1064static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1065{
1066 int i, rc, offset = 1;
1067
1068 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
1069 bp->dev->name, bp->dev);
1070 if (rc) {
1071 BNX2X_ERR("request sp irq failed\n");
1072 return -EBUSY;
1073 }
1074
1075#ifdef BCM_CNIC
1076 offset++;
1077#endif
1078 for_each_queue(bp, i) {
1079 struct bnx2x_fastpath *fp = &bp->fp[i];
1080 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1081 bp->dev->name, i);
1082
1083		rc = request_irq(bp->msix_table[offset].vector,
1084 bnx2x_msix_fp_int, 0, fp->name, fp);
1085 if (rc) {
1086 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
1087 bnx2x_free_msix_irqs(bp);
1088 return -EBUSY;
1089 }
1090
1091		offset++;
1092 fp->state = BNX2X_FP_STATE_IRQ;
1093 }
1094
1095 i = BNX2X_NUM_QUEUES(bp);
1096	offset = 1 + CNIC_CONTEXT_USE;
1097 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1098 " ... fp[%d] %d\n",
1099 bp->msix_table[0].vector,
1100 0, bp->msix_table[offset].vector,
1101 i - 1, bp->msix_table[offset + i - 1].vector);
1102
1103 return 0;
1104}
1105
1106int bnx2x_enable_msi(struct bnx2x *bp)
1107{
1108 int rc;
1109
1110 rc = pci_enable_msi(bp->pdev);
1111 if (rc) {
1112 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1113 return -1;
1114 }
1115 bp->flags |= USING_MSI_FLAG;
1116
1117 return 0;
1118}
1119
1120static int bnx2x_req_irq(struct bnx2x *bp)
1121{
1122 unsigned long flags;
1123 int rc;
1124
1125 if (bp->flags & USING_MSI_FLAG)
1126 flags = 0;
1127 else
1128 flags = IRQF_SHARED;
1129
1130 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1131 bp->dev->name, bp->dev);
1132 if (!rc)
1133 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1134
1135 return rc;
1136}
1137
1138static void bnx2x_napi_enable(struct bnx2x *bp)
1139{
1140 int i;
1141
1142 for_each_queue(bp, i)
1143 napi_enable(&bnx2x_fp(bp, i, napi));
1144}
1145
1146static void bnx2x_napi_disable(struct bnx2x *bp)
1147{
1148 int i;
1149
1150 for_each_queue(bp, i)
1151 napi_disable(&bnx2x_fp(bp, i, napi));
1152}
1153
1154void bnx2x_netif_start(struct bnx2x *bp)
1155{
1156 int intr_sem;
1157
1158 intr_sem = atomic_dec_and_test(&bp->intr_sem);
1159 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1160
1161 if (intr_sem) {
1162 if (netif_running(bp->dev)) {
1163 bnx2x_napi_enable(bp);
1164 bnx2x_int_enable(bp);
1165 if (bp->state == BNX2X_STATE_OPEN)
1166 netif_tx_wake_all_queues(bp->dev);
1167 }
1168 }
1169}
1170
1171void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1172{
1173 bnx2x_int_disable_sync(bp, disable_hw);
1174 bnx2x_napi_disable(bp);
1175 netif_tx_disable(bp->dev);
1176}
1177
1178void bnx2x_set_num_queues(struct bnx2x *bp)
1179{
1180 switch (bp->multi_mode) {
1181 case ETH_RSS_MODE_DISABLED:
1182		bp->num_queues = 1;
1183 break;
1184 case ETH_RSS_MODE_REGULAR:
1185 bp->num_queues = bnx2x_calc_num_queues(bp);
1186		break;
1187
1188	default:
1189		bp->num_queues = 1;
1190 break;
1191 }
1192}
1193
1194static void bnx2x_release_firmware(struct bnx2x *bp)
1195{
1196 kfree(bp->init_ops_offsets);
1197 kfree(bp->init_ops);
1198 kfree(bp->init_data);
1199 release_firmware(bp->firmware);
1200}
1201
1202/* must be called with rtnl_lock */
1203int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1204{
1205 u32 load_code;
1206 int i, rc;
1207
1208 /* Set init arrays */
1209 rc = bnx2x_init_firmware(bp);
1210 if (rc) {
1211 BNX2X_ERR("Error loading firmware\n");
1212 return rc;
1213 }
1214
1215#ifdef BNX2X_STOP_ON_ERROR
1216 if (unlikely(bp->panic))
1217 return -EPERM;
1218#endif
1219
1220 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1221
1222 /* must be called before memory allocation and HW init */
1223 bnx2x_ilt_set_info(bp);
1224
1225	if (bnx2x_alloc_mem(bp))
1226		return -ENOMEM;
1227
1228 netif_set_real_num_tx_queues(bp->dev, bp->num_queues);
1229 rc = netif_set_real_num_rx_queues(bp->dev, bp->num_queues);
1230 if (rc) {
1231 BNX2X_ERR("Unable to update real_num_rx_queues\n");
1232 goto load_error0;
1233 }
1234
1235 for_each_queue(bp, i)
1236 bnx2x_fp(bp, i, disable_tpa) =
1237 ((bp->flags & TPA_ENABLE_FLAG) == 0);
1238
1239 bnx2x_napi_enable(bp);
1240
1241 /* Send LOAD_REQUEST command to MCP
1242 Returns the type of LOAD command:
1243 if it is the first port to be initialized
1244 common blocks should be initialized, otherwise - not
1245 */
1246 if (!BP_NOMCP(bp)) {
1247		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1248 if (!load_code) {
1249 BNX2X_ERR("MCP response failure, aborting\n");
1250 rc = -EBUSY;
1251			goto load_error1;
1252 }
1253 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1254 rc = -EBUSY; /* other port in diagnostic mode */
1255			goto load_error1;
1256 }
1257
1258 } else {
1259		int path = BP_PATH(bp);
1260 int port = BP_PORT(bp);
1261
1262 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1263 path, load_count[path][0], load_count[path][1],
1264 load_count[path][2]);
1265 load_count[path][0]++;
1266 load_count[path][1 + port]++;
1267 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1268 path, load_count[path][0], load_count[path][1],
1269 load_count[path][2]);
1270 if (load_count[path][0] == 1)
1271			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1272		else if (load_count[path][1 + port] == 1)
1273 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1274 else
1275 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1276 }
1277
1278 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1279	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
1280 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1281 bp->port.pmf = 1;
1282 else
1283 bp->port.pmf = 0;
1284 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1285
1286 /* Initialize HW */
1287 rc = bnx2x_init_hw(bp, load_code);
1288 if (rc) {
1289 BNX2X_ERR("HW init failed, aborting\n");
1290 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1291 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1292 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1293 goto load_error2;
1294 }
1295
1296 /* Connect to IRQs */
1297 rc = bnx2x_setup_irqs(bp);
1298 if (rc) {
1299 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1300 goto load_error2;
1301 }
1302
1303 /* Setup NIC internals and enable interrupts */
1304 bnx2x_nic_init(bp, load_code);
1305
1306 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1307 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
1308 (bp->common.shmem2_base))
1309 SHMEM2_WR(bp, dcc_support,
1310 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1311 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1312
1313 /* Send LOAD_DONE command to MCP */
1314 if (!BP_NOMCP(bp)) {
1315		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1316 if (!load_code) {
1317 BNX2X_ERR("MCP response failure, aborting\n");
1318 rc = -EBUSY;
1319 goto load_error3;
1320 }
1321 }
1322
1323 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1324
1325 rc = bnx2x_func_start(bp);
1326 if (rc) {
1327 BNX2X_ERR("Function start failed!\n");
1328#ifndef BNX2X_STOP_ON_ERROR
1329 goto load_error3;
1330#else
1331 bp->panic = 1;
1332 return -EBUSY;
1333#endif
1334 }
1335
1336 rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
1337 if (rc) {
1338 BNX2X_ERR("Setup leading failed!\n");
1339#ifndef BNX2X_STOP_ON_ERROR
1340 goto load_error3;
1341#else
1342 bp->panic = 1;
1343 return -EBUSY;
1344#endif
1345 }
1346
1347 if (!CHIP_IS_E1(bp) &&
1348 (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
1349 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1350 bp->flags |= MF_FUNC_DIS;
1351 }
1352
1353#ifdef BCM_CNIC
1354 /* Enable Timer scan */
1355 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
1356#endif
1357
1358 for_each_nondefault_queue(bp, i) {
1359 rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
1360 if (rc)
1361#ifdef BCM_CNIC
1362			goto load_error4;
1363#else
1364			goto load_error3;
1365#endif
1366 }
1367
1368 /* Now when Clients are configured we are ready to work */
1369 bp->state = BNX2X_STATE_OPEN;
1370
1371 bnx2x_set_eth_mac(bp, 1);
1372
1373 if (bp->port.pmf)
1374 bnx2x_initial_phy_init(bp, load_mode);
1375
1376 /* Start fast path */
1377 switch (load_mode) {
1378 case LOAD_NORMAL:
1379		/* Tx queue should only be re-enabled */
1380 netif_tx_wake_all_queues(bp->dev);
1381 /* Initialize the receive filter. */
1382 bnx2x_set_rx_mode(bp->dev);
1383 break;
1384
1385 case LOAD_OPEN:
1386 netif_tx_start_all_queues(bp->dev);
1387		smp_mb__after_clear_bit();
1388 /* Initialize the receive filter. */
1389 bnx2x_set_rx_mode(bp->dev);
1390 break;
1391
1392 case LOAD_DIAG:
1393 /* Initialize the receive filter. */
1394 bnx2x_set_rx_mode(bp->dev);
1395 bp->state = BNX2X_STATE_DIAG;
1396 break;
1397
1398 default:
1399 break;
1400 }
1401
1402 if (!bp->port.pmf)
1403 bnx2x__link_status_update(bp);
1404
1405 /* start the timer */
1406 mod_timer(&bp->timer, jiffies + bp->current_interval);
1407
1408#ifdef BCM_CNIC
1409 bnx2x_setup_cnic_irq_info(bp);
1410 if (bp->state == BNX2X_STATE_OPEN)
1411 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1412#endif
1413 bnx2x_inc_load_cnt(bp);
1414
1415 bnx2x_release_firmware(bp);
1416
1417 return 0;
1418
1419#ifdef BCM_CNIC
1420load_error4:
1421 /* Disable Timer scan */
1422 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
1423#endif
1424load_error3:
1425 bnx2x_int_disable_sync(bp, 1);
1426
1427 /* Free SKBs, SGEs, TPA pool and driver internals */
1428 bnx2x_free_skbs(bp);
1429 for_each_queue(bp, i)
1430 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1431
1432	/* Release IRQs */
1433 bnx2x_free_irq(bp);
1434load_error2:
1435 if (!BP_NOMCP(bp)) {
1436 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1437 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1438 }
1439
1440 bp->port.pmf = 0;
1441load_error1:
1442 bnx2x_napi_disable(bp);
1443load_error0:
1444 bnx2x_free_mem(bp);
1445
1446 bnx2x_release_firmware(bp);
1447
1448 return rc;
1449}
1450
1451/* must be called with rtnl_lock */
1452int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1453{
1454 int i;
1455
1456 if (bp->state == BNX2X_STATE_CLOSED) {
1457 /* Interface has been removed - nothing to recover */
1458 bp->recovery_state = BNX2X_RECOVERY_DONE;
1459 bp->is_leader = 0;
1460 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1461 smp_wmb();
1462
1463 return -EINVAL;
1464 }
1465
1466#ifdef BCM_CNIC
1467 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1468#endif
1469 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1470
1471 /* Set "drop all" */
1472 bp->rx_mode = BNX2X_RX_MODE_NONE;
1473 bnx2x_set_storm_rx_mode(bp);
1474
1475 /* Stop Tx */
1476 bnx2x_tx_disable(bp);
1477
1478	del_timer_sync(&bp->timer);
1479
1480	SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
1481		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
1482
1483	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1484
1485 /* Cleanup the chip if needed */
1486 if (unload_mode != UNLOAD_RECOVERY)
1487 bnx2x_chip_cleanup(bp, unload_mode);
1488 else {
1489 /* Disable HW interrupts, NAPI and Tx */
1490 bnx2x_netif_stop(bp, 1);
1491
1492 /* Release IRQs */
1493		bnx2x_free_irq(bp);
1494	}
1495
1496 bp->port.pmf = 0;
1497
1498 /* Free SKBs, SGEs, TPA pool and driver internals */
1499 bnx2x_free_skbs(bp);
1500 for_each_queue(bp, i)
1501 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1502
1503 bnx2x_free_mem(bp);
1504
1505 bp->state = BNX2X_STATE_CLOSED;
1506
1507 /* The last driver must disable a "close the gate" if there is no
1508 * parity attention or "process kill" pending.
1509 */
1510 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1511 bnx2x_reset_is_done(bp))
1512 bnx2x_disable_close_the_gate(bp);
1513
1514	/* Reset MCP mailbox sequence if there is an ongoing recovery */
1515 if (unload_mode == UNLOAD_RECOVERY)
1516 bp->fw_seq = 0;
1517
1518 return 0;
1519}
1520
1521int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1522{
1523 u16 pmcsr;
1524
1525 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1526
1527 switch (state) {
1528 case PCI_D0:
1529 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1530 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
1531 PCI_PM_CTRL_PME_STATUS));
1532
1533 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
1534 /* delay required during transition out of D3hot */
1535 msleep(20);
1536 break;
1537
1538 case PCI_D3hot:
1539 /* If there are other clients above don't
1540 shut down the power */
1541 if (atomic_read(&bp->pdev->enable_cnt) != 1)
1542 return 0;
1543 /* Don't shut down the power for emulation and FPGA */
1544 if (CHIP_REV_IS_SLOW(bp))
1545 return 0;
1546
1547 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1548 pmcsr |= 3;
1549
1550 if (bp->wol)
1551 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1552
1553 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1554 pmcsr);
1555
1556 /* No more memory access after this point until
1557 * device is brought back to D0.
1558 */
1559 break;
1560
1561 default:
1562 return -EINVAL;
1563 }
1564 return 0;
1565}
1566
1567/*
1568 * net_device service functions
1569 */
1570int bnx2x_poll(struct napi_struct *napi, int budget)
1571{
1572 int work_done = 0;
1573 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1574 napi);
1575 struct bnx2x *bp = fp->bp;
1576
1577 while (1) {
1578#ifdef BNX2X_STOP_ON_ERROR
1579 if (unlikely(bp->panic)) {
1580 napi_complete(napi);
1581 return 0;
1582 }
1583#endif
1584
1585 if (bnx2x_has_tx_work(fp))
1586 bnx2x_tx_int(fp);
1587
1588 if (bnx2x_has_rx_work(fp)) {
1589 work_done += bnx2x_rx_int(fp, budget - work_done);
1590
1591 /* must not complete if we consumed full budget */
1592 if (work_done >= budget)
1593 break;
1594 }
1595
1596 /* Fall out from the NAPI loop if needed */
1597 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1598 bnx2x_update_fpsb_idx(fp);
1599 /* bnx2x_has_rx_work() reads the status block,
1600 * thus we need to ensure that status block indices
1601 * have been actually read (bnx2x_update_fpsb_idx)
1602 * prior to this check (bnx2x_has_rx_work) so that
1603 * we won't write the "newer" value of the status block
1604 * to IGU (if there was a DMA right after
1605 * bnx2x_has_rx_work and if there is no rmb, the memory
1606 * reading (bnx2x_update_fpsb_idx) may be postponed
1607 * to right before bnx2x_ack_sb). In this case there
1608 * will never be another interrupt until there is
1609 * another update of the status block, while there
1610 * is still unhandled work.
1611 */
1612 rmb();
1613
1614 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1615 napi_complete(napi);
1616 /* Re-enable interrupts */
1617 DP(NETIF_MSG_HW,
1618 "Update index to %d\n", fp->fp_hc_idx);
1619 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
1620 le16_to_cpu(fp->fp_hc_idx),
1621 IGU_INT_ENABLE, 1);
1622 break;
1623 }
1624 }
1625 }
1626
1627 return work_done;
1628}
1629
1630/* we split the first BD into headers and data BDs
1631 * to ease the pain of our fellow microcode engineers
1632 * we use one mapping for both BDs
1633 * So far this has only been observed to happen
1634 * in Other Operating Systems(TM)
1635 */
1636static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1637 struct bnx2x_fastpath *fp,
1638 struct sw_tx_bd *tx_buf,
1639 struct eth_tx_start_bd **tx_bd, u16 hlen,
1640 u16 bd_prod, int nbd)
1641{
1642 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1643 struct eth_tx_bd *d_tx_bd;
1644 dma_addr_t mapping;
1645 int old_len = le16_to_cpu(h_tx_bd->nbytes);
1646
1647 /* first fix first BD */
1648 h_tx_bd->nbd = cpu_to_le16(nbd);
1649 h_tx_bd->nbytes = cpu_to_le16(hlen);
1650
1651 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1652 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1653 h_tx_bd->addr_lo, h_tx_bd->nbd);
1654
1655 /* now get a new data BD
1656 * (after the pbd) and fill it */
1657 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1658 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1659
1660 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1661 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1662
1663 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1664 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1665 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1666
1667 /* this marks the BD as one that has no individual mapping */
1668 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1669
1670 DP(NETIF_MSG_TX_QUEUED,
1671 "TSO split data size is %d (%x:%x)\n",
1672 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1673
1674 /* update tx_bd */
1675 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
1676
1677 return bd_prod;
1678}
1679
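/* Compensate a checksum that was computed starting 'fix' bytes before
 * (fix > 0) or after (fix < 0) the transport header, then byte-swap it
 * for the parsing BD.
 */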
1680static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1681{
1682 if (fix > 0)
1683 csum = (u16) ~csum_fold(csum_sub(csum,
1684 csum_partial(t_header - fix, fix, 0)));
1685
1686 else if (fix < 0)
1687 csum = (u16) ~csum_fold(csum_add(csum,
1688 csum_partial(t_header, -fix, 0)));
1689
1690 return swab16(csum);
1691}
1692
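/* Classify the skb for transmit: plain, IPv4/IPv6 checksum offload,
 * TCP checksum and/or GSO, encoded as XMIT_* flag bits.
 */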
1693static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1694{
1695 u32 rc;
1696
1697 if (skb->ip_summed != CHECKSUM_PARTIAL)
1698 rc = XMIT_PLAIN;
1699
1700 else {
1701 if (skb->protocol == htons(ETH_P_IPV6)) {
1702 rc = XMIT_CSUM_V6;
1703 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1704 rc |= XMIT_CSUM_TCP;
1705
1706 } else {
1707 rc = XMIT_CSUM_V4;
1708 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1709 rc |= XMIT_CSUM_TCP;
1710 }
1711 }
1712
1713 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
1714 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
1715
1716 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
1717 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
1718
1719 return rc;
1720}
1721
1722#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1723/* check if packet requires linearization (packet is too fragmented)
1724 no need to check fragmentation if page size > 8K (there will be no
1725 violation to FW restrictions) */
1726static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
1727 u32 xmit_type)
1728{
1729 int to_copy = 0;
1730 int hlen = 0;
1731 int first_bd_sz = 0;
1732
1733 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
1734 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
1735
1736 if (xmit_type & XMIT_GSO) {
1737 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
1738 /* Check if LSO packet needs to be copied:
1739 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
1740 int wnd_size = MAX_FETCH_BD - 3;
1741 /* Number of windows to check */
1742 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
1743 int wnd_idx = 0;
1744 int frag_idx = 0;
1745 u32 wnd_sum = 0;
1746
1747 /* Headers length */
1748 hlen = (int)(skb_transport_header(skb) - skb->data) +
1749 tcp_hdrlen(skb);
1750
1751			/* Amount of data (w/o headers) on linear part of SKB */
1752 first_bd_sz = skb_headlen(skb) - hlen;
1753
1754 wnd_sum = first_bd_sz;
1755
1756 /* Calculate the first sum - it's special */
1757 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
1758 wnd_sum +=
1759 skb_shinfo(skb)->frags[frag_idx].size;
1760
1761 /* If there was data on linear skb data - check it */
1762 if (first_bd_sz > 0) {
1763 if (unlikely(wnd_sum < lso_mss)) {
1764 to_copy = 1;
1765 goto exit_lbl;
1766 }
1767
1768 wnd_sum -= first_bd_sz;
1769 }
1770
1771 /* Others are easier: run through the frag list and
1772 check all windows */
1773 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
1774 wnd_sum +=
1775 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
1776
1777 if (unlikely(wnd_sum < lso_mss)) {
1778 to_copy = 1;
1779 break;
1780 }
1781 wnd_sum -=
1782 skb_shinfo(skb)->frags[wnd_idx].size;
1783 }
1784 } else {
1785 /* in non-LSO too fragmented packet should always
1786 be linearized */
1787 to_copy = 1;
1788 }
1789 }
1790
1791exit_lbl:
1792 if (unlikely(to_copy))
1793 DP(NETIF_MSG_TX_QUEUED,
1794 "Linearization IS REQUIRED for %s packet. "
1795 "num_frags %d hlen %d first_bd_sz %d\n",
1796 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
1797 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
1798
1799 return to_copy;
1800}
1801#endif
1802
1803static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb,
1804 struct eth_tx_parse_bd_e2 *pbd,
1805 u32 xmit_type)
1806{
1807 pbd->parsing_data |= cpu_to_le16(skb_shinfo(skb)->gso_size) <<
1808 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT;
1809 if ((xmit_type & XMIT_GSO_V6) &&
1810 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
1811 pbd->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
1812}
1813
1814/**
1815 * Update PBD in GSO case.
1816 *
1817 * @param skb
1818 * @param tx_start_bd
1819 * @param pbd
1820 * @param xmit_type
1821 */
1822static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
1823 struct eth_tx_parse_bd_e1x *pbd,
1824 u32 xmit_type)
1825{
1826 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1827 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
1828 pbd->tcp_flags = pbd_tcp_flags(skb);
1829
1830 if (xmit_type & XMIT_GSO_V4) {
1831 pbd->ip_id = swab16(ip_hdr(skb)->id);
1832 pbd->tcp_pseudo_csum =
1833 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1834 ip_hdr(skb)->daddr,
1835 0, IPPROTO_TCP, 0));
1836
1837 } else
1838 pbd->tcp_pseudo_csum =
1839 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1840 &ipv6_hdr(skb)->daddr,
1841 0, IPPROTO_TCP, 0));
1842
1843 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
1844}
1845
1846/**
1847 *
1848 * @param skb
1849 * @param tx_start_bd
1850 * @param pbd_e2
1851 * @param xmit_type
1852 *
1853 * @return header len
1854 */
1855static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
1856 struct eth_tx_parse_bd_e2 *pbd,
1857 u32 xmit_type)
1858{
1859 pbd->parsing_data |= cpu_to_le16(tcp_hdrlen(skb)/4) <<
1860 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT;
1861
1862 pbd->parsing_data |= cpu_to_le16(((unsigned char *)tcp_hdr(skb) -
1863 skb->data) / 2) <<
1864 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT;
1865
1866 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
1867}
1868
1869/**
1870 *
1871 * @param skb
1872 * @param tx_start_bd
1873 * @param pbd
1874 * @param xmit_type
1875 *
1876 * @return Header length
1877 */
1878static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
1879 struct eth_tx_parse_bd_e1x *pbd,
1880 u32 xmit_type)
1881{
1882 u8 hlen = (skb_network_header(skb) - skb->data) / 2;
1883
1884 /* for now NS flag is not used in Linux */
1885 pbd->global_data =
1886 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
1887 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
1888
1889 pbd->ip_hlen_w = (skb_transport_header(skb) -
1890 skb_network_header(skb)) / 2;
1891
1892 hlen += pbd->ip_hlen_w + tcp_hdrlen(skb) / 2;
1893
1894 pbd->total_hlen_w = cpu_to_le16(hlen);
1895 hlen = hlen*2;
1896
1897 if (xmit_type & XMIT_CSUM_TCP) {
1898 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
1899
1900 } else {
1901 s8 fix = SKB_CS_OFF(skb); /* signed! */
1902
1903 DP(NETIF_MSG_TX_QUEUED,
1904 "hlen %d fix %d csum before fix %x\n",
1905 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
1906
1907 /* HW bug: fixup the CSUM */
1908 pbd->tcp_pseudo_csum =
1909 bnx2x_csum_fix(skb_transport_header(skb),
1910 SKB_CS(skb), fix);
1911
1912 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
1913 pbd->tcp_pseudo_csum);
1914 }
1915
1916 return hlen;
1917}
1918
1919/* called with netif_tx_lock
1920 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
1921 * netif_wake_queue()
1922 */
1923netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1924{
1925 struct bnx2x *bp = netdev_priv(dev);
1926 struct bnx2x_fastpath *fp;
1927 struct netdev_queue *txq;
1928 struct sw_tx_bd *tx_buf;
1929 struct eth_tx_start_bd *tx_start_bd;
1930 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
1931	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
1932	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
1933 u16 pkt_prod, bd_prod;
1934 int nbd, fp_index;
1935 dma_addr_t mapping;
1936 u32 xmit_type = bnx2x_xmit_type(bp, skb);
1937 int i;
1938 u8 hlen = 0;
1939 __le16 pkt_size = 0;
1940 struct ethhdr *eth;
1941 u8 mac_type = UNICAST_ADDRESS;
1942
1943#ifdef BNX2X_STOP_ON_ERROR
1944 if (unlikely(bp->panic))
1945 return NETDEV_TX_BUSY;
1946#endif
1947
1948 fp_index = skb_get_queue_mapping(skb);
1949 txq = netdev_get_tx_queue(dev, fp_index);
1950
1951 fp = &bp->fp[fp_index];
1952
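	/* Headroom check: besides one BD per fragment we need room for the
	 * linear data BD, the parsing BD and the last BD - presumably the
	 * same "+ 3" accounting used in bnx2x_pkt_req_lin().
	 */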
1953 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
1954 fp->eth_q_stats.driver_xoff++;
1955 netif_tx_stop_queue(txq);
1956 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
1957 return NETDEV_TX_BUSY;
1958 }
1959
1960 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
1961 "protocol(%x,%x) gso type %x xmit_type %x\n",
1962 fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
1963 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
1964
1965 eth = (struct ethhdr *)skb->data;
1966
1967 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
1968 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
1969 if (is_broadcast_ether_addr(eth->h_dest))
1970 mac_type = BROADCAST_ADDRESS;
1971 else
1972 mac_type = MULTICAST_ADDRESS;
1973 }
1974
1975#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
 1976	/* First, check if we need to linearize the skb (due to FW
 1977	   restrictions). No need to check fragmentation if page size > 8K
 1978	   (the FW restriction cannot be violated in that case) */
1979 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
1980 /* Statistics of linearization */
1981 bp->lin_cnt++;
1982 if (skb_linearize(skb) != 0) {
1983 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
1984 "silently dropping this SKB\n");
1985 dev_kfree_skb_any(skb);
1986 return NETDEV_TX_OK;
1987 }
1988 }
1989#endif
1990
 1991	/*
 1992	Please read carefully. First we use one BD which we mark as start,
 1993	then we have a parsing info BD (used for TSO or checksum offload),
 1994	and only then we have the rest of the TSO BDs.
 1995	(Don't forget to mark the last one as last,
 1996	and to unmap only AFTER you write to the BD ...)
 1997	And above all, all PBD sizes are in 16-bit words - NOT dwords!
 1998	*/
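	/*
	 * Illustrative chain for a packet with two fragments (the exact
	 * set of BDs depends on TSO and the header split below):
	 *
	 *   start BD -> parsing BD -> [TSO split header BD] -> frag BD -> frag BD
	 *
	 * nbd starts at nr_frags + 2 (start BD + parsing BD + frags) and is
	 * incremented once more if the header is split off for TSO.
	 */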
1999
2000 pkt_prod = fp->tx_pkt_prod++;
2001 bd_prod = TX_BD(fp->tx_bd_prod);
2002
2003 /* get a tx_buf and first BD */
2004 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
2005 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
2006
2007 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2008 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2009 mac_type);
2010
 2011	/* header nbd */
 2012	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
2013
2014 /* remember the first BD of the packet */
2015 tx_buf->first_bd = fp->tx_bd_prod;
2016 tx_buf->skb = skb;
2017 tx_buf->flags = 0;
2018
2019 DP(NETIF_MSG_TX_QUEUED,
2020 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2021 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
2022
2023#ifdef BCM_VLAN
2024 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
2025 (bp->flags & HW_VLAN_TX_FLAG)) {
2026 tx_start_bd->vlan_or_ethertype =
2027 cpu_to_le16(vlan_tx_tag_get(skb));
2028 tx_start_bd->bd_flags.as_bitfield |=
2029 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
2030 } else
2031#endif
 2032		tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2033
2034 /* turn on parsing and get a BD */
2035 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
 2036
2037 if (xmit_type & XMIT_CSUM) {
2038 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2039
2040 if (xmit_type & XMIT_CSUM_V4)
2041 tx_start_bd->bd_flags.as_bitfield |=
2042 ETH_TX_BD_FLAGS_IP_CSUM;
2043 else
2044 tx_start_bd->bd_flags.as_bitfield |=
2045 ETH_TX_BD_FLAGS_IPV6;
 2046
2047 if (!(xmit_type & XMIT_CSUM_TCP))
2048 tx_start_bd->bd_flags.as_bitfield |=
2049 ETH_TX_BD_FLAGS_IS_UDP;
2050 }
 2051
2052 if (CHIP_IS_E2(bp)) {
2053 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
2054 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2055 /* Set PBD in checksum offload case */
2056 if (xmit_type & XMIT_CSUM)
2057 hlen = bnx2x_set_pbd_csum_e2(bp,
2058 skb, pbd_e2, xmit_type);
2059 } else {
2060 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2061 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2062 /* Set PBD in checksum offload case */
2063 if (xmit_type & XMIT_CSUM)
2064 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
 2065
2066 }
2067
 2068	/* Map skb linear data for DMA */
2069 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2070 skb_headlen(skb), DMA_TO_DEVICE);
2071
 2072	/* Setup the data pointer of the first BD of the packet */
2073 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2074 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2075 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
2076 tx_start_bd->nbd = cpu_to_le16(nbd);
2077 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2078 pkt_size = tx_start_bd->nbytes;
2079
2080 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2081 " nbytes %d flags %x vlan %x\n",
2082 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2083 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2084 tx_start_bd->bd_flags.as_bitfield,
2085 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
2086
2087 if (xmit_type & XMIT_GSO) {
2088
2089 DP(NETIF_MSG_TX_QUEUED,
2090 "TSO packet len %d hlen %d total len %d tso size %d\n",
2091 skb->len, hlen, skb_headlen(skb),
2092 skb_shinfo(skb)->gso_size);
2093
2094 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2095
2096 if (unlikely(skb_headlen(skb) > hlen))
2097 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2098 hlen, bd_prod, ++nbd);
2099 if (CHIP_IS_E2(bp))
2100 bnx2x_set_pbd_gso_e2(skb, pbd_e2, xmit_type);
2101 else
2102 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
2103 }
2104 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2105
 2106	/* Handle fragmented skb */
2107 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2108 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2109
2110 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2111 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2112 if (total_pkt_bd == NULL)
2113 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2114
2115 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2116 frag->page_offset,
2117 frag->size, DMA_TO_DEVICE);
2118
2119 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2120 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2121 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2122 le16_add_cpu(&pkt_size, frag->size);
2123
2124 DP(NETIF_MSG_TX_QUEUED,
2125 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2126 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2127 le16_to_cpu(tx_data_bd->nbytes));
2128 }
2129
2130 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2131
2132 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2133
2134 /* now send a tx doorbell, counting the next BD
2135 * if the packet contains or ends with it
2136 */
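	/* i.e. if the producer's offset within the current BD page is
	 * smaller than nbd, the chain wrapped past a page boundary and the
	 * "next page" BD it consumed must be counted as well.
	 */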
2137 if (TX_BD_POFF(bd_prod) < nbd)
2138 nbd++;
2139
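	/* total_pkt_bd, if set, is the first fragment BD; it is the BD that
	 * carries the accumulated byte count of the whole packet.
	 */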
2140 if (total_pkt_bd != NULL)
2141 total_pkt_bd->total_pkt_bytes = pkt_size;
2142
 2143	if (pbd_e1x)
 2144		DP(NETIF_MSG_TX_QUEUED,
 2145		   "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
 2146		   " tcp_flags %x xsum %x seq %u hlen %u\n",
2147 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2148 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2149 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2150 le16_to_cpu(pbd_e1x->total_hlen_w));
2151 if (pbd_e2)
2152 DP(NETIF_MSG_TX_QUEUED,
2153 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2154 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2155 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2156 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2157 pbd_e2->parsing_data);
2158 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2159
 2160	/*
 2161	 * Make sure that the BD data is updated before updating the producer
 2162	 * since the FW might read the BD right after the producer is updated.
 2163	 * This is only applicable for weak-ordered memory model archs such
 2164	 * as IA-64. The following barrier is also mandatory since the FW
 2165	 * assumes packets must have BDs.
 2166	 */
2167 wmb();
2168
2169 fp->tx_db.data.prod += nbd;
2170 barrier();
 2171
 2172	DOORBELL(bp, fp->cid, fp->tx_db.raw);
2173
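	/* mmiowb() keeps the doorbell MMIO write ordered ahead of any
	 * subsequent unlock on architectures where MMIO writes from
	 * different CPUs may otherwise be reordered.
	 */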
2174 mmiowb();
2175
2176 fp->tx_bd_prod += nbd;
2177
2178 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2179 netif_tx_stop_queue(txq);
2180
 2181		/* paired memory barrier is in bnx2x_tx_int(); we have to keep
 2182		 * ordering of set_bit() in netif_tx_stop_queue() and the read
 2183		 * of fp->tx_bd_cons */
2184 smp_mb();
2185
2186 fp->eth_q_stats.driver_xoff++;
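		/* Re-check under the barrier: bnx2x_tx_int() may have freed
		 * BDs and missed waking us between our availability test and
		 * netif_tx_stop_queue(), so wake the queue here if room is
		 * back.
		 */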
2187 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2188 netif_tx_wake_queue(txq);
2189 }
2190 fp->tx_pkt++;
2191
2192 return NETDEV_TX_OK;
2193}
 2194
2195/* called with rtnl_lock */
2196int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2197{
2198 struct sockaddr *addr = p;
2199 struct bnx2x *bp = netdev_priv(dev);
2200
2201 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2202 return -EINVAL;
2203
2204 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2205 if (netif_running(dev))
2206 bnx2x_set_eth_mac(bp, 1);
2207
2208 return 0;
2209}
2210
2211
2212int bnx2x_setup_irqs(struct bnx2x *bp)
2213{
2214 int rc = 0;
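	/* If MSI-X was successfully enabled, request one IRQ per vector;
	 * otherwise fall back to a single MSI or legacy INTx interrupt.
	 */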
2215 if (bp->flags & USING_MSIX_FLAG) {
2216 rc = bnx2x_req_msix_irqs(bp);
2217 if (rc)
2218 return rc;
2219 } else {
2220 bnx2x_ack_int(bp);
2221 rc = bnx2x_req_irq(bp);
2222 if (rc) {
2223 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
2224 return rc;
2225 }
2226 if (bp->flags & USING_MSI_FLAG) {
2227 bp->dev->irq = bp->pdev->irq;
2228 netdev_info(bp->dev, "using MSI IRQ %d\n",
2229 bp->pdev->irq);
2230 }
2231 }
2232
2233 return 0;
2234}
2235
2236void bnx2x_free_mem_bp(struct bnx2x *bp)
2237{
2238 kfree(bp->fp);
2239 kfree(bp->msix_table);
2240 kfree(bp->ilt);
2241}
2242
2243int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2244{
2245 struct bnx2x_fastpath *fp;
2246 struct msix_entry *tbl;
2247 struct bnx2x_ilt *ilt;
2248
2249 /* fp array */
2250 fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
2251 if (!fp)
2252 goto alloc_err;
2253 bp->fp = fp;
2254
2255 /* msix table */
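	/* one entry per L2 CID plus one extra, presumably for the
	 * default/slowpath vector */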
2256 tbl = kzalloc((bp->l2_cid_count + 1) * sizeof(*tbl),
2257 GFP_KERNEL);
2258 if (!tbl)
2259 goto alloc_err;
2260 bp->msix_table = tbl;
2261
2262 /* ilt */
2263 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
2264 if (!ilt)
2265 goto alloc_err;
2266 bp->ilt = ilt;
2267
2268 return 0;
2269alloc_err:
2270 bnx2x_free_mem_bp(bp);
2271 return -ENOMEM;
2272
2273}
2274
2275/* called with rtnl_lock */
2276int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2277{
2278 struct bnx2x *bp = netdev_priv(dev);
2279 int rc = 0;
2280
2281 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2282 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2283 return -EAGAIN;
2284 }
2285
2286 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2287 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2288 return -EINVAL;
2289
2290 /* This does not race with packet allocation
2291 * because the actual alloc size is
2292 * only updated as part of load
2293 */
2294 dev->mtu = new_mtu;
2295
2296 if (netif_running(dev)) {
2297 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2298 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2299 }
2300
2301 return rc;
2302}
2303
2304void bnx2x_tx_timeout(struct net_device *dev)
2305{
2306 struct bnx2x *bp = netdev_priv(dev);
2307
2308#ifdef BNX2X_STOP_ON_ERROR
2309 if (!bp->panic)
2310 bnx2x_panic();
2311#endif
2312 /* This allows the netif to be shutdown gracefully before resetting */
2313 schedule_delayed_work(&bp->reset_task, 0);
2314}
2315
2316#ifdef BCM_VLAN
2317/* called with rtnl_lock */
2318void bnx2x_vlan_rx_register(struct net_device *dev,
2319 struct vlan_group *vlgrp)
2320{
2321 struct bnx2x *bp = netdev_priv(dev);
2322
2323 bp->vlgrp = vlgrp;
2324}
2325
2326#endif
 2327
2328int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2329{
2330 struct net_device *dev = pci_get_drvdata(pdev);
2331 struct bnx2x *bp;
2332
2333 if (!dev) {
2334 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2335 return -ENODEV;
2336 }
2337 bp = netdev_priv(dev);
2338
2339 rtnl_lock();
2340
2341 pci_save_state(pdev);
2342
2343 if (!netif_running(dev)) {
2344 rtnl_unlock();
2345 return 0;
2346 }
2347
2348 netif_device_detach(dev);
2349
2350 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2351
2352 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2353
2354 rtnl_unlock();
2355
2356 return 0;
2357}
2358
2359int bnx2x_resume(struct pci_dev *pdev)
2360{
2361 struct net_device *dev = pci_get_drvdata(pdev);
2362 struct bnx2x *bp;
2363 int rc;
2364
2365 if (!dev) {
2366 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2367 return -ENODEV;
2368 }
2369 bp = netdev_priv(dev);
2370
2371 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2372 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2373 return -EAGAIN;
2374 }
2375
2376 rtnl_lock();
2377
2378 pci_restore_state(pdev);
2379
2380 if (!netif_running(dev)) {
2381 rtnl_unlock();
2382 return 0;
2383 }
2384
2385 bnx2x_set_power_state(bp, PCI_D0);
2386 netif_device_attach(dev);
2387
2388 /* Since the chip was reset, clear the FW sequence number */
2389 bp->fw_seq = 0;
2390 rc = bnx2x_nic_load(bp, LOAD_OPEN);
2391
2392 rtnl_unlock();
2393
2394 return rc;
2395}