1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
18#include <linux/etherdevice.h>
19#include <linux/ip.h>
 20#include <net/ipv6.h>
 21#include <net/ip6_checksum.h>
 22#include <linux/firmware.h>
23#include "bnx2x_cmn.h"
24
25#ifdef BCM_VLAN
26#include <linux/if_vlan.h>
27#endif
28
29#include "bnx2x_init.h"
30
31
32/* free skb in the packet ring at pos idx
33 * return idx of last bd freed
34 */
35static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
36 u16 idx)
37{
38 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
39 struct eth_tx_start_bd *tx_start_bd;
40 struct eth_tx_bd *tx_data_bd;
41 struct sk_buff *skb = tx_buf->skb;
42 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
43 int nbd;
44
45 /* prefetch skb end pointer to speedup dev_kfree_skb() */
46 prefetch(&skb->end);
47
48 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
49 idx, tx_buf, skb);
50
51 /* unmap first bd */
52 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
53 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
54 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
 55 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
56
57 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
58#ifdef BNX2X_STOP_ON_ERROR
59 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
60 BNX2X_ERR("BAD nbd!\n");
61 bnx2x_panic();
62 }
63#endif
64 new_cons = nbd + tx_buf->first_bd;
65
66 /* Get the next bd */
67 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
68
69 /* Skip a parse bd... */
70 --nbd;
71 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
72
73 /* ...and the TSO split header bd since they have no mapping */
74 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
75 --nbd;
76 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
77 }
78
79 /* now free frags */
80 while (nbd > 0) {
81
82 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
83 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
84 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
85 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
86 if (--nbd)
87 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
88 }
89
90 /* release skb */
91 WARN_ON(!skb);
92 dev_kfree_skb(skb);
93 tx_buf->first_bd = 0;
94 tx_buf->skb = NULL;
95
96 return new_cons;
97}
98
99int bnx2x_tx_int(struct bnx2x_fastpath *fp)
100{
101 struct bnx2x *bp = fp->bp;
102 struct netdev_queue *txq;
103 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
104
105#ifdef BNX2X_STOP_ON_ERROR
106 if (unlikely(bp->panic))
107 return -1;
108#endif
109
110 txq = netdev_get_tx_queue(bp->dev, fp->index);
111 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
112 sw_cons = fp->tx_pkt_cons;
113
114 while (sw_cons != hw_cons) {
115 u16 pkt_cons;
116
117 pkt_cons = TX_BD(sw_cons);
118
119 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
120 " pkt_cons %u\n",
121 fp->index, hw_cons, sw_cons, pkt_cons);
 122
123 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
124 sw_cons++;
125 }
126
127 fp->tx_pkt_cons = sw_cons;
128 fp->tx_bd_cons = bd_cons;
129
130 /* Need to make the tx_bd_cons update visible to start_xmit()
131 * before checking for netif_tx_queue_stopped(). Without the
132 * memory barrier, there is a small possibility that
133 * start_xmit() will miss it and cause the queue to be stopped
134 * forever.
135 */
136 smp_mb();
137
138 if (unlikely(netif_tx_queue_stopped(txq))) {
139 /* Taking tx_lock() is needed to prevent reenabling the queue
 140 * while it's empty. This could have happened if rx_action() gets
141 * suspended in bnx2x_tx_int() after the condition before
142 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
143 *
144 * stops the queue->sees fresh tx_bd_cons->releases the queue->
145 * sends some packets consuming the whole queue again->
146 * stops the queue
147 */
148
149 __netif_tx_lock(txq, smp_processor_id());
150
151 if ((netif_tx_queue_stopped(txq)) &&
152 (bp->state == BNX2X_STATE_OPEN) &&
153 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
154 netif_tx_wake_queue(txq);
155
156 __netif_tx_unlock(txq);
157 }
158 return 0;
159}
160
161static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
162 u16 idx)
163{
164 u16 last_max = fp->last_max_sge;
165
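 /* Note: SUB_S16() is a signed 16-bit subtraction, so this
  * "newer than" test stays correct when the SGE index wraps around.
  */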
166 if (SUB_S16(idx, last_max) > 0)
167 fp->last_max_sge = idx;
168}
169
170static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
171 struct eth_fast_path_rx_cqe *fp_cqe)
172{
173 struct bnx2x *bp = fp->bp;
174 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
175 le16_to_cpu(fp_cqe->len_on_bd)) >>
176 SGE_PAGE_SHIFT;
177 u16 last_max, last_elem, first_elem;
178 u16 delta = 0;
179 u16 i;
180
181 if (!sge_len)
182 return;
183
184 /* First mark all used pages */
185 for (i = 0; i < sge_len; i++)
186 SGE_MASK_CLEAR_BIT(fp,
187 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
188
189 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
 190 sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
191
192 /* Here we assume that the last SGE index is the biggest */
193 prefetch((void *)(fp->sge_mask));
194 bnx2x_update_last_max_sge(fp,
195 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
196
197 last_max = RX_SGE(fp->last_max_sge);
198 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
199 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
200
201 /* If ring is not full */
202 if (last_elem + 1 != first_elem)
203 last_elem++;
204
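 /* The SGE mask is kept in RX_SGE_MASK_ELEM_SZ-bit elements; the
  * producer is only advanced over elements whose pages have all been
  * consumed (all bits cleared above), and each element passed here is
  * re-armed to all ones for the next cycle.
  */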
205 /* Now update the prod */
206 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
207 if (likely(fp->sge_mask[i]))
208 break;
209
210 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
211 delta += RX_SGE_MASK_ELEM_SZ;
212 }
213
214 if (delta > 0) {
215 fp->rx_sge_prod += delta;
216 /* clear page-end entries */
217 bnx2x_clear_sge_mask_next_elems(fp);
218 }
219
220 DP(NETIF_MSG_RX_STATUS,
221 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
222 fp->last_max_sge, fp->rx_sge_prod);
223}
224
225static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
226 struct sk_buff *skb, u16 cons, u16 prod)
227{
228 struct bnx2x *bp = fp->bp;
229 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
230 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
231 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
232 dma_addr_t mapping;
233
234 /* move empty skb from pool to prod and map it */
235 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
236 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
237 bp->rx_buf_size, DMA_FROM_DEVICE);
238 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
239
240 /* move partial skb from cons to pool (don't unmap yet) */
241 fp->tpa_pool[queue] = *cons_rx_buf;
242
243 /* mark bin state as start - print error if current state != stop */
244 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
245 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
246
247 fp->tpa_state[queue] = BNX2X_TPA_START;
248
249 /* point prod_bd to new skb */
250 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
251 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
252
253#ifdef BNX2X_STOP_ON_ERROR
254 fp->tpa_queue_used |= (1 << queue);
255#ifdef _ASM_GENERIC_INT_L64_H
256 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
257#else
258 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
259#endif
260 fp->tpa_queue_used);
261#endif
262}
263
264static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
265 struct sk_buff *skb,
266 struct eth_fast_path_rx_cqe *fp_cqe,
267 u16 cqe_idx)
268{
269 struct sw_rx_page *rx_pg, old_rx_pg;
270 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
271 u32 i, frag_len, frag_size, pages;
272 int err;
273 int j;
274
275 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
276 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
277
278 /* This is needed in order to enable forwarding support */
279 if (frag_size)
280 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
281 max(frag_size, (u32)len_on_bd));
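 /* Note: setting gso_size lets the stack re-segment this aggregated
  * frame at the original MSS if it has to be forwarded out of an
  * interface with a smaller MTU.
  */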
282
283#ifdef BNX2X_STOP_ON_ERROR
284 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
285 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
286 pages, cqe_idx);
287 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
288 fp_cqe->pkt_len, len_on_bd);
289 bnx2x_panic();
290 return -EINVAL;
291 }
292#endif
293
294 /* Run through the SGL and compose the fragmented skb */
295 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
296 u16 sge_idx =
297 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
298
299 /* FW gives the indices of the SGE as if the ring is an array
300 (meaning that "next" element will consume 2 indices) */
301 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
302 rx_pg = &fp->rx_page_ring[sge_idx];
303 old_rx_pg = *rx_pg;
304
305 /* If we fail to allocate a substitute page, we simply stop
306 where we are and drop the whole packet */
307 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
308 if (unlikely(err)) {
309 fp->eth_q_stats.rx_skb_alloc_failed++;
310 return err;
311 }
312
 313 /* Unmap the page as we're going to pass it to the stack */
314 dma_unmap_page(&bp->pdev->dev,
315 dma_unmap_addr(&old_rx_pg, mapping),
316 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
317
318 /* Add one frag and update the appropriate fields in the skb */
319 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
320
321 skb->data_len += frag_len;
322 skb->truesize += frag_len;
323 skb->len += frag_len;
324
325 frag_size -= frag_len;
326 }
327
328 return 0;
329}
330
331static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
332 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
333 u16 cqe_idx)
334{
335 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
336 struct sk_buff *skb = rx_buf->skb;
337 /* alloc new skb */
338 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
339
340 /* Unmap skb in the pool anyway, as we are going to change
341 pool entry status to BNX2X_TPA_STOP even if new skb allocation
342 fails. */
343 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
344 bp->rx_buf_size, DMA_FROM_DEVICE);
345
346 if (likely(new_skb)) {
347 /* fix ip xsum and give it to the stack */
348 /* (no need to map the new skb) */
349#ifdef BCM_VLAN
350 int is_vlan_cqe =
351 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
352 PARSING_FLAGS_VLAN);
353 int is_not_hwaccel_vlan_cqe =
354 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
355#endif
356
357 prefetch(skb);
 358 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
359
360#ifdef BNX2X_STOP_ON_ERROR
361 if (pad + len > bp->rx_buf_size) {
362 BNX2X_ERR("skb_put is about to fail... "
363 "pad %d len %d rx_buf_size %d\n",
364 pad, len, bp->rx_buf_size);
365 bnx2x_panic();
366 return;
367 }
368#endif
369
370 skb_reserve(skb, pad);
371 skb_put(skb, len);
372
373 skb->protocol = eth_type_trans(skb, bp->dev);
374 skb->ip_summed = CHECKSUM_UNNECESSARY;
375
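 /* The IP header now describes the whole aggregation, so its
  * checksum must be recomputed before the skb is passed up.
  */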
376 {
377 struct iphdr *iph;
378
379 iph = (struct iphdr *)skb->data;
380#ifdef BCM_VLAN
 381 /* If there is no Rx VLAN offloading -
 382 take the VLAN tag into account */
383 if (unlikely(is_not_hwaccel_vlan_cqe))
384 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
385#endif
386 iph->check = 0;
387 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
388 }
389
390 if (!bnx2x_fill_frag_skb(bp, fp, skb,
391 &cqe->fast_path_cqe, cqe_idx)) {
392#ifdef BCM_VLAN
393 if ((bp->vlgrp != NULL) &&
394 (le16_to_cpu(cqe->fast_path_cqe.
395 pars_flags.flags) & PARSING_FLAGS_VLAN))
396 vlan_gro_receive(&fp->napi, bp->vlgrp,
397 le16_to_cpu(cqe->fast_path_cqe.
398 vlan_tag), skb);
399 else
400#endif
401 napi_gro_receive(&fp->napi, skb);
402 } else {
403 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
404 " - dropping packet!\n");
405 dev_kfree_skb(skb);
406 }
407
408
409 /* put new skb in bin */
410 fp->tpa_pool[queue].skb = new_skb;
411
412 } else {
413 /* else drop the packet and keep the buffer in the bin */
414 DP(NETIF_MSG_RX_STATUS,
415 "Failed to allocate new skb - dropping packet!\n");
416 fp->eth_q_stats.rx_skb_alloc_failed++;
417 }
418
419 fp->tpa_state[queue] = BNX2X_TPA_STOP;
420}
421
422/* Set Toeplitz hash value in the skb using the value from the
423 * CQE (calculated by HW).
424 */
425static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
426 struct sk_buff *skb)
427{
428 /* Set Toeplitz hash from CQE */
429 if ((bp->dev->features & NETIF_F_RXHASH) &&
430 (cqe->fast_path_cqe.status_flags &
431 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
432 skb->rxhash =
433 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
434}
435
436int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
437{
438 struct bnx2x *bp = fp->bp;
439 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
440 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
441 int rx_pkt = 0;
442
443#ifdef BNX2X_STOP_ON_ERROR
444 if (unlikely(bp->panic))
445 return 0;
446#endif
447
 448 /* The CQ "next page" element is the same size as a regular
 449 element, so it is safe to simply step past it here */
450 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
451 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
452 hw_comp_cons++;
453
454 bd_cons = fp->rx_bd_cons;
455 bd_prod = fp->rx_bd_prod;
456 bd_prod_fw = bd_prod;
457 sw_comp_cons = fp->rx_comp_cons;
458 sw_comp_prod = fp->rx_comp_prod;
459
460 /* Memory barrier necessary as speculative reads of the rx
461 * buffer can be ahead of the index in the status block
462 */
463 rmb();
464
465 DP(NETIF_MSG_RX_STATUS,
466 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
467 fp->index, hw_comp_cons, sw_comp_cons);
468
469 while (sw_comp_cons != hw_comp_cons) {
470 struct sw_rx_bd *rx_buf = NULL;
471 struct sk_buff *skb;
472 union eth_rx_cqe *cqe;
473 u8 cqe_fp_flags;
474 u16 len, pad;
475
476 comp_ring_cons = RCQ_BD(sw_comp_cons);
477 bd_prod = RX_BD(bd_prod);
478 bd_cons = RX_BD(bd_cons);
479
480 /* Prefetch the page containing the BD descriptor
481 at producer's index. It will be needed when new skb is
482 allocated */
483 prefetch((void *)(PAGE_ALIGN((unsigned long)
484 (&fp->rx_desc_ring[bd_prod])) -
485 PAGE_SIZE + 1));
486
487 cqe = &fp->rx_comp_ring[comp_ring_cons];
488 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
489
490 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
491 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
492 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
493 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
494 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
495 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
496
497 /* is this a slowpath msg? */
498 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
499 bnx2x_sp_event(fp, cqe);
500 goto next_cqe;
501
502 /* this is an rx packet */
503 } else {
504 rx_buf = &fp->rx_buf_ring[bd_cons];
505 skb = rx_buf->skb;
506 prefetch(skb);
507 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
508 pad = cqe->fast_path_cqe.placement_offset;
509
510 /* - If CQE is marked both TPA_START and TPA_END it is
511 * a non-TPA CQE.
512 * - FP CQE will always have either TPA_START or/and
513 * TPA_STOP flags set.
514 */
515 if ((!fp->disable_tpa) &&
516 (TPA_TYPE(cqe_fp_flags) !=
517 (TPA_TYPE_START | TPA_TYPE_END))) {
518 u16 queue = cqe->fast_path_cqe.queue_index;
519
520 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
521 DP(NETIF_MSG_RX_STATUS,
522 "calling tpa_start on queue %d\n",
523 queue);
524
525 bnx2x_tpa_start(fp, queue, skb,
526 bd_cons, bd_prod);
527
528 /* Set Toeplitz hash for an LRO skb */
529 bnx2x_set_skb_rxhash(bp, cqe, skb);
530
531 goto next_rx;
 532 } else { /* TPA_STOP */
533 DP(NETIF_MSG_RX_STATUS,
534 "calling tpa_stop on queue %d\n",
535 queue);
536
537 if (!BNX2X_RX_SUM_FIX(cqe))
 538 BNX2X_ERR("STOP on non-TCP "
 539 "data\n");
540
 541 /* This is the size of the linear data
 542 on this skb */
543 len = le16_to_cpu(cqe->fast_path_cqe.
544 len_on_bd);
545 bnx2x_tpa_stop(bp, fp, queue, pad,
546 len, cqe, comp_ring_cons);
547#ifdef BNX2X_STOP_ON_ERROR
548 if (bp->panic)
549 return 0;
550#endif
551
552 bnx2x_update_sge_prod(fp,
553 &cqe->fast_path_cqe);
554 goto next_cqe;
555 }
556 }
557
558 dma_sync_single_for_device(&bp->pdev->dev,
559 dma_unmap_addr(rx_buf, mapping),
560 pad + RX_COPY_THRESH,
561 DMA_FROM_DEVICE);
 562 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
563
564 /* is this an error packet? */
565 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
566 DP(NETIF_MSG_RX_ERR,
567 "ERROR flags %x rx packet %u\n",
568 cqe_fp_flags, sw_comp_cons);
569 fp->eth_q_stats.rx_err_discard_pkt++;
570 goto reuse_rx;
571 }
572
 573 /* Since we don't have a jumbo ring, copy small packets
 574 * if mtu > 1500 so the full-size RX buffer can be reused
 575 */
576 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
577 (len <= RX_COPY_THRESH)) {
578 struct sk_buff *new_skb;
579
580 new_skb = netdev_alloc_skb(bp->dev,
581 len + pad);
582 if (new_skb == NULL) {
583 DP(NETIF_MSG_RX_ERR,
584 "ERROR packet dropped "
585 "because of alloc failure\n");
586 fp->eth_q_stats.rx_skb_alloc_failed++;
587 goto reuse_rx;
588 }
589
590 /* aligned copy */
591 skb_copy_from_linear_data_offset(skb, pad,
592 new_skb->data + pad, len);
593 skb_reserve(new_skb, pad);
594 skb_put(new_skb, len);
595
 596 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
597
598 skb = new_skb;
599
600 } else
601 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
602 dma_unmap_single(&bp->pdev->dev,
603 dma_unmap_addr(rx_buf, mapping),
604 bp->rx_buf_size,
605 DMA_FROM_DEVICE);
606 skb_reserve(skb, pad);
607 skb_put(skb, len);
608
609 } else {
610 DP(NETIF_MSG_RX_ERR,
611 "ERROR packet dropped because "
612 "of alloc failure\n");
613 fp->eth_q_stats.rx_skb_alloc_failed++;
614reuse_rx:
 615 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
616 goto next_rx;
617 }
618
619 skb->protocol = eth_type_trans(skb, bp->dev);
620
 621 /* Set Toeplitz hash for a non-LRO skb */
622 bnx2x_set_skb_rxhash(bp, cqe, skb);
623
 624 skb_checksum_none_assert(skb);
 625
626 if (bp->rx_csum) {
627 if (likely(BNX2X_RX_CSUM_OK(cqe)))
628 skb->ip_summed = CHECKSUM_UNNECESSARY;
629 else
630 fp->eth_q_stats.hw_csum_err++;
631 }
632 }
633
634 skb_record_rx_queue(skb, fp->index);
635
636#ifdef BCM_VLAN
637 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
638 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
639 PARSING_FLAGS_VLAN))
640 vlan_gro_receive(&fp->napi, bp->vlgrp,
641 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
642 else
643#endif
644 napi_gro_receive(&fp->napi, skb);
645
646
647next_rx:
648 rx_buf->skb = NULL;
649
650 bd_cons = NEXT_RX_IDX(bd_cons);
651 bd_prod = NEXT_RX_IDX(bd_prod);
652 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
653 rx_pkt++;
654next_cqe:
655 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
656 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
657
658 if (rx_pkt == budget)
659 break;
660 } /* while */
661
662 fp->rx_bd_cons = bd_cons;
663 fp->rx_bd_prod = bd_prod_fw;
664 fp->rx_comp_cons = sw_comp_cons;
665 fp->rx_comp_prod = sw_comp_prod;
666
667 /* Update producers */
668 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
669 fp->rx_sge_prod);
670
671 fp->rx_pkt += rx_pkt;
672 fp->rx_calls++;
673
674 return rx_pkt;
675}
676
677static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
678{
679 struct bnx2x_fastpath *fp = fp_cookie;
680 struct bnx2x *bp = fp->bp;
681
682 /* Return here if interrupt is disabled */
683 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
684 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
685 return IRQ_HANDLED;
686 }
687
688 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
689 "[fp %d fw_sd %d igusb %d]\n",
690 fp->index, fp->fw_sb_id, fp->igu_sb_id);
691 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
692
693#ifdef BNX2X_STOP_ON_ERROR
694 if (unlikely(bp->panic))
695 return IRQ_HANDLED;
696#endif
697
698 /* Handle Rx and Tx according to MSI-X vector */
699 prefetch(fp->rx_cons_sb);
700 prefetch(fp->tx_cons_sb);
 701 prefetch(&fp->sb_running_index[SM_RX_ID]);
702 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
703
704 return IRQ_HANDLED;
705}
706
707/* HW Lock for shared dual port PHYs */
708void bnx2x_acquire_phy_lock(struct bnx2x *bp)
709{
710 mutex_lock(&bp->port.phy_mutex);
711
712 if (bp->port.need_hw_lock)
713 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
714}
715
716void bnx2x_release_phy_lock(struct bnx2x *bp)
717{
718 if (bp->port.need_hw_lock)
719 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
720
721 mutex_unlock(&bp->port.phy_mutex);
722}
723
724void bnx2x_link_report(struct bnx2x *bp)
725{
726 if (bp->flags & MF_FUNC_DIS) {
727 netif_carrier_off(bp->dev);
728 netdev_err(bp->dev, "NIC Link is Down\n");
729 return;
730 }
731
732 if (bp->link_vars.link_up) {
733 u16 line_speed;
734
735 if (bp->state == BNX2X_STATE_OPEN)
736 netif_carrier_on(bp->dev);
737 netdev_info(bp->dev, "NIC Link is Up, ");
738
739 line_speed = bp->link_vars.line_speed;
 740 if (IS_MF(bp)) {
741 u16 vn_max_rate;
742
743 vn_max_rate =
744 ((bp->mf_config[BP_VN(bp)] &
745 FUNC_MF_CFG_MAX_BW_MASK) >>
746 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
747 if (vn_max_rate < line_speed)
748 line_speed = vn_max_rate;
749 }
750 pr_cont("%d Mbps ", line_speed);
751
752 if (bp->link_vars.duplex == DUPLEX_FULL)
753 pr_cont("full duplex");
754 else
755 pr_cont("half duplex");
756
757 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
758 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
759 pr_cont(", receive ");
760 if (bp->link_vars.flow_ctrl &
761 BNX2X_FLOW_CTRL_TX)
762 pr_cont("& transmit ");
763 } else {
764 pr_cont(", transmit ");
765 }
766 pr_cont("flow control ON");
767 }
768 pr_cont("\n");
769
770 } else { /* link_down */
771 netif_carrier_off(bp->dev);
772 netdev_err(bp->dev, "NIC Link is Down\n");
773 }
774}
775
776/* Returns the number of actually allocated BDs */
777static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
778 int rx_ring_size)
779{
780 struct bnx2x *bp = fp->bp;
781 u16 ring_prod, cqe_ring_prod;
782 int i;
783
784 fp->rx_comp_cons = 0;
785 cqe_ring_prod = ring_prod = 0;
786 for (i = 0; i < rx_ring_size; i++) {
787 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
788 BNX2X_ERR("was only able to allocate "
789 "%d rx skbs on queue[%d]\n", i, fp->index);
790 fp->eth_q_stats.rx_skb_alloc_failed++;
791 break;
792 }
793 ring_prod = NEXT_RX_IDX(ring_prod);
794 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
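 /* NEXT_RX_IDX()/NEXT_RCQ_IDX() skip the "next page" entries at each
  * page boundary, so ring_prod always runs at least as fast as i -
  * hence the WARN_ON below.
  */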
795 WARN_ON(ring_prod <= i);
796 }
797
798 fp->rx_bd_prod = ring_prod;
799 /* Limit the CQE producer by the CQE ring size */
800 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
801 cqe_ring_prod);
802 fp->rx_pkt = fp->rx_calls = 0;
803
804 return i;
805}
806
807static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
808{
809 struct bnx2x *bp = fp->bp;
810 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
811 MAX_RX_AVAIL/bp->num_queues;
812
813 rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
814
815 bnx2x_alloc_rx_bds(fp, rx_ring_size);
816
 817 /* Warning!
 818 * This will generate an interrupt (to the TSTORM),
 819 * so it must only be done after the chip is initialized
 820 */
821 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
822 fp->rx_sge_prod);
823}
824
825void bnx2x_init_rx_rings(struct bnx2x *bp)
826{
827 int func = BP_FUNC(bp);
828 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
829 ETH_MAX_AGGREGATION_QUEUES_E1H;
 830 u16 ring_prod;
 831 int i, j;
 832
833 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
834 BNX2X_FW_IP_HDR_ALIGN_PAD;
 835
836 DP(NETIF_MSG_IFUP,
837 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
838
839 for_each_queue(bp, j) {
840 struct bnx2x_fastpath *fp = &bp->fp[j];
 841
 842 if (!fp->disable_tpa) {
843 for (i = 0; i < max_agg_queues; i++) {
844 fp->tpa_pool[i].skb =
845 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
846 if (!fp->tpa_pool[i].skb) {
847 BNX2X_ERR("Failed to allocate TPA "
848 "skb pool for queue[%d] - "
849 "disabling TPA on this "
850 "queue!\n", j);
851 bnx2x_free_tpa_pool(bp, fp, i);
852 fp->disable_tpa = 1;
853 break;
854 }
855 dma_unmap_addr_set((struct sw_rx_bd *)
856 &bp->fp->tpa_pool[i],
857 mapping, 0);
858 fp->tpa_state[i] = BNX2X_TPA_STOP;
859 }
860
861 /* "next page" elements initialization */
862 bnx2x_set_next_page_sgl(fp);
863
864 /* set SGEs bit mask */
865 bnx2x_init_sge_ring_bit_mask(fp);
866
867 /* Allocate SGEs and initialize the ring elements */
868 for (i = 0, ring_prod = 0;
869 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
870
871 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
872 BNX2X_ERR("was only able to allocate "
873 "%d rx sges\n", i);
874 BNX2X_ERR("disabling TPA for"
875 " queue[%d]\n", j);
876 /* Cleanup already allocated elements */
877 bnx2x_free_rx_sge_range(bp,
878 fp, ring_prod);
879 bnx2x_free_tpa_pool(bp,
880 fp, max_agg_queues);
881 fp->disable_tpa = 1;
882 ring_prod = 0;
883 break;
884 }
885 ring_prod = NEXT_SGE_IDX(ring_prod);
886 }
887
888 fp->rx_sge_prod = ring_prod;
889 }
890 }
891
892 for_each_queue(bp, j) {
893 struct bnx2x_fastpath *fp = &bp->fp[j];
894
895 fp->rx_bd_cons = 0;
 896
 897 bnx2x_set_next_page_rx_bd(fp);
898
899 /* CQ ring */
 900 bnx2x_set_next_page_rx_cq(fp);
901
902 /* Allocate BDs and initialize BD ring */
 903 bnx2x_alloc_rx_bd_ring(fp);
 904
905 if (j != 0)
906 continue;
907
908 if (!CHIP_IS_E2(bp)) {
909 REG_WR(bp, BAR_USTRORM_INTMEM +
910 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
911 U64_LO(fp->rx_comp_mapping));
912 REG_WR(bp, BAR_USTRORM_INTMEM +
913 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
914 U64_HI(fp->rx_comp_mapping));
915 }
916 }
917}
 918
919static void bnx2x_free_tx_skbs(struct bnx2x *bp)
920{
921 int i;
922
923 for_each_queue(bp, i) {
924 struct bnx2x_fastpath *fp = &bp->fp[i];
925
926 u16 bd_cons = fp->tx_bd_cons;
927 u16 sw_prod = fp->tx_pkt_prod;
928 u16 sw_cons = fp->tx_pkt_cons;
929
930 while (sw_cons != sw_prod) {
931 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
932 sw_cons++;
933 }
934 }
935}
936
937static void bnx2x_free_rx_skbs(struct bnx2x *bp)
938{
939 int i, j;
940
941 for_each_queue(bp, j) {
942 struct bnx2x_fastpath *fp = &bp->fp[j];
943
944 for (i = 0; i < NUM_RX_BD; i++) {
945 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
946 struct sk_buff *skb = rx_buf->skb;
947
948 if (skb == NULL)
949 continue;
950
951 dma_unmap_single(&bp->pdev->dev,
952 dma_unmap_addr(rx_buf, mapping),
953 bp->rx_buf_size, DMA_FROM_DEVICE);
954
955 rx_buf->skb = NULL;
956 dev_kfree_skb(skb);
957 }
958 if (!fp->disable_tpa)
959 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
960 ETH_MAX_AGGREGATION_QUEUES_E1 :
961 ETH_MAX_AGGREGATION_QUEUES_E1H);
962 }
963}
964
965void bnx2x_free_skbs(struct bnx2x *bp)
966{
967 bnx2x_free_tx_skbs(bp);
968 bnx2x_free_rx_skbs(bp);
969}
970
971static void bnx2x_free_msix_irqs(struct bnx2x *bp)
972{
973 int i, offset = 1;
974
975 free_irq(bp->msix_table[0].vector, bp->dev);
976 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
977 bp->msix_table[0].vector);
978
979#ifdef BCM_CNIC
980 offset++;
981#endif
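 /* Vector 0 carries the slowpath interrupt and, when BCM_CNIC is
  * built in, the next vector belongs to CNIC, so fastpath vectors
  * start at 'offset'.
  */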
982 for_each_queue(bp, i) {
983 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
984 "state %x\n", i, bp->msix_table[i + offset].vector,
985 bnx2x_fp(bp, i, state));
986
987 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
988 }
989}
990
 991void bnx2x_free_irq(struct bnx2x *bp)
 992{
993 if (bp->flags & USING_MSIX_FLAG)
994 bnx2x_free_msix_irqs(bp);
995 else if (bp->flags & USING_MSI_FLAG)
996 free_irq(bp->pdev->irq, bp->dev);
997 else
998 free_irq(bp->pdev->irq, bp->dev);
999}
1000
 1001int bnx2x_enable_msix(struct bnx2x *bp)
 1002{
 1003 int msix_vec = 0, i, rc, req_cnt;
 1004
1005 bp->msix_table[msix_vec].entry = msix_vec;
1006 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1007 bp->msix_table[0].entry);
1008 msix_vec++;
1009
1010#ifdef BCM_CNIC
1011 bp->msix_table[msix_vec].entry = msix_vec;
1012 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1013 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1014 msix_vec++;
1015#endif
1016 for_each_queue(bp, i) {
 1017 bp->msix_table[msix_vec].entry = msix_vec;
 1018 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1019 "(fastpath #%u)\n", msix_vec, msix_vec, i);
1020 msix_vec++;
1021 }
1022
1023 req_cnt = BNX2X_NUM_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
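 /* Vector budget: one slowpath vector, CNIC_CONTEXT_USE vector(s)
  * reserved for CNIC and one vector per fastpath queue. If the full
  * request cannot be granted, pci_enable_msix() returns the number of
  * vectors that are available, which the retry below uses.
  */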
1024
1025 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1026
1027 /*
1028 * reconfigure number of tx/rx queues according to available
1029 * MSI-X vectors
1030 */
1031 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
 1032 /* how many fewer vectors will we have? */
1033 int diff = req_cnt - rc;
1034
1035 DP(NETIF_MSG_IFUP,
1036 "Trying to use less MSI-X vectors: %d\n", rc);
1037
1038 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1039
1040 if (rc) {
1041 DP(NETIF_MSG_IFUP,
1042 "MSI-X is not attainable rc %d\n", rc);
1043 return rc;
1044 }
1045 /*
1046 * decrease number of queues by number of unallocated entries
1047 */
1048 bp->num_queues -= diff;
1049
1050 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1051 bp->num_queues);
1052 } else if (rc) {
1053 /* fall to INTx if not enough memory */
1054 if (rc == -ENOMEM)
1055 bp->flags |= DISABLE_MSI_FLAG;
1056 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1057 return rc;
1058 }
1059
1060 bp->flags |= USING_MSIX_FLAG;
1061
1062 return 0;
1063}
1064
1065static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1066{
1067 int i, rc, offset = 1;
1068
1069 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
1070 bp->dev->name, bp->dev);
1071 if (rc) {
1072 BNX2X_ERR("request sp irq failed\n");
1073 return -EBUSY;
1074 }
1075
1076#ifdef BCM_CNIC
1077 offset++;
1078#endif
1079 for_each_queue(bp, i) {
1080 struct bnx2x_fastpath *fp = &bp->fp[i];
1081 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1082 bp->dev->name, i);
1083
 1084 rc = request_irq(bp->msix_table[offset].vector,
1085 bnx2x_msix_fp_int, 0, fp->name, fp);
1086 if (rc) {
1087 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
1088 bnx2x_free_msix_irqs(bp);
1089 return -EBUSY;
1090 }
1091
 1092 offset++;
1093 fp->state = BNX2X_FP_STATE_IRQ;
1094 }
1095
1096 i = BNX2X_NUM_QUEUES(bp);
 1097 offset = 1 + CNIC_CONTEXT_USE;
1098 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1099 " ... fp[%d] %d\n",
1100 bp->msix_table[0].vector,
1101 0, bp->msix_table[offset].vector,
1102 i - 1, bp->msix_table[offset + i - 1].vector);
1103
1104 return 0;
1105}
1106
 1107 int bnx2x_enable_msi(struct bnx2x *bp)
1108{
1109 int rc;
1110
1111 rc = pci_enable_msi(bp->pdev);
1112 if (rc) {
1113 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1114 return -1;
1115 }
1116 bp->flags |= USING_MSI_FLAG;
1117
1118 return 0;
1119}
1120
1121static int bnx2x_req_irq(struct bnx2x *bp)
1122{
1123 unsigned long flags;
1124 int rc;
1125
1126 if (bp->flags & USING_MSI_FLAG)
1127 flags = 0;
1128 else
1129 flags = IRQF_SHARED;
1130
1131 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1132 bp->dev->name, bp->dev);
1133 if (!rc)
1134 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1135
1136 return rc;
1137}
1138
1139static void bnx2x_napi_enable(struct bnx2x *bp)
1140{
1141 int i;
1142
1143 for_each_queue(bp, i)
1144 napi_enable(&bnx2x_fp(bp, i, napi));
1145}
1146
1147static void bnx2x_napi_disable(struct bnx2x *bp)
1148{
1149 int i;
1150
1151 for_each_queue(bp, i)
1152 napi_disable(&bnx2x_fp(bp, i, napi));
1153}
1154
1155void bnx2x_netif_start(struct bnx2x *bp)
1156{
1157 int intr_sem;
1158
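 /* Interrupts were disabled by bumping intr_sem; only the call that
  * brings the nesting count back to zero may re-enable them below.
  */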
1159 intr_sem = atomic_dec_and_test(&bp->intr_sem);
1160 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1161
1162 if (intr_sem) {
1163 if (netif_running(bp->dev)) {
1164 bnx2x_napi_enable(bp);
1165 bnx2x_int_enable(bp);
1166 if (bp->state == BNX2X_STATE_OPEN)
1167 netif_tx_wake_all_queues(bp->dev);
1168 }
1169 }
1170}
1171
1172void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1173{
1174 bnx2x_int_disable_sync(bp, disable_hw);
1175 bnx2x_napi_disable(bp);
1176 netif_tx_disable(bp->dev);
1177}
 1178
1179void bnx2x_set_num_queues(struct bnx2x *bp)
1180{
1181 switch (bp->multi_mode) {
1182 case ETH_RSS_MODE_DISABLED:
 1183 bp->num_queues = 1;
1184 break;
1185 case ETH_RSS_MODE_REGULAR:
1186 bp->num_queues = bnx2x_calc_num_queues(bp);
 1187 break;
 1188
 1189 default:
 1190 bp->num_queues = 1;
1191 break;
1192 }
1193}
1194
1195static void bnx2x_release_firmware(struct bnx2x *bp)
1196{
1197 kfree(bp->init_ops_offsets);
1198 kfree(bp->init_ops);
1199 kfree(bp->init_data);
1200 release_firmware(bp->firmware);
1201}
1202
1203/* must be called with rtnl_lock */
1204int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1205{
1206 u32 load_code;
1207 int i, rc;
1208
1209 /* Set init arrays */
1210 rc = bnx2x_init_firmware(bp);
1211 if (rc) {
1212 BNX2X_ERR("Error loading firmware\n");
1213 return rc;
1214 }
1215
1216#ifdef BNX2X_STOP_ON_ERROR
1217 if (unlikely(bp->panic))
1218 return -EPERM;
1219#endif
1220
1221 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1222
1223 /* must be called before memory allocation and HW init */
1224 bnx2x_ilt_set_info(bp);
1225
 1226 if (bnx2x_alloc_mem(bp))
 1227 return -ENOMEM;
1228
1229 netif_set_real_num_tx_queues(bp->dev, bp->num_queues);
1230 rc = netif_set_real_num_rx_queues(bp->dev, bp->num_queues);
1231 if (rc) {
1232 BNX2X_ERR("Unable to update real_num_rx_queues\n");
1233 goto load_error0;
1234 }
1235
1236 for_each_queue(bp, i)
1237 bnx2x_fp(bp, i, disable_tpa) =
1238 ((bp->flags & TPA_ENABLE_FLAG) == 0);
1239
1240 bnx2x_napi_enable(bp);
1241
1242 /* Send LOAD_REQUEST command to MCP
1243 Returns the type of LOAD command:
1244 if it is the first port to be initialized
1245 common blocks should be initialized, otherwise - not
1246 */
1247 if (!BP_NOMCP(bp)) {
 1248 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1249 if (!load_code) {
1250 BNX2X_ERR("MCP response failure, aborting\n");
1251 rc = -EBUSY;
 1252 goto load_error1;
1253 }
1254 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1255 rc = -EBUSY; /* other port in diagnostic mode */
 1256 goto load_error1;
1257 }
1258
1259 } else {
 1260 int path = BP_PATH(bp);
1261 int port = BP_PORT(bp);
1262
1263 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1264 path, load_count[path][0], load_count[path][1],
1265 load_count[path][2]);
1266 load_count[path][0]++;
1267 load_count[path][1 + port]++;
1268 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1269 path, load_count[path][0], load_count[path][1],
1270 load_count[path][2]);
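 /* Without an MCP the driver elects the load type itself: the first
  * function on the path does COMMON init, the first function on a
  * port does PORT init, everyone else does FUNCTION-only init.
  */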
1271 if (load_count[path][0] == 1)
 1272 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
 1273 else if (load_count[path][1 + port] == 1)
1274 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1275 else
1276 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1277 }
1278
1279 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
 1280 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
1281 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1282 bp->port.pmf = 1;
1283 else
1284 bp->port.pmf = 0;
1285 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1286
1287 /* Initialize HW */
1288 rc = bnx2x_init_hw(bp, load_code);
1289 if (rc) {
1290 BNX2X_ERR("HW init failed, aborting\n");
1291 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1292 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1293 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1294 goto load_error2;
1295 }
1296
1297 /* Connect to IRQs */
1298 rc = bnx2x_setup_irqs(bp);
1299 if (rc) {
1300 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1301 goto load_error2;
1302 }
1303
1304 /* Setup NIC internals and enable interrupts */
1305 bnx2x_nic_init(bp, load_code);
1306
1307 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1308 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
1309 (bp->common.shmem2_base))
1310 SHMEM2_WR(bp, dcc_support,
1311 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1312 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1313
1314 /* Send LOAD_DONE command to MCP */
1315 if (!BP_NOMCP(bp)) {
 1316 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1317 if (!load_code) {
1318 BNX2X_ERR("MCP response failure, aborting\n");
1319 rc = -EBUSY;
1320 goto load_error3;
1321 }
1322 }
1323
1324 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1325
1326 rc = bnx2x_func_start(bp);
1327 if (rc) {
1328 BNX2X_ERR("Function start failed!\n");
1329#ifndef BNX2X_STOP_ON_ERROR
1330 goto load_error3;
1331#else
1332 bp->panic = 1;
1333 return -EBUSY;
1334#endif
1335 }
1336
1337 rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
1338 if (rc) {
1339 BNX2X_ERR("Setup leading failed!\n");
1340#ifndef BNX2X_STOP_ON_ERROR
1341 goto load_error3;
1342#else
1343 bp->panic = 1;
1344 return -EBUSY;
1345#endif
1346 }
1347
1348 if (!CHIP_IS_E1(bp) &&
1349 (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
1350 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1351 bp->flags |= MF_FUNC_DIS;
1352 }
 1353
 1354#ifdef BCM_CNIC
1355 /* Enable Timer scan */
1356 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
 1357#endif
 1358
1359 for_each_nondefault_queue(bp, i) {
1360 rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
1361 if (rc)
 1362#ifdef BCM_CNIC
 1363 goto load_error4;
 1364#else
 1365 goto load_error3;
 1366#endif
1367 }
1368
1369 /* Now when Clients are configured we are ready to work */
1370 bp->state = BNX2X_STATE_OPEN;
1371
1372 bnx2x_set_eth_mac(bp, 1);
 1373
1374 if (bp->port.pmf)
1375 bnx2x_initial_phy_init(bp, load_mode);
1376
1377 /* Start fast path */
1378 switch (load_mode) {
1379 case LOAD_NORMAL:
1380 /* Tx queue should be only reenabled */
1381 netif_tx_wake_all_queues(bp->dev);
1382 /* Initialize the receive filter. */
1383 bnx2x_set_rx_mode(bp->dev);
1384 break;
1385
1386 case LOAD_OPEN:
1387 netif_tx_start_all_queues(bp->dev);
 1388 smp_mb__after_clear_bit();
1389 /* Initialize the receive filter. */
1390 bnx2x_set_rx_mode(bp->dev);
1391 break;
1392
1393 case LOAD_DIAG:
1394 /* Initialize the receive filter. */
1395 bnx2x_set_rx_mode(bp->dev);
1396 bp->state = BNX2X_STATE_DIAG;
1397 break;
1398
1399 default:
1400 break;
1401 }
1402
1403 if (!bp->port.pmf)
1404 bnx2x__link_status_update(bp);
1405
1406 /* start the timer */
1407 mod_timer(&bp->timer, jiffies + bp->current_interval);
1408
1409#ifdef BCM_CNIC
1410 bnx2x_setup_cnic_irq_info(bp);
1411 if (bp->state == BNX2X_STATE_OPEN)
1412 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1413#endif
1414 bnx2x_inc_load_cnt(bp);
1415
1416 bnx2x_release_firmware(bp);
1417
1418 return 0;
1419
1420#ifdef BCM_CNIC
1421load_error4:
1422 /* Disable Timer scan */
1423 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
1424#endif
1425load_error3:
1426 bnx2x_int_disable_sync(bp, 1);
 1427
1428 /* Free SKBs, SGEs, TPA pool and driver internals */
1429 bnx2x_free_skbs(bp);
1430 for_each_queue(bp, i)
1431 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 1432
 1433 /* Release IRQs */
1434 bnx2x_free_irq(bp);
1435load_error2:
1436 if (!BP_NOMCP(bp)) {
1437 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1438 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1439 }
1440
1441 bp->port.pmf = 0;
1442load_error1:
1443 bnx2x_napi_disable(bp);
 1444load_error0:
1445 bnx2x_free_mem(bp);
1446
1447 bnx2x_release_firmware(bp);
1448
1449 return rc;
1450}
1451
1452/* must be called with rtnl_lock */
1453int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1454{
1455 int i;
1456
1457 if (bp->state == BNX2X_STATE_CLOSED) {
1458 /* Interface has been removed - nothing to recover */
1459 bp->recovery_state = BNX2X_RECOVERY_DONE;
1460 bp->is_leader = 0;
1461 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1462 smp_wmb();
1463
1464 return -EINVAL;
1465 }
1466
1467#ifdef BCM_CNIC
1468 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1469#endif
1470 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1471
1472 /* Set "drop all" */
1473 bp->rx_mode = BNX2X_RX_MODE_NONE;
1474 bnx2x_set_storm_rx_mode(bp);
1475
1476 /* Stop Tx */
1477 bnx2x_tx_disable(bp);
 1478
 1479 del_timer_sync(&bp->timer);
 1480
 1481 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
 1482 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
 1483
 1484 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1485
1486 /* Cleanup the chip if needed */
1487 if (unload_mode != UNLOAD_RECOVERY)
1488 bnx2x_chip_cleanup(bp, unload_mode);
1489 else {
1490 /* Disable HW interrupts, NAPI and Tx */
1491 bnx2x_netif_stop(bp, 1);
1492
1493 /* Release IRQs */
 1494 bnx2x_free_irq(bp);
 1495 }
1496
1497 bp->port.pmf = 0;
1498
1499 /* Free SKBs, SGEs, TPA pool and driver internals */
1500 bnx2x_free_skbs(bp);
1501 for_each_queue(bp, i)
1502 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 1503
1504 bnx2x_free_mem(bp);
1505
1506 bp->state = BNX2X_STATE_CLOSED;
1507
1508 /* The last driver must disable a "close the gate" if there is no
1509 * parity attention or "process kill" pending.
1510 */
1511 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1512 bnx2x_reset_is_done(bp))
1513 bnx2x_disable_close_the_gate(bp);
1514
1515 /* Reset MCP mail box sequence if there is on going recovery */
1516 if (unload_mode == UNLOAD_RECOVERY)
1517 bp->fw_seq = 0;
1518
1519 return 0;
1520}
 1521
1522int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1523{
1524 u16 pmcsr;
1525
1526 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1527
1528 switch (state) {
1529 case PCI_D0:
1530 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1531 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
1532 PCI_PM_CTRL_PME_STATUS));
1533
1534 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
1535 /* delay required during transition out of D3hot */
1536 msleep(20);
1537 break;
1538
1539 case PCI_D3hot:
1540 /* If there are other clients above don't
1541 shut down the power */
1542 if (atomic_read(&bp->pdev->enable_cnt) != 1)
1543 return 0;
1544 /* Don't shut down the power for emulation and FPGA */
1545 if (CHIP_REV_IS_SLOW(bp))
1546 return 0;
1547
1548 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1549 pmcsr |= 3;
1550
1551 if (bp->wol)
1552 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1553
1554 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1555 pmcsr);
1556
1557 /* No more memory access after this point until
1558 * device is brought back to D0.
1559 */
1560 break;
1561
1562 default:
1563 return -EINVAL;
1564 }
1565 return 0;
1566}
1567
1568/*
1569 * net_device service functions
1570 */
 1571 int bnx2x_poll(struct napi_struct *napi, int budget)
1572{
1573 int work_done = 0;
1574 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1575 napi);
1576 struct bnx2x *bp = fp->bp;
1577
1578 while (1) {
1579#ifdef BNX2X_STOP_ON_ERROR
1580 if (unlikely(bp->panic)) {
1581 napi_complete(napi);
1582 return 0;
1583 }
1584#endif
1585
1586 if (bnx2x_has_tx_work(fp))
1587 bnx2x_tx_int(fp);
1588
1589 if (bnx2x_has_rx_work(fp)) {
1590 work_done += bnx2x_rx_int(fp, budget - work_done);
1591
1592 /* must not complete if we consumed full budget */
1593 if (work_done >= budget)
1594 break;
1595 }
1596
1597 /* Fall out from the NAPI loop if needed */
1598 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1599 bnx2x_update_fpsb_idx(fp);
1600 /* bnx2x_has_rx_work() reads the status block,
1601 * thus we need to ensure that status block indices
1602 * have been actually read (bnx2x_update_fpsb_idx)
1603 * prior to this check (bnx2x_has_rx_work) so that
1604 * we won't write the "newer" value of the status block
1605 * to IGU (if there was a DMA right after
1606 * bnx2x_has_rx_work and if there is no rmb, the memory
1607 * reading (bnx2x_update_fpsb_idx) may be postponed
1608 * to right before bnx2x_ack_sb). In this case there
1609 * will never be another interrupt until there is
1610 * another update of the status block, while there
1611 * is still unhandled work.
1612 */
1613 rmb();
1614
1615 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1616 napi_complete(napi);
1617 /* Re-enable interrupts */
1618 DP(NETIF_MSG_HW,
1619 "Update index to %d\n", fp->fp_hc_idx);
1620 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
1621 le16_to_cpu(fp->fp_hc_idx),
1622 IGU_INT_ENABLE, 1);
1623 break;
1624 }
1625 }
1626 }
1627
1628 return work_done;
1629}
1630
1631/* we split the first BD into headers and data BDs
1632 * to ease the pain of our fellow microcode engineers
1633 * we use one mapping for both BDs
1634 * So far this has only been observed to happen
1635 * in Other Operating Systems(TM)
1636 */
1637static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1638 struct bnx2x_fastpath *fp,
1639 struct sw_tx_bd *tx_buf,
1640 struct eth_tx_start_bd **tx_bd, u16 hlen,
1641 u16 bd_prod, int nbd)
1642{
1643 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1644 struct eth_tx_bd *d_tx_bd;
1645 dma_addr_t mapping;
1646 int old_len = le16_to_cpu(h_tx_bd->nbytes);
1647
1648 /* first fix first BD */
1649 h_tx_bd->nbd = cpu_to_le16(nbd);
1650 h_tx_bd->nbytes = cpu_to_le16(hlen);
1651
1652 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1653 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1654 h_tx_bd->addr_lo, h_tx_bd->nbd);
1655
1656 /* now get a new data BD
1657 * (after the pbd) and fill it */
1658 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1659 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1660
1661 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1662 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1663
1664 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1665 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1666 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1667
1668 /* this marks the BD as one that has no individual mapping */
1669 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1670
1671 DP(NETIF_MSG_TX_QUEUED,
1672 "TSO split data size is %d (%x:%x)\n",
1673 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1674
1675 /* update tx_bd */
1676 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
1677
1678 return bd_prod;
1679}
1680
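/* bnx2x_csum_fix() below compensates for the partial checksum having
 * been computed starting 'fix' bytes away from the transport header:
 * it subtracts (fix > 0) or adds (fix < 0) the checksum of the bytes
 * in between and returns the folded result byte-swapped.
 */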
1681static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1682{
1683 if (fix > 0)
1684 csum = (u16) ~csum_fold(csum_sub(csum,
1685 csum_partial(t_header - fix, fix, 0)));
1686
1687 else if (fix < 0)
1688 csum = (u16) ~csum_fold(csum_add(csum,
1689 csum_partial(t_header, -fix, 0)));
1690
1691 return swab16(csum);
1692}
1693
1694static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1695{
1696 u32 rc;
1697
1698 if (skb->ip_summed != CHECKSUM_PARTIAL)
1699 rc = XMIT_PLAIN;
1700
1701 else {
1702 if (skb->protocol == htons(ETH_P_IPV6)) {
1703 rc = XMIT_CSUM_V6;
1704 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1705 rc |= XMIT_CSUM_TCP;
1706
1707 } else {
1708 rc = XMIT_CSUM_V4;
1709 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1710 rc |= XMIT_CSUM_TCP;
1711 }
1712 }
1713
1714 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
1715 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
1716
1717 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
1718 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
1719
1720 return rc;
1721}
1722
1723#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1724/* check if packet requires linearization (packet is too fragmented)
1725 no need to check fragmentation if page size > 8K (there will be no
1726 violation to FW restrictions) */
1727static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
1728 u32 xmit_type)
1729{
1730 int to_copy = 0;
1731 int hlen = 0;
1732 int first_bd_sz = 0;
1733
1734 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
1735 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
1736
1737 if (xmit_type & XMIT_GSO) {
1738 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
1739 /* Check if LSO packet needs to be copied:
1740 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
1741 int wnd_size = MAX_FETCH_BD - 3;
1742 /* Number of windows to check */
1743 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
1744 int wnd_idx = 0;
1745 int frag_idx = 0;
1746 u32 wnd_sum = 0;
1747
1748 /* Headers length */
1749 hlen = (int)(skb_transport_header(skb) - skb->data) +
1750 tcp_hdrlen(skb);
1751
1752 /* Amount of data (w/o headers) on linear part of SKB*/
1753 first_bd_sz = skb_headlen(skb) - hlen;
1754
1755 wnd_sum = first_bd_sz;
1756
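 /* The loops below slide a window of wnd_size fragments across the
  * SKB and request linearization if any window carries less than one
  * MSS of data, which would violate the FW restriction mentioned
  * above.
  */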
1757 /* Calculate the first sum - it's special */
1758 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
1759 wnd_sum +=
1760 skb_shinfo(skb)->frags[frag_idx].size;
1761
1762 /* If there was data on linear skb data - check it */
1763 if (first_bd_sz > 0) {
1764 if (unlikely(wnd_sum < lso_mss)) {
1765 to_copy = 1;
1766 goto exit_lbl;
1767 }
1768
1769 wnd_sum -= first_bd_sz;
1770 }
1771
1772 /* Others are easier: run through the frag list and
1773 check all windows */
1774 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
1775 wnd_sum +=
1776 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
1777
1778 if (unlikely(wnd_sum < lso_mss)) {
1779 to_copy = 1;
1780 break;
1781 }
1782 wnd_sum -=
1783 skb_shinfo(skb)->frags[wnd_idx].size;
1784 }
1785 } else {
 1786 /* a non-LSO packet that is too fragmented must
 1787 always be linearized */
1788 to_copy = 1;
1789 }
1790 }
1791
1792exit_lbl:
1793 if (unlikely(to_copy))
1794 DP(NETIF_MSG_TX_QUEUED,
1795 "Linearization IS REQUIRED for %s packet. "
1796 "num_frags %d hlen %d first_bd_sz %d\n",
1797 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
1798 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
1799
1800 return to_copy;
1801}
1802#endif
1803
1804static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb,
1805 struct eth_tx_parse_bd_e2 *pbd,
1806 u32 xmit_type)
1807{
1808 pbd->parsing_data |= cpu_to_le16(skb_shinfo(skb)->gso_size) <<
1809 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT;
1810 if ((xmit_type & XMIT_GSO_V6) &&
1811 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
1812 pbd->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
1813}
1814
1815/**
1816 * Update PBD in GSO case.
1817 *
1818 * @param skb
1819 * @param tx_start_bd
1820 * @param pbd
1821 * @param xmit_type
1822 */
1823static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
1824 struct eth_tx_parse_bd_e1x *pbd,
1825 u32 xmit_type)
1826{
1827 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1828 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
1829 pbd->tcp_flags = pbd_tcp_flags(skb);
1830
1831 if (xmit_type & XMIT_GSO_V4) {
1832 pbd->ip_id = swab16(ip_hdr(skb)->id);
1833 pbd->tcp_pseudo_csum =
1834 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1835 ip_hdr(skb)->daddr,
1836 0, IPPROTO_TCP, 0));
1837
1838 } else
1839 pbd->tcp_pseudo_csum =
1840 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1841 &ipv6_hdr(skb)->daddr,
1842 0, IPPROTO_TCP, 0));
1843
1844 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
1845}
 1846
1847/**
1848 *
1849 * @param skb
1850 * @param tx_start_bd
1851 * @param pbd_e2
1852 * @param xmit_type
1853 *
1854 * @return header len
1855 */
1856static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
1857 struct eth_tx_parse_bd_e2 *pbd,
1858 u32 xmit_type)
1859{
1860 pbd->parsing_data |= cpu_to_le16(tcp_hdrlen(skb)/4) <<
1861 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT;
1862
1863 pbd->parsing_data |= cpu_to_le16(((unsigned char *)tcp_hdr(skb) -
1864 skb->data) / 2) <<
1865 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT;
1866
1867 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
1868}
1869
1870/**
1871 *
1872 * @param skb
1873 * @param tx_start_bd
1874 * @param pbd
1875 * @param xmit_type
1876 *
1877 * @return Header length
1878 */
1879static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
1880 struct eth_tx_parse_bd_e1x *pbd,
1881 u32 xmit_type)
1882{
1883 u8 hlen = (skb_network_header(skb) - skb->data) / 2;
1884
1885 /* for now NS flag is not used in Linux */
1886 pbd->global_data =
1887 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
1888 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
1889
1890 pbd->ip_hlen_w = (skb_transport_header(skb) -
1891 skb_network_header(skb)) / 2;
1892
1893 hlen += pbd->ip_hlen_w + tcp_hdrlen(skb) / 2;
1894
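 /* hlen has been accumulated in 16-bit words (the unit the parsing BD
  * uses); it is stored below and then converted back to bytes for the
  * caller.
  */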
1895 pbd->total_hlen_w = cpu_to_le16(hlen);
1896 hlen = hlen*2;
1897
1898 if (xmit_type & XMIT_CSUM_TCP) {
1899 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
1900
1901 } else {
1902 s8 fix = SKB_CS_OFF(skb); /* signed! */
1903
1904 DP(NETIF_MSG_TX_QUEUED,
1905 "hlen %d fix %d csum before fix %x\n",
1906 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
1907
1908 /* HW bug: fixup the CSUM */
1909 pbd->tcp_pseudo_csum =
1910 bnx2x_csum_fix(skb_transport_header(skb),
1911 SKB_CS(skb), fix);
1912
1913 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
1914 pbd->tcp_pseudo_csum);
1915 }
1916
1917 return hlen;
1918}
 1919
1920/* called with netif_tx_lock
1921 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
1922 * netif_wake_queue()
1923 */
1924netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1925{
1926 struct bnx2x *bp = netdev_priv(dev);
1927 struct bnx2x_fastpath *fp;
1928 struct netdev_queue *txq;
1929 struct sw_tx_bd *tx_buf;
1930 struct eth_tx_start_bd *tx_start_bd;
1931 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
 1932 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
 1933 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
1934 u16 pkt_prod, bd_prod;
1935 int nbd, fp_index;
1936 dma_addr_t mapping;
1937 u32 xmit_type = bnx2x_xmit_type(bp, skb);
1938 int i;
1939 u8 hlen = 0;
1940 __le16 pkt_size = 0;
1941 struct ethhdr *eth;
1942 u8 mac_type = UNICAST_ADDRESS;
1943
1944#ifdef BNX2X_STOP_ON_ERROR
1945 if (unlikely(bp->panic))
1946 return NETDEV_TX_BUSY;
1947#endif
1948
1949 fp_index = skb_get_queue_mapping(skb);
1950 txq = netdev_get_tx_queue(dev, fp_index);
1951
1952 fp = &bp->fp[fp_index];
1953
1954 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
1955 fp->eth_q_stats.driver_xoff++;
1956 netif_tx_stop_queue(txq);
1957 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
1958 return NETDEV_TX_BUSY;
1959 }
1960
1961 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
1962 "protocol(%x,%x) gso type %x xmit_type %x\n",
1963 fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
1964 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
1965
1966 eth = (struct ethhdr *)skb->data;
1967
1968 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
1969 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
1970 if (is_broadcast_ether_addr(eth->h_dest))
1971 mac_type = BROADCAST_ADDRESS;
1972 else
1973 mac_type = MULTICAST_ADDRESS;
1974 }
1975
1976#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1977 /* First, check if we need to linearize the skb (due to FW
1978 restrictions). No need to check fragmentation if page size > 8K
1979 (there will be no violation to FW restrictions) */
1980 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
1981 /* Statistics of linearization */
1982 bp->lin_cnt++;
1983 if (skb_linearize(skb) != 0) {
1984 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
1985 "silently dropping this SKB\n");
1986 dev_kfree_skb_any(skb);
1987 return NETDEV_TX_OK;
1988 }
1989 }
1990#endif
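	/*
	 * Illustrative note on the reasoning above (hedged, not from the
	 * source): the #if guard compiles the check in only when
	 * MAX_SKB_FRAGS can reach MAX_FETCH_BD - 3, i.e. when an skb could
	 * need more BDs than the FW will fetch for one packet once the
	 * start BD, parsing BD and headroom are accounted for; in that case
	 * skb_linearize() collapses the skb into a single contiguous buffer
	 * at the cost of a copy.
	 */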
1991
1992 /*
1993 Please read carefully. First we use one BD which we mark as start,
1994 then we have a parsing info BD (used for TSO or xsum),
1995 and only then we have the rest of the TSO BDs.
1996 (don't forget to mark the last one as last,
1997 and to unmap only AFTER you write to the BD ...)
1998 And above all, all pbd sizes are in words - NOT DWORDS!
1999 */
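	/*
	 * Illustrative layout (a sketch, not from the source): for a TSO skb
	 * with two fragments whose linear part is longer than the headers,
	 * the chain built below looks roughly like
	 *
	 *   start BD -> parsing BD -> linear-remainder BD -> frag BD 0 -> frag BD 1
	 *
	 * nbd starts at nr_frags + 2 (start BD + parsing BD + frags) and is
	 * incremented once more when bnx2x_tx_split() splits the headers off
	 * the linear data into their own BD.
	 */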
2000
2001 pkt_prod = fp->tx_pkt_prod++;
2002 bd_prod = TX_BD(fp->tx_bd_prod);
2003
2004 /* get a tx_buf and first BD */
2005 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
2006 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
2007
2008 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2009 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2010 mac_type);
2011
2012 /* header nbd */
2013 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
2014
2015 /* remember the first BD of the packet */
2016 tx_buf->first_bd = fp->tx_bd_prod;
2017 tx_buf->skb = skb;
2018 tx_buf->flags = 0;
2019
2020 DP(NETIF_MSG_TX_QUEUED,
2021 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2022 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
2023
2024#ifdef BCM_VLAN
2025 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
2026 (bp->flags & HW_VLAN_TX_FLAG)) {
2027 tx_start_bd->vlan_or_ethertype =
2028 cpu_to_le16(vlan_tx_tag_get(skb));
2029 tx_start_bd->bd_flags.as_bitfield |=
2030 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
2031 } else
2032#endif
2033 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2034
2035 /* turn on parsing and get a BD */
2036 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2037
2038 if (xmit_type & XMIT_CSUM) {
2039 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2040
2041 if (xmit_type & XMIT_CSUM_V4)
2042 tx_start_bd->bd_flags.as_bitfield |=
2043 ETH_TX_BD_FLAGS_IP_CSUM;
2044 else
2045 tx_start_bd->bd_flags.as_bitfield |=
2046 ETH_TX_BD_FLAGS_IPV6;
2047
2048 if (!(xmit_type & XMIT_CSUM_TCP))
2049 tx_start_bd->bd_flags.as_bitfield |=
2050 ETH_TX_BD_FLAGS_IS_UDP;
2051 }
2052
2053 if (CHIP_IS_E2(bp)) {
2054 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
2055 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2056 /* Set PBD in checksum offload case */
2057 if (xmit_type & XMIT_CSUM)
2058 hlen = bnx2x_set_pbd_csum_e2(bp,
2059 skb, pbd_e2, xmit_type);
2060 } else {
2061 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2062 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2063 /* Set PBD in checksum offload case */
2064 if (xmit_type & XMIT_CSUM)
2065 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
2066
2067 }
2068
2069 /* Map skb linear data for DMA */
2070 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2071 skb_headlen(skb), DMA_TO_DEVICE);
2072
2073 /* Setup the data pointer of the first BD of the packet */
2074 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2075 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2076 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
2077 tx_start_bd->nbd = cpu_to_le16(nbd);
2078 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2079 pkt_size = tx_start_bd->nbytes;
2080
2081 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2082 " nbytes %d flags %x vlan %x\n",
2083 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2084 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2085 tx_start_bd->bd_flags.as_bitfield,
2086 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
2087
2088 if (xmit_type & XMIT_GSO) {
2089
2090 DP(NETIF_MSG_TX_QUEUED,
2091 "TSO packet len %d hlen %d total len %d tso size %d\n",
2092 skb->len, hlen, skb_headlen(skb),
2093 skb_shinfo(skb)->gso_size);
2094
2095 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2096
2097 if (unlikely(skb_headlen(skb) > hlen))
2098 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2099 hlen, bd_prod, ++nbd);
2100 if (CHIP_IS_E2(bp))
2101 bnx2x_set_pbd_gso_e2(skb, pbd_e2, xmit_type);
2102 else
2103 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
2104 }
2105 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2106
2107 /* Handle fragmented skb */
2108 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2109 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2110
2111 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2112 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2113 if (total_pkt_bd == NULL)
2114 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2115
2116 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2117 frag->page_offset,
2118 frag->size, DMA_TO_DEVICE);
2119
2120 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2121 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2122 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2123 le16_add_cpu(&pkt_size, frag->size);
2124
2125 DP(NETIF_MSG_TX_QUEUED,
2126 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2127 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2128 le16_to_cpu(tx_data_bd->nbytes));
2129 }
2130
2131 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2132
2133 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2134
2135 /* now send a tx doorbell, counting the next-page pointer BD
2136 * if the packet's BD chain contains or ends with it
2137 */
2138 if (TX_BD_POFF(bd_prod) < nbd)
2139 nbd++;
2140
2141 if (total_pkt_bd != NULL)
2142 total_pkt_bd->total_pkt_bytes = pkt_size;
2143
2144 if (pbd_e1x)
2145 DP(NETIF_MSG_TX_QUEUED,
2146 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
2147 " tcp_flags %x xsum %x seq %u hlen %u\n",
2148 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2149 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2150 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2151 le16_to_cpu(pbd_e1x->total_hlen_w));
2152 if (pbd_e2)
2153 DP(NETIF_MSG_TX_QUEUED,
2154 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2155 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2156 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2157 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2158 pbd_e2->parsing_data);
2159 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2160
2161 /*
2162 * Make sure that the BD data is updated before updating the producer
2163 * since FW might read the BD right after the producer is updated.
2164 * This is only applicable for weak-ordered memory model archs such
2165 * as IA-64. The following barrier is also mandatory since the FW
2166 * assumes packets must have BDs.
2167 */
2168 wmb();
2169
2170 fp->tx_db.data.prod += nbd;
2171 barrier();
2172
2173 DOORBELL(bp, fp->cid, fp->tx_db.raw);
2174
2175 mmiowb();
2176
2177 fp->tx_bd_prod += nbd;
2178
2179 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2180 netif_tx_stop_queue(txq);
2181
2182 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
2183 * ordering of set_bit() in netif_tx_stop_queue() and read of
2184 * fp->tx_bd_cons (see the pairing sketch after this function) */
2185 smp_mb();
2186
2187 fp->eth_q_stats.driver_xoff++;
2188 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2189 netif_tx_wake_queue(txq);
2190 }
2191 fp->tx_pkt++;
2192
2193 return NETDEV_TX_OK;
2194}
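/*
 * Sketch of the queue stop/wake pairing referenced above (not part of the
 * source). Producer side, as in bnx2x_start_xmit():
 *
 *	netif_tx_stop_queue(txq);
 *	smp_mb();
 *	if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
 *		netif_tx_wake_queue(txq);
 *
 * The consumer side in bnx2x_tx_int() is expected to advance tx_bd_cons,
 * issue its own barrier and re-check the stopped/available state, so that
 * whichever side runs last sees the other's update and the queue cannot be
 * left stopped while descriptors are free.
 */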
2195
2196/* called with rtnl_lock */
2197int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2198{
2199 struct sockaddr *addr = p;
2200 struct bnx2x *bp = netdev_priv(dev);
2201
2202 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2203 return -EINVAL;
2204
2205 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2206 if (netif_running(dev))
2207 bnx2x_set_eth_mac(bp, 1);
2208
2209 return 0;
2210}
2211
2212
2213int bnx2x_setup_irqs(struct bnx2x *bp)
2214{
2215 int rc = 0;
2216 if (bp->flags & USING_MSIX_FLAG) {
2217 rc = bnx2x_req_msix_irqs(bp);
2218 if (rc)
2219 return rc;
2220 } else {
2221 bnx2x_ack_int(bp);
2222 rc = bnx2x_req_irq(bp);
2223 if (rc) {
2224 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
2225 return rc;
2226 }
2227 if (bp->flags & USING_MSI_FLAG) {
2228 bp->dev->irq = bp->pdev->irq;
2229 netdev_info(bp->dev, "using MSI IRQ %d\n",
2230 bp->pdev->irq);
2231 }
2232 }
2233
2234 return 0;
2235}
2236
2237void bnx2x_free_mem_bp(struct bnx2x *bp)
2238{
2239 kfree(bp->fp);
2240 kfree(bp->msix_table);
2241 kfree(bp->ilt);
2242}
2243
2244int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2245{
2246 struct bnx2x_fastpath *fp;
2247 struct msix_entry *tbl;
2248 struct bnx2x_ilt *ilt;
2249
2250 /* fp array */
2251 fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
2252 if (!fp)
2253 goto alloc_err;
2254 bp->fp = fp;
2255
2256 /* msix table */
2257 tbl = kzalloc((bp->l2_cid_count + 1) * sizeof(*tbl),
2258 GFP_KERNEL);
2259 if (!tbl)
2260 goto alloc_err;
2261 bp->msix_table = tbl;
2262
2263 /* ilt */
2264 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
2265 if (!ilt)
2266 goto alloc_err;
2267 bp->ilt = ilt;
2268
2269 return 0;
2270alloc_err:
2271 bnx2x_free_mem_bp(bp);
2272 return -ENOMEM;
2273
2274}
2275
2276/* called with rtnl_lock */
2277int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2278{
2279 struct bnx2x *bp = netdev_priv(dev);
2280 int rc = 0;
2281
2282 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2283 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2284 return -EAGAIN;
2285 }
2286
2287 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2288 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2289 return -EINVAL;
2290
2291 /* This does not race with packet allocation
2292 * because the actual alloc size is
2293 * only updated as part of load
2294 */
2295 dev->mtu = new_mtu;
2296
2297 if (netif_running(dev)) {
2298 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2299 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2300 }
2301
2302 return rc;
2303}
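/*
 * Illustrative note, hedged: assuming the usual bnx2x definitions of
 * ETH_MAX_JUMBO_PACKET_SIZE (9600) and ETH_MIN_PACKET_SIZE (60), the bounds
 * check in bnx2x_change_mtu() accepts MTUs of roughly 46..9600 bytes, since
 * the lower bound compares new_mtu + ETH_HLEN against the minimum frame
 * size.
 */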
2304
2305void bnx2x_tx_timeout(struct net_device *dev)
2306{
2307 struct bnx2x *bp = netdev_priv(dev);
2308
2309#ifdef BNX2X_STOP_ON_ERROR
2310 if (!bp->panic)
2311 bnx2x_panic();
2312#endif
2313 /* This allows the netif to be shutdown gracefully before resetting */
2314 schedule_delayed_work(&bp->reset_task, 0);
2315}
2316
2317#ifdef BCM_VLAN
2318/* called with rtnl_lock */
2319void bnx2x_vlan_rx_register(struct net_device *dev,
2320 struct vlan_group *vlgrp)
2321{
2322 struct bnx2x *bp = netdev_priv(dev);
2323
2324 bp->vlgrp = vlgrp;
2325}
2326
2327#endif
2328
2329int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2330{
2331 struct net_device *dev = pci_get_drvdata(pdev);
2332 struct bnx2x *bp;
2333
2334 if (!dev) {
2335 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2336 return -ENODEV;
2337 }
2338 bp = netdev_priv(dev);
2339
2340 rtnl_lock();
2341
2342 pci_save_state(pdev);
2343
2344 if (!netif_running(dev)) {
2345 rtnl_unlock();
2346 return 0;
2347 }
2348
2349 netif_device_detach(dev);
2350
2351 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2352
2353 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2354
2355 rtnl_unlock();
2356
2357 return 0;
2358}
2359
2360int bnx2x_resume(struct pci_dev *pdev)
2361{
2362 struct net_device *dev = pci_get_drvdata(pdev);
2363 struct bnx2x *bp;
2364 int rc;
2365
2366 if (!dev) {
2367 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2368 return -ENODEV;
2369 }
2370 bp = netdev_priv(dev);
2371
2372 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2373 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2374 return -EAGAIN;
2375 }
2376
2377 rtnl_lock();
2378
2379 pci_restore_state(pdev);
2380
2381 if (!netif_running(dev)) {
2382 rtnl_unlock();
2383 return 0;
2384 }
2385
2386 bnx2x_set_power_state(bp, PCI_D0);
2387 netif_device_attach(dev);
2388
2389 /* Since the chip was reset, clear the FW sequence number */
2390 bp->fw_seq = 0;
2391 rc = bnx2x_nic_load(bp, LOAD_OPEN);
2392
2393 rtnl_unlock();
2394
2395 return rc;
2396}