1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
18
19#include <linux/etherdevice.h>
20#include <linux/ip.h>
21#include <net/ipv6.h>
22#include <net/ip6_checksum.h>
23#include <linux/firmware.h>
24#include "bnx2x_cmn.h"
25
26#ifdef BCM_VLAN
27#include <linux/if_vlan.h>
28#endif
29
30#include "bnx2x_init.h"
31
32
33/* free skb in the packet ring at pos idx
34 * return idx of last bd freed
35 */
36static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
37 u16 idx)
38{
39 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
40 struct eth_tx_start_bd *tx_start_bd;
41 struct eth_tx_bd *tx_data_bd;
42 struct sk_buff *skb = tx_buf->skb;
43 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
44 int nbd;
45
46 /* prefetch skb end pointer to speedup dev_kfree_skb() */
47 prefetch(&skb->end);
48
49 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
50 idx, tx_buf, skb);
51
52 /* unmap first bd */
53 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
54 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
55 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
 56 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
57
58 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
59#ifdef BNX2X_STOP_ON_ERROR
60 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
61 BNX2X_ERR("BAD nbd!\n");
62 bnx2x_panic();
63 }
64#endif
65 new_cons = nbd + tx_buf->first_bd;
66
67 /* Get the next bd */
68 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
69
70 /* Skip a parse bd... */
71 --nbd;
72 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
73
74 /* ...and the TSO split header bd since they have no mapping */
75 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
76 --nbd;
77 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
78 }
79
80 /* now free frags */
81 while (nbd > 0) {
82
83 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
84 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
85 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
86 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
87 if (--nbd)
88 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
89 }
90
91 /* release skb */
92 WARN_ON(!skb);
93 dev_kfree_skb(skb);
94 tx_buf->first_bd = 0;
95 tx_buf->skb = NULL;
96
97 return new_cons;
98}
99
100int bnx2x_tx_int(struct bnx2x_fastpath *fp)
101{
102 struct bnx2x *bp = fp->bp;
103 struct netdev_queue *txq;
104 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
105
106#ifdef BNX2X_STOP_ON_ERROR
107 if (unlikely(bp->panic))
108 return -1;
109#endif
110
111 txq = netdev_get_tx_queue(bp->dev, fp->index);
112 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
113 sw_cons = fp->tx_pkt_cons;
114
115 while (sw_cons != hw_cons) {
116 u16 pkt_cons;
117
118 pkt_cons = TX_BD(sw_cons);
119
120 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
121 " pkt_cons %u\n",
122 fp->index, hw_cons, sw_cons, pkt_cons);
 123
124 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
125 sw_cons++;
126 }
127
128 fp->tx_pkt_cons = sw_cons;
129 fp->tx_bd_cons = bd_cons;
130
131 /* Need to make the tx_bd_cons update visible to start_xmit()
132 * before checking for netif_tx_queue_stopped(). Without the
133 * memory barrier, there is a small possibility that
134 * start_xmit() will miss it and cause the queue to be stopped
135 * forever.
136 */
137 smp_mb();
138
139 /* TBD need a thresh? */
140 if (unlikely(netif_tx_queue_stopped(txq))) {
141 /* Taking tx_lock() is needed to prevent reenabling the queue
 142 * while it's empty. This could have happened if rx_action() gets
143 * suspended in bnx2x_tx_int() after the condition before
144 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
145 *
146 * stops the queue->sees fresh tx_bd_cons->releases the queue->
147 * sends some packets consuming the whole queue again->
148 * stops the queue
149 */
150
151 __netif_tx_lock(txq, smp_processor_id());
152
153 if ((netif_tx_queue_stopped(txq)) &&
154 (bp->state == BNX2X_STATE_OPEN) &&
155 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
156 netif_tx_wake_queue(txq);
157
158 __netif_tx_unlock(txq);
159 }
160 return 0;
161}
162
163static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
164 u16 idx)
165{
166 u16 last_max = fp->last_max_sge;
167
168 if (SUB_S16(idx, last_max) > 0)
169 fp->last_max_sge = idx;
170}
171
172static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
173 struct eth_fast_path_rx_cqe *fp_cqe)
174{
175 struct bnx2x *bp = fp->bp;
176 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
177 le16_to_cpu(fp_cqe->len_on_bd)) >>
178 SGE_PAGE_SHIFT;
179 u16 last_max, last_elem, first_elem;
180 u16 delta = 0;
181 u16 i;
182
183 if (!sge_len)
184 return;
185
186 /* First mark all used pages */
187 for (i = 0; i < sge_len; i++)
188 SGE_MASK_CLEAR_BIT(fp,
189 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
190
191 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
 192 sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
193
194 /* Here we assume that the last SGE index is the biggest */
195 prefetch((void *)(fp->sge_mask));
196 bnx2x_update_last_max_sge(fp,
197 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
198
199 last_max = RX_SGE(fp->last_max_sge);
200 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
201 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
202
203 /* If ring is not full */
204 if (last_elem + 1 != first_elem)
205 last_elem++;
206
207 /* Now update the prod */
208 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
209 if (likely(fp->sge_mask[i]))
210 break;
211
212 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
213 delta += RX_SGE_MASK_ELEM_SZ;
214 }
215
216 if (delta > 0) {
217 fp->rx_sge_prod += delta;
218 /* clear page-end entries */
219 bnx2x_clear_sge_mask_next_elems(fp);
220 }
221
222 DP(NETIF_MSG_RX_STATUS,
223 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
224 fp->last_max_sge, fp->rx_sge_prod);
225}
226
227static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
228 struct sk_buff *skb, u16 cons, u16 prod)
229{
230 struct bnx2x *bp = fp->bp;
231 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
232 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
233 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
234 dma_addr_t mapping;
235
236 /* move empty skb from pool to prod and map it */
237 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
238 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
239 bp->rx_buf_size, DMA_FROM_DEVICE);
240 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
241
242 /* move partial skb from cons to pool (don't unmap yet) */
243 fp->tpa_pool[queue] = *cons_rx_buf;
244
245 /* mark bin state as start - print error if current state != stop */
246 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
247 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
248
249 fp->tpa_state[queue] = BNX2X_TPA_START;
250
251 /* point prod_bd to new skb */
252 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
253 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
254
255#ifdef BNX2X_STOP_ON_ERROR
256 fp->tpa_queue_used |= (1 << queue);
257#ifdef _ASM_GENERIC_INT_L64_H
258 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
259#else
260 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
261#endif
262 fp->tpa_queue_used);
263#endif
264}
265
266static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
267 struct sk_buff *skb,
268 struct eth_fast_path_rx_cqe *fp_cqe,
269 u16 cqe_idx)
270{
271 struct sw_rx_page *rx_pg, old_rx_pg;
272 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
273 u32 i, frag_len, frag_size, pages;
274 int err;
275 int j;
276
277 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
278 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
279
280 /* This is needed in order to enable forwarding support */
281 if (frag_size)
282 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
283 max(frag_size, (u32)len_on_bd));
284
285#ifdef BNX2X_STOP_ON_ERROR
286 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
287 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
288 pages, cqe_idx);
289 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
290 fp_cqe->pkt_len, len_on_bd);
291 bnx2x_panic();
292 return -EINVAL;
293 }
294#endif
295
296 /* Run through the SGL and compose the fragmented skb */
297 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
298 u16 sge_idx =
299 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
300
301 /* FW gives the indices of the SGE as if the ring is an array
302 (meaning that "next" element will consume 2 indices) */
303 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
304 rx_pg = &fp->rx_page_ring[sge_idx];
305 old_rx_pg = *rx_pg;
306
307 /* If we fail to allocate a substitute page, we simply stop
308 where we are and drop the whole packet */
309 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
310 if (unlikely(err)) {
311 fp->eth_q_stats.rx_skb_alloc_failed++;
312 return err;
313 }
314
 315 /* Unmap the page as we are going to pass it to the stack */
316 dma_unmap_page(&bp->pdev->dev,
317 dma_unmap_addr(&old_rx_pg, mapping),
318 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
319
320 /* Add one frag and update the appropriate fields in the skb */
321 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
322
323 skb->data_len += frag_len;
324 skb->truesize += frag_len;
325 skb->len += frag_len;
326
327 frag_size -= frag_len;
328 }
329
330 return 0;
331}
332
333static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
334 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
335 u16 cqe_idx)
336{
337 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
338 struct sk_buff *skb = rx_buf->skb;
339 /* alloc new skb */
340 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
341
342 /* Unmap skb in the pool anyway, as we are going to change
343 pool entry status to BNX2X_TPA_STOP even if new skb allocation
344 fails. */
345 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
346 bp->rx_buf_size, DMA_FROM_DEVICE);
347
348 if (likely(new_skb)) {
349 /* fix ip xsum and give it to the stack */
350 /* (no need to map the new skb) */
351#ifdef BCM_VLAN
352 int is_vlan_cqe =
353 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
354 PARSING_FLAGS_VLAN);
355 int is_not_hwaccel_vlan_cqe =
356 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
357#endif
358
359 prefetch(skb);
 360 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
361
362#ifdef BNX2X_STOP_ON_ERROR
363 if (pad + len > bp->rx_buf_size) {
364 BNX2X_ERR("skb_put is about to fail... "
365 "pad %d len %d rx_buf_size %d\n",
366 pad, len, bp->rx_buf_size);
367 bnx2x_panic();
368 return;
369 }
370#endif
371
372 skb_reserve(skb, pad);
373 skb_put(skb, len);
374
375 skb->protocol = eth_type_trans(skb, bp->dev);
376 skb->ip_summed = CHECKSUM_UNNECESSARY;
377
378 {
379 struct iphdr *iph;
380
381 iph = (struct iphdr *)skb->data;
382#ifdef BCM_VLAN
383 /* If there is no Rx VLAN offloading -
384 take VLAN tag into an account */
385 if (unlikely(is_not_hwaccel_vlan_cqe))
386 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
387#endif
388 iph->check = 0;
389 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
390 }
391
392 if (!bnx2x_fill_frag_skb(bp, fp, skb,
393 &cqe->fast_path_cqe, cqe_idx)) {
394#ifdef BCM_VLAN
395 if ((bp->vlgrp != NULL) &&
396 (le16_to_cpu(cqe->fast_path_cqe.
397 pars_flags.flags) & PARSING_FLAGS_VLAN))
398 vlan_gro_receive(&fp->napi, bp->vlgrp,
399 le16_to_cpu(cqe->fast_path_cqe.
400 vlan_tag), skb);
401 else
402#endif
403 napi_gro_receive(&fp->napi, skb);
404 } else {
405 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
406 " - dropping packet!\n");
407 dev_kfree_skb(skb);
408 }
409
410
411 /* put new skb in bin */
412 fp->tpa_pool[queue].skb = new_skb;
413
414 } else {
415 /* else drop the packet and keep the buffer in the bin */
416 DP(NETIF_MSG_RX_STATUS,
417 "Failed to allocate new skb - dropping packet!\n");
418 fp->eth_q_stats.rx_skb_alloc_failed++;
419 }
420
421 fp->tpa_state[queue] = BNX2X_TPA_STOP;
422}
423
424/* Set Toeplitz hash value in the skb using the value from the
425 * CQE (calculated by HW).
426 */
427static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
428 struct sk_buff *skb)
429{
430 /* Set Toeplitz hash from CQE */
431 if ((bp->dev->features & NETIF_F_RXHASH) &&
432 (cqe->fast_path_cqe.status_flags &
433 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
434 skb->rxhash =
435 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
436}
437
438int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
439{
440 struct bnx2x *bp = fp->bp;
441 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
442 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
443 int rx_pkt = 0;
444
445#ifdef BNX2X_STOP_ON_ERROR
446 if (unlikely(bp->panic))
447 return 0;
448#endif
449
450 /* CQ "next element" is of the size of the regular element,
451 that's why it's ok here */
452 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
453 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
454 hw_comp_cons++;
455
456 bd_cons = fp->rx_bd_cons;
457 bd_prod = fp->rx_bd_prod;
458 bd_prod_fw = bd_prod;
459 sw_comp_cons = fp->rx_comp_cons;
460 sw_comp_prod = fp->rx_comp_prod;
461
462 /* Memory barrier necessary as speculative reads of the rx
463 * buffer can be ahead of the index in the status block
464 */
465 rmb();
466
467 DP(NETIF_MSG_RX_STATUS,
468 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
469 fp->index, hw_comp_cons, sw_comp_cons);
470
471 while (sw_comp_cons != hw_comp_cons) {
472 struct sw_rx_bd *rx_buf = NULL;
473 struct sk_buff *skb;
474 union eth_rx_cqe *cqe;
475 u8 cqe_fp_flags;
476 u16 len, pad;
477
478 comp_ring_cons = RCQ_BD(sw_comp_cons);
479 bd_prod = RX_BD(bd_prod);
480 bd_cons = RX_BD(bd_cons);
481
482 /* Prefetch the page containing the BD descriptor
483 at producer's index. It will be needed when new skb is
484 allocated */
485 prefetch((void *)(PAGE_ALIGN((unsigned long)
486 (&fp->rx_desc_ring[bd_prod])) -
487 PAGE_SIZE + 1));
488
489 cqe = &fp->rx_comp_ring[comp_ring_cons];
490 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
491
492 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
493 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
494 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
495 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
496 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
497 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
498
499 /* is this a slowpath msg? */
500 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
501 bnx2x_sp_event(fp, cqe);
502 goto next_cqe;
503
504 /* this is an rx packet */
505 } else {
506 rx_buf = &fp->rx_buf_ring[bd_cons];
507 skb = rx_buf->skb;
508 prefetch(skb);
509 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
510 pad = cqe->fast_path_cqe.placement_offset;
511
512 /* If CQE is marked both TPA_START and TPA_END
513 it is a non-TPA CQE */
514 if ((!fp->disable_tpa) &&
515 (TPA_TYPE(cqe_fp_flags) !=
516 (TPA_TYPE_START | TPA_TYPE_END))) {
517 u16 queue = cqe->fast_path_cqe.queue_index;
518
519 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
520 DP(NETIF_MSG_RX_STATUS,
521 "calling tpa_start on queue %d\n",
522 queue);
523
524 bnx2x_tpa_start(fp, queue, skb,
525 bd_cons, bd_prod);
526
527 /* Set Toeplitz hash for an LRO skb */
528 bnx2x_set_skb_rxhash(bp, cqe, skb);
529
530 goto next_rx;
531 }
532
533 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
534 DP(NETIF_MSG_RX_STATUS,
535 "calling tpa_stop on queue %d\n",
536 queue);
537
538 if (!BNX2X_RX_SUM_FIX(cqe))
 539 BNX2X_ERR("STOP on non-TCP "
 540 "data\n");
541
 542 /* This is the size of the linear data
 543 on this skb */
544 len = le16_to_cpu(cqe->fast_path_cqe.
545 len_on_bd);
546 bnx2x_tpa_stop(bp, fp, queue, pad,
547 len, cqe, comp_ring_cons);
548#ifdef BNX2X_STOP_ON_ERROR
549 if (bp->panic)
550 return 0;
551#endif
552
553 bnx2x_update_sge_prod(fp,
554 &cqe->fast_path_cqe);
555 goto next_cqe;
556 }
557 }
558
559 dma_sync_single_for_device(&bp->pdev->dev,
560 dma_unmap_addr(rx_buf, mapping),
561 pad + RX_COPY_THRESH,
562 DMA_FROM_DEVICE);
 563 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
564
565 /* is this an error packet? */
566 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
567 DP(NETIF_MSG_RX_ERR,
568 "ERROR flags %x rx packet %u\n",
569 cqe_fp_flags, sw_comp_cons);
570 fp->eth_q_stats.rx_err_discard_pkt++;
571 goto reuse_rx;
572 }
573
574 /* Since we don't have a jumbo ring
575 * copy small packets if mtu > 1500
576 */
577 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
578 (len <= RX_COPY_THRESH)) {
579 struct sk_buff *new_skb;
580
581 new_skb = netdev_alloc_skb(bp->dev,
582 len + pad);
583 if (new_skb == NULL) {
584 DP(NETIF_MSG_RX_ERR,
585 "ERROR packet dropped "
586 "because of alloc failure\n");
587 fp->eth_q_stats.rx_skb_alloc_failed++;
588 goto reuse_rx;
589 }
590
591 /* aligned copy */
592 skb_copy_from_linear_data_offset(skb, pad,
593 new_skb->data + pad, len);
594 skb_reserve(new_skb, pad);
595 skb_put(new_skb, len);
596
 597 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
598
599 skb = new_skb;
600
601 } else
602 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
603 dma_unmap_single(&bp->pdev->dev,
604 dma_unmap_addr(rx_buf, mapping),
605 bp->rx_buf_size,
606 DMA_FROM_DEVICE);
607 skb_reserve(skb, pad);
608 skb_put(skb, len);
609
610 } else {
611 DP(NETIF_MSG_RX_ERR,
612 "ERROR packet dropped because "
613 "of alloc failure\n");
614 fp->eth_q_stats.rx_skb_alloc_failed++;
615reuse_rx:
 616 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
617 goto next_rx;
618 }
619
620 skb->protocol = eth_type_trans(skb, bp->dev);
621
 622 /* Set Toeplitz hash for a non-LRO skb */
623 bnx2x_set_skb_rxhash(bp, cqe, skb);
624
 625 skb_checksum_none_assert(skb);
626 if (bp->rx_csum) {
627 if (likely(BNX2X_RX_CSUM_OK(cqe)))
628 skb->ip_summed = CHECKSUM_UNNECESSARY;
629 else
630 fp->eth_q_stats.hw_csum_err++;
631 }
632 }
633
634 skb_record_rx_queue(skb, fp->index);
635
636#ifdef BCM_VLAN
637 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
638 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
639 PARSING_FLAGS_VLAN))
640 vlan_gro_receive(&fp->napi, bp->vlgrp,
641 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
642 else
643#endif
644 napi_gro_receive(&fp->napi, skb);
645
646
647next_rx:
648 rx_buf->skb = NULL;
649
650 bd_cons = NEXT_RX_IDX(bd_cons);
651 bd_prod = NEXT_RX_IDX(bd_prod);
652 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
653 rx_pkt++;
654next_cqe:
655 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
656 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
657
658 if (rx_pkt == budget)
659 break;
660 } /* while */
661
662 fp->rx_bd_cons = bd_cons;
663 fp->rx_bd_prod = bd_prod_fw;
664 fp->rx_comp_cons = sw_comp_cons;
665 fp->rx_comp_prod = sw_comp_prod;
666
667 /* Update producers */
668 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
669 fp->rx_sge_prod);
670
671 fp->rx_pkt += rx_pkt;
672 fp->rx_calls++;
673
674 return rx_pkt;
675}
676
677static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
678{
679 struct bnx2x_fastpath *fp = fp_cookie;
680 struct bnx2x *bp = fp->bp;
681
682 /* Return here if interrupt is disabled */
683 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
684 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
685 return IRQ_HANDLED;
686 }
687
688 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
689 "[fp %d fw_sd %d igusb %d]\n",
690 fp->index, fp->fw_sb_id, fp->igu_sb_id);
691 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
692
693#ifdef BNX2X_STOP_ON_ERROR
694 if (unlikely(bp->panic))
695 return IRQ_HANDLED;
696#endif
697
698 /* Handle Rx and Tx according to MSI-X vector */
699 prefetch(fp->rx_cons_sb);
700 prefetch(fp->tx_cons_sb);
 701 prefetch(&fp->sb_running_index[SM_RX_ID]);
702 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
703
704 return IRQ_HANDLED;
705}
706
707
708/* HW Lock for shared dual port PHYs */
709void bnx2x_acquire_phy_lock(struct bnx2x *bp)
710{
711 mutex_lock(&bp->port.phy_mutex);
712
713 if (bp->port.need_hw_lock)
714 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
715}
716
717void bnx2x_release_phy_lock(struct bnx2x *bp)
718{
719 if (bp->port.need_hw_lock)
720 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
721
722 mutex_unlock(&bp->port.phy_mutex);
723}
724
725void bnx2x_link_report(struct bnx2x *bp)
726{
727 if (bp->flags & MF_FUNC_DIS) {
728 netif_carrier_off(bp->dev);
729 netdev_err(bp->dev, "NIC Link is Down\n");
730 return;
731 }
732
733 if (bp->link_vars.link_up) {
734 u16 line_speed;
735
736 if (bp->state == BNX2X_STATE_OPEN)
737 netif_carrier_on(bp->dev);
738 netdev_info(bp->dev, "NIC Link is Up, ");
739
740 line_speed = bp->link_vars.line_speed;
 741 if (IS_MF(bp)) {
742 u16 vn_max_rate;
743
744 vn_max_rate =
745 ((bp->mf_config[BP_VN(bp)] &
746 FUNC_MF_CFG_MAX_BW_MASK) >>
747 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
748 if (vn_max_rate < line_speed)
749 line_speed = vn_max_rate;
750 }
751 pr_cont("%d Mbps ", line_speed);
752
753 if (bp->link_vars.duplex == DUPLEX_FULL)
754 pr_cont("full duplex");
755 else
756 pr_cont("half duplex");
757
758 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
759 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
760 pr_cont(", receive ");
761 if (bp->link_vars.flow_ctrl &
762 BNX2X_FLOW_CTRL_TX)
763 pr_cont("& transmit ");
764 } else {
765 pr_cont(", transmit ");
766 }
767 pr_cont("flow control ON");
768 }
769 pr_cont("\n");
770
771 } else { /* link_down */
772 netif_carrier_off(bp->dev);
773 netdev_err(bp->dev, "NIC Link is Down\n");
774 }
775}
776
777/* Returns the number of actually allocated BDs */
778static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
779 int rx_ring_size)
780{
781 struct bnx2x *bp = fp->bp;
782 u16 ring_prod, cqe_ring_prod;
783 int i;
784
785 fp->rx_comp_cons = 0;
786 cqe_ring_prod = ring_prod = 0;
787 for (i = 0; i < rx_ring_size; i++) {
788 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
789 BNX2X_ERR("was only able to allocate "
790 "%d rx skbs on queue[%d]\n", i, fp->index);
791 fp->eth_q_stats.rx_skb_alloc_failed++;
792 break;
793 }
794 ring_prod = NEXT_RX_IDX(ring_prod);
795 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
796 WARN_ON(ring_prod <= i);
797 }
798
799 fp->rx_bd_prod = ring_prod;
800 /* Limit the CQE producer by the CQE ring size */
801 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
802 cqe_ring_prod);
803 fp->rx_pkt = fp->rx_calls = 0;
804
805 return i;
806}
807
808static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
809{
810 struct bnx2x *bp = fp->bp;
811 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
812 MAX_RX_AVAIL/bp->num_queues;
813
814 rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
815
816 bnx2x_alloc_rx_bds(fp, rx_ring_size);
817
818 /* Warning!
819 * this will generate an interrupt (to the TSTORM)
820 * must only be done after chip is initialized
821 */
822 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
823 fp->rx_sge_prod);
824}
825
826void bnx2x_init_rx_rings(struct bnx2x *bp)
827{
828 int func = BP_FUNC(bp);
829 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
830 ETH_MAX_AGGREGATION_QUEUES_E1H;
 831 u16 ring_prod;
 832 int i, j;
 833
834 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
835 BNX2X_FW_IP_HDR_ALIGN_PAD;
 836
837 DP(NETIF_MSG_IFUP,
838 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
839
840 for_each_queue(bp, j) {
841 struct bnx2x_fastpath *fp = &bp->fp[j];
 842
 843 if (!fp->disable_tpa) {
844 for (i = 0; i < max_agg_queues; i++) {
845 fp->tpa_pool[i].skb =
846 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
847 if (!fp->tpa_pool[i].skb) {
848 BNX2X_ERR("Failed to allocate TPA "
849 "skb pool for queue[%d] - "
850 "disabling TPA on this "
851 "queue!\n", j);
852 bnx2x_free_tpa_pool(bp, fp, i);
853 fp->disable_tpa = 1;
854 break;
855 }
856 dma_unmap_addr_set((struct sw_rx_bd *)
857 &bp->fp->tpa_pool[i],
858 mapping, 0);
859 fp->tpa_state[i] = BNX2X_TPA_STOP;
860 }
861
862 /* "next page" elements initialization */
863 bnx2x_set_next_page_sgl(fp);
864
865 /* set SGEs bit mask */
866 bnx2x_init_sge_ring_bit_mask(fp);
867
868 /* Allocate SGEs and initialize the ring elements */
869 for (i = 0, ring_prod = 0;
870 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
871
872 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
873 BNX2X_ERR("was only able to allocate "
874 "%d rx sges\n", i);
875 BNX2X_ERR("disabling TPA for"
876 " queue[%d]\n", j);
877 /* Cleanup already allocated elements */
878 bnx2x_free_rx_sge_range(bp,
879 fp, ring_prod);
880 bnx2x_free_tpa_pool(bp,
881 fp, max_agg_queues);
882 fp->disable_tpa = 1;
883 ring_prod = 0;
884 break;
885 }
886 ring_prod = NEXT_SGE_IDX(ring_prod);
887 }
888
889 fp->rx_sge_prod = ring_prod;
890 }
891 }
892
893 for_each_queue(bp, j) {
894 struct bnx2x_fastpath *fp = &bp->fp[j];
895
896 fp->rx_bd_cons = 0;
 897
 898 bnx2x_set_next_page_rx_bd(fp);
899
900 /* CQ ring */
 901 bnx2x_set_next_page_rx_cq(fp);
902
903 /* Allocate BDs and initialize BD ring */
 904 bnx2x_alloc_rx_bd_ring(fp);
 905
906 if (j != 0)
907 continue;
908
909 if (!CHIP_IS_E2(bp)) {
910 REG_WR(bp, BAR_USTRORM_INTMEM +
911 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
912 U64_LO(fp->rx_comp_mapping));
913 REG_WR(bp, BAR_USTRORM_INTMEM +
914 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
915 U64_HI(fp->rx_comp_mapping));
916 }
917 }
918}
919static void bnx2x_free_tx_skbs(struct bnx2x *bp)
920{
921 int i;
922
923 for_each_queue(bp, i) {
924 struct bnx2x_fastpath *fp = &bp->fp[i];
925
926 u16 bd_cons = fp->tx_bd_cons;
927 u16 sw_prod = fp->tx_pkt_prod;
928 u16 sw_cons = fp->tx_pkt_cons;
929
930 while (sw_cons != sw_prod) {
931 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
932 sw_cons++;
933 }
934 }
935}
936
937static void bnx2x_free_rx_skbs(struct bnx2x *bp)
938{
939 int i, j;
940
941 for_each_queue(bp, j) {
942 struct bnx2x_fastpath *fp = &bp->fp[j];
943
944 for (i = 0; i < NUM_RX_BD; i++) {
945 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
946 struct sk_buff *skb = rx_buf->skb;
947
948 if (skb == NULL)
949 continue;
950
951 dma_unmap_single(&bp->pdev->dev,
952 dma_unmap_addr(rx_buf, mapping),
953 bp->rx_buf_size, DMA_FROM_DEVICE);
954
955 rx_buf->skb = NULL;
956 dev_kfree_skb(skb);
957 }
958 if (!fp->disable_tpa)
959 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
960 ETH_MAX_AGGREGATION_QUEUES_E1 :
961 ETH_MAX_AGGREGATION_QUEUES_E1H);
962 }
963}
964
965void bnx2x_free_skbs(struct bnx2x *bp)
966{
967 bnx2x_free_tx_skbs(bp);
968 bnx2x_free_rx_skbs(bp);
969}
970
971static void bnx2x_free_msix_irqs(struct bnx2x *bp)
972{
973 int i, offset = 1;
974
975 free_irq(bp->msix_table[0].vector, bp->dev);
976 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
977 bp->msix_table[0].vector);
978
979#ifdef BCM_CNIC
980 offset++;
981#endif
982 for_each_queue(bp, i) {
983 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
984 "state %x\n", i, bp->msix_table[i + offset].vector,
985 bnx2x_fp(bp, i, state));
986
987 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
988 }
989}
990
 991void bnx2x_free_irq(struct bnx2x *bp)
 992{
993 if (bp->flags & USING_MSIX_FLAG)
994 bnx2x_free_msix_irqs(bp);
995 else if (bp->flags & USING_MSI_FLAG)
996 free_irq(bp->pdev->irq, bp->dev);
997 else
998 free_irq(bp->pdev->irq, bp->dev);
999}
1000
1001int bnx2x_enable_msix(struct bnx2x *bp)
1002{
1003 int msix_vec = 0, i, rc, req_cnt;
1004
1005 bp->msix_table[msix_vec].entry = msix_vec;
1006 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1007 bp->msix_table[0].entry);
1008 msix_vec++;
1009
1010#ifdef BCM_CNIC
1011 bp->msix_table[msix_vec].entry = msix_vec;
1012 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1013 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1014 msix_vec++;
1015#endif
1016 for_each_queue(bp, i) {
1017 bp->msix_table[msix_vec].entry = msix_vec;
1018 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1019 "(fastpath #%u)\n", msix_vec, msix_vec, i);
1020 msix_vec++;
1021 }
1022
1023 req_cnt = BNX2X_NUM_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
1024
1025 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1026
1027 /*
1028 * reconfigure number of tx/rx queues according to available
1029 * MSI-X vectors
1030 */
1031 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1032 /* how many fewer vectors will we have? */
1033 int diff = req_cnt - rc;
1034
1035 DP(NETIF_MSG_IFUP,
1036 "Trying to use less MSI-X vectors: %d\n", rc);
1037
1038 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1039
1040 if (rc) {
1041 DP(NETIF_MSG_IFUP,
1042 "MSI-X is not attainable rc %d\n", rc);
1043 return rc;
1044 }
1045 /*
1046 * decrease number of queues by number of unallocated entries
1047 */
1048 bp->num_queues -= diff;
1049
1050 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1051 bp->num_queues);
1052 } else if (rc) {
1053 /* fall to INTx if not enough memory */
1054 if (rc == -ENOMEM)
1055 bp->flags |= DISABLE_MSI_FLAG;
1056 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1057 return rc;
1058 }
1059
1060 bp->flags |= USING_MSIX_FLAG;
1061
1062 return 0;
1063}
1064
1065static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1066{
1067 int i, rc, offset = 1;
1068
1069 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
1070 bp->dev->name, bp->dev);
1071 if (rc) {
1072 BNX2X_ERR("request sp irq failed\n");
1073 return -EBUSY;
1074 }
1075
1076#ifdef BCM_CNIC
1077 offset++;
1078#endif
1079 for_each_queue(bp, i) {
1080 struct bnx2x_fastpath *fp = &bp->fp[i];
1081 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1082 bp->dev->name, i);
1083
1084 rc = request_irq(bp->msix_table[offset].vector,
1085 bnx2x_msix_fp_int, 0, fp->name, fp);
1086 if (rc) {
1087 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
1088 bnx2x_free_msix_irqs(bp);
1089 return -EBUSY;
1090 }
1091
1092 offset++;
1093 fp->state = BNX2X_FP_STATE_IRQ;
1094 }
1095
1096 i = BNX2X_NUM_QUEUES(bp);
1097 offset = 1 + CNIC_CONTEXT_USE;
1098 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1099 " ... fp[%d] %d\n",
1100 bp->msix_table[0].vector,
1101 0, bp->msix_table[offset].vector,
1102 i - 1, bp->msix_table[offset + i - 1].vector);
1103
1104 return 0;
1105}
1106
1107int bnx2x_enable_msi(struct bnx2x *bp)
1108{
1109 int rc;
1110
1111 rc = pci_enable_msi(bp->pdev);
1112 if (rc) {
1113 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1114 return -1;
1115 }
1116 bp->flags |= USING_MSI_FLAG;
1117
1118 return 0;
1119}
1120
1121static int bnx2x_req_irq(struct bnx2x *bp)
1122{
1123 unsigned long flags;
1124 int rc;
1125
1126 if (bp->flags & USING_MSI_FLAG)
1127 flags = 0;
1128 else
1129 flags = IRQF_SHARED;
1130
1131 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1132 bp->dev->name, bp->dev);
1133 if (!rc)
1134 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1135
1136 return rc;
1137}
1138
1139static void bnx2x_napi_enable(struct bnx2x *bp)
1140{
1141 int i;
1142
1143 for_each_queue(bp, i)
1144 napi_enable(&bnx2x_fp(bp, i, napi));
1145}
1146
1147static void bnx2x_napi_disable(struct bnx2x *bp)
1148{
1149 int i;
1150
1151 for_each_queue(bp, i)
1152 napi_disable(&bnx2x_fp(bp, i, napi));
1153}
1154
1155void bnx2x_netif_start(struct bnx2x *bp)
1156{
1157 int intr_sem;
1158
1159 intr_sem = atomic_dec_and_test(&bp->intr_sem);
1160 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1161
1162 if (intr_sem) {
1163 if (netif_running(bp->dev)) {
1164 bnx2x_napi_enable(bp);
1165 bnx2x_int_enable(bp);
1166 if (bp->state == BNX2X_STATE_OPEN)
1167 netif_tx_wake_all_queues(bp->dev);
1168 }
1169 }
1170}
1171
1172void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1173{
1174 bnx2x_int_disable_sync(bp, disable_hw);
1175 bnx2x_napi_disable(bp);
1176 netif_tx_disable(bp->dev);
1177}
1178
1179void bnx2x_set_num_queues(struct bnx2x *bp)
1180{
1181 switch (bp->multi_mode) {
1182 case ETH_RSS_MODE_DISABLED:
1183 bp->num_queues = 1;
1184 break;
1185 case ETH_RSS_MODE_REGULAR:
1186 bp->num_queues = bnx2x_calc_num_queues(bp);
1187 break;
1188 default:
1189 bp->num_queues = 1;
1190 break;
1191 }
1192}
1193
1194static void bnx2x_release_firmware(struct bnx2x *bp)
1195{
1196 kfree(bp->init_ops_offsets);
1197 kfree(bp->init_ops);
1198 kfree(bp->init_data);
1199 release_firmware(bp->firmware);
1200}
1201
1202/* must be called with rtnl_lock */
1203int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1204{
1205 u32 load_code;
1206 int i, rc;
1207
1208 /* Set init arrays */
1209 rc = bnx2x_init_firmware(bp);
1210 if (rc) {
1211 BNX2X_ERR("Error loading firmware\n");
1212 return rc;
1213 }
1214
1215#ifdef BNX2X_STOP_ON_ERROR
1216 if (unlikely(bp->panic))
1217 return -EPERM;
1218#endif
1219
1220 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1221
1222 /* must be called before memory allocation and HW init */
1223 bnx2x_ilt_set_info(bp);
1224
1225 if (bnx2x_alloc_mem(bp))
1226 return -ENOMEM;
1227
1228 netif_set_real_num_tx_queues(bp->dev, bp->num_queues);
1229 rc = netif_set_real_num_rx_queues(bp->dev, bp->num_queues);
1230 if (rc) {
1231 BNX2X_ERR("Unable to update real_num_rx_queues\n");
1232 goto load_error0;
1233 }
1234
1235 for_each_queue(bp, i)
1236 bnx2x_fp(bp, i, disable_tpa) =
1237 ((bp->flags & TPA_ENABLE_FLAG) == 0);
1238
1239 bnx2x_napi_enable(bp);
1240
1241 /* Send LOAD_REQUEST command to MCP
1242 Returns the type of LOAD command:
1243 if it is the first port to be initialized
1244 common blocks should be initialized, otherwise - not
1245 */
1246 if (!BP_NOMCP(bp)) {
1247 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1248 if (!load_code) {
1249 BNX2X_ERR("MCP response failure, aborting\n");
1250 rc = -EBUSY;
1251 goto load_error1;
1252 }
1253 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1254 rc = -EBUSY; /* other port in diagnostic mode */
1255 goto load_error1;
1256 }
1257
1258 } else {
1259 int path = BP_PATH(bp);
1260 int port = BP_PORT(bp);
1261
1262 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1263 path, load_count[path][0], load_count[path][1],
1264 load_count[path][2]);
1265 load_count[path][0]++;
1266 load_count[path][1 + port]++;
1267 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1268 path, load_count[path][0], load_count[path][1],
1269 load_count[path][2]);
1270 if (load_count[path][0] == 1)
1271 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1272 else if (load_count[path][1 + port] == 1)
1273 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1274 else
1275 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1276 }
1277
1278 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1279 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
1280 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1281 bp->port.pmf = 1;
1282 else
1283 bp->port.pmf = 0;
1284 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1285
1286 /* Initialize HW */
1287 rc = bnx2x_init_hw(bp, load_code);
1288 if (rc) {
1289 BNX2X_ERR("HW init failed, aborting\n");
1290 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1291 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1292 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1293 goto load_error2;
1294 }
1295
1296 /* Connect to IRQs */
1297 rc = bnx2x_setup_irqs(bp);
1298 if (rc) {
1299 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1300 goto load_error2;
1301 }
1302
1303 /* Setup NIC internals and enable interrupts */
1304 bnx2x_nic_init(bp, load_code);
1305
1306 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1307 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
1308 (bp->common.shmem2_base))
1309 SHMEM2_WR(bp, dcc_support,
1310 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1311 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1312
1313 /* Send LOAD_DONE command to MCP */
1314 if (!BP_NOMCP(bp)) {
1315 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1316 if (!load_code) {
1317 BNX2X_ERR("MCP response failure, aborting\n");
1318 rc = -EBUSY;
1319 goto load_error3;
1320 }
1321 }
1322
1323 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1324
1325 rc = bnx2x_func_start(bp);
1326 if (rc) {
1327 BNX2X_ERR("Function start failed!\n");
1328#ifndef BNX2X_STOP_ON_ERROR
1329 goto load_error3;
1330#else
1331 bp->panic = 1;
1332 return -EBUSY;
1333#endif
1334 }
1335
1336 rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
1337 if (rc) {
1338 BNX2X_ERR("Setup leading failed!\n");
1339#ifndef BNX2X_STOP_ON_ERROR
1340 goto load_error3;
1341#else
1342 bp->panic = 1;
1343 return -EBUSY;
1344#endif
1345 }
1346
1347 if (!CHIP_IS_E1(bp) &&
1348 (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
1349 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1350 bp->flags |= MF_FUNC_DIS;
1351 }
1352
1353#ifdef BCM_CNIC
1354 /* Enable Timer scan */
1355 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
1356#endif
1357 for_each_nondefault_queue(bp, i) {
1358 rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
1359 if (rc)
1360#ifdef BCM_CNIC
1361 goto load_error4;
1362#else
1363 goto load_error3;
1364#endif
1365 }
1366
1367 /* Now when Clients are configured we are ready to work */
1368 bp->state = BNX2X_STATE_OPEN;
1369
1370 bnx2x_set_eth_mac(bp, 1);
1371
1372#ifdef BCM_CNIC
1373 /* Set iSCSI L2 MAC */
1374 mutex_lock(&bp->cnic_mutex);
1375 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
1376 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
1377 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
1378 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
1379 BNX2X_VF_ID_INVALID, false,
1380 CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
1381 }
1382 mutex_unlock(&bp->cnic_mutex);
1383#endif
1384
1385 if (bp->port.pmf)
1386 bnx2x_initial_phy_init(bp, load_mode);
1387
1388 /* Start fast path */
1389 switch (load_mode) {
1390 case LOAD_NORMAL:
1391 /* Tx queue should be only reenabled */
1392 netif_tx_wake_all_queues(bp->dev);
1393 /* Initialize the receive filter. */
1394 bnx2x_set_rx_mode(bp->dev);
1395 break;
1396
1397 case LOAD_OPEN:
1398 netif_tx_start_all_queues(bp->dev);
1399 smp_mb__after_clear_bit();
1400 /* Initialize the receive filter. */
1401 bnx2x_set_rx_mode(bp->dev);
1402 break;
1403
1404 case LOAD_DIAG:
1405 /* Initialize the receive filter. */
1406 bnx2x_set_rx_mode(bp->dev);
1407 bp->state = BNX2X_STATE_DIAG;
1408 break;
1409
1410 default:
1411 break;
1412 }
1413
1414 if (!bp->port.pmf)
1415 bnx2x__link_status_update(bp);
1416
1417 /* start the timer */
1418 mod_timer(&bp->timer, jiffies + bp->current_interval);
1419
1420#ifdef BCM_CNIC
1421 bnx2x_setup_cnic_irq_info(bp);
1422 if (bp->state == BNX2X_STATE_OPEN)
1423 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1424#endif
1425 bnx2x_inc_load_cnt(bp);
1426
1427 bnx2x_release_firmware(bp);
1428
1429 return 0;
1430
1431#ifdef BCM_CNIC
1432load_error4:
1433 /* Disable Timer scan */
1434 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
1435#endif
1436load_error3:
1437 bnx2x_int_disable_sync(bp, 1);
1438
1439 /* Free SKBs, SGEs, TPA pool and driver internals */
1440 bnx2x_free_skbs(bp);
1441 for_each_queue(bp, i)
1442 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1443
1444 /* Release IRQs */
1445 bnx2x_free_irq(bp);
1446load_error2:
1447 if (!BP_NOMCP(bp)) {
1448 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1449 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1450 }
1451
1452 bp->port.pmf = 0;
1453load_error1:
1454 bnx2x_napi_disable(bp);
1455load_error0:
1456 bnx2x_free_mem(bp);
1457
1458 bnx2x_release_firmware(bp);
1459
1460 return rc;
1461}
1462
1463/* must be called with rtnl_lock */
1464int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1465{
1466 int i;
1467
1468 if (bp->state == BNX2X_STATE_CLOSED) {
1469 /* Interface has been removed - nothing to recover */
1470 bp->recovery_state = BNX2X_RECOVERY_DONE;
1471 bp->is_leader = 0;
1472 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1473 smp_wmb();
1474
1475 return -EINVAL;
1476 }
1477
1478#ifdef BCM_CNIC
1479 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1480#endif
1481 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1482
1483 /* Set "drop all" */
1484 bp->rx_mode = BNX2X_RX_MODE_NONE;
1485 bnx2x_set_storm_rx_mode(bp);
1486
1487 /* Stop Tx */
1488 bnx2x_tx_disable(bp);
1489 del_timer_sync(&bp->timer);
1490 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
1491 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
1492 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1493
1494
1495 /* Cleanup the chip if needed */
1496 if (unload_mode != UNLOAD_RECOVERY)
1497 bnx2x_chip_cleanup(bp, unload_mode);
1498 else {
1499 /* Disable HW interrupts, NAPI and Tx */
1500 bnx2x_netif_stop(bp, 1);
1501
1502 /* Release IRQs */
1503 bnx2x_free_irq(bp);
1504 }
1505
1506 bp->port.pmf = 0;
1507
1508 /* Free SKBs, SGEs, TPA pool and driver internals */
1509 bnx2x_free_skbs(bp);
1510 for_each_queue(bp, i)
1511 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1512
1513 bnx2x_free_mem(bp);
1514
1515 bp->state = BNX2X_STATE_CLOSED;
1516
1517 /* The last driver must disable a "close the gate" if there is no
1518 * parity attention or "process kill" pending.
1519 */
1520 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1521 bnx2x_reset_is_done(bp))
1522 bnx2x_disable_close_the_gate(bp);
1523
1524 /* Reset MCP mailbox sequence if there is an ongoing recovery */
1525 if (unload_mode == UNLOAD_RECOVERY)
1526 bp->fw_seq = 0;
1527
1528 return 0;
1529}
1530int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1531{
1532 u16 pmcsr;
1533
1534 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1535
1536 switch (state) {
1537 case PCI_D0:
1538 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1539 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
1540 PCI_PM_CTRL_PME_STATUS));
1541
1542 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
1543 /* delay required during transition out of D3hot */
1544 msleep(20);
1545 break;
1546
1547 case PCI_D3hot:
1548 /* If there are other clients above don't
1549 shut down the power */
1550 if (atomic_read(&bp->pdev->enable_cnt) != 1)
1551 return 0;
1552 /* Don't shut down the power for emulation and FPGA */
1553 if (CHIP_REV_IS_SLOW(bp))
1554 return 0;
1555
1556 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1557 pmcsr |= 3;
1558
1559 if (bp->wol)
1560 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1561
1562 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1563 pmcsr);
1564
1565 /* No more memory access after this point until
1566 * device is brought back to D0.
1567 */
1568 break;
1569
1570 default:
1571 return -EINVAL;
1572 }
1573 return 0;
1574}
1575
1576
1577
1578/*
1579 * net_device service functions
1580 */
1581
1582int bnx2x_poll(struct napi_struct *napi, int budget)
1583{
1584 int work_done = 0;
1585 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1586 napi);
1587 struct bnx2x *bp = fp->bp;
1588
1589 while (1) {
1590#ifdef BNX2X_STOP_ON_ERROR
1591 if (unlikely(bp->panic)) {
1592 napi_complete(napi);
1593 return 0;
1594 }
1595#endif
1596
1597 if (bnx2x_has_tx_work(fp))
1598 bnx2x_tx_int(fp);
1599
1600 if (bnx2x_has_rx_work(fp)) {
1601 work_done += bnx2x_rx_int(fp, budget - work_done);
1602
1603 /* must not complete if we consumed full budget */
1604 if (work_done >= budget)
1605 break;
1606 }
1607
1608 /* Fall out from the NAPI loop if needed */
1609 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1610 bnx2x_update_fpsb_idx(fp);
1611 /* bnx2x_has_rx_work() reads the status block,
1612 * thus we need to ensure that status block indices
1613 * have been actually read (bnx2x_update_fpsb_idx)
1614 * prior to this check (bnx2x_has_rx_work) so that
1615 * we won't write the "newer" value of the status block
1616 * to IGU (if there was a DMA right after
1617 * bnx2x_has_rx_work and if there is no rmb, the memory
1618 * reading (bnx2x_update_fpsb_idx) may be postponed
1619 * to right before bnx2x_ack_sb). In this case there
1620 * will never be another interrupt until there is
1621 * another update of the status block, while there
1622 * is still unhandled work.
1623 */
1624 rmb();
1625
1626 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1627 napi_complete(napi);
1628 /* Re-enable interrupts */
1629 DP(NETIF_MSG_HW,
1630 "Update index to %d\n", fp->fp_hc_idx);
1631 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
1632 le16_to_cpu(fp->fp_hc_idx),
1633 IGU_INT_ENABLE, 1);
1634 break;
1635 }
1636 }
1637 }
1638
1639 return work_done;
1640}
1641
1642
1643/* we split the first BD into headers and data BDs
1644 * to ease the pain of our fellow microcode engineers
1645 * we use one mapping for both BDs
1646 * So far this has only been observed to happen
1647 * in Other Operating Systems(TM)
1648 */
1649static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1650 struct bnx2x_fastpath *fp,
1651 struct sw_tx_bd *tx_buf,
1652 struct eth_tx_start_bd **tx_bd, u16 hlen,
1653 u16 bd_prod, int nbd)
1654{
1655 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1656 struct eth_tx_bd *d_tx_bd;
1657 dma_addr_t mapping;
1658 int old_len = le16_to_cpu(h_tx_bd->nbytes);
1659
1660 /* first fix first BD */
1661 h_tx_bd->nbd = cpu_to_le16(nbd);
1662 h_tx_bd->nbytes = cpu_to_le16(hlen);
1663
1664 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1665 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1666 h_tx_bd->addr_lo, h_tx_bd->nbd);
1667
1668 /* now get a new data BD
1669 * (after the pbd) and fill it */
1670 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1671 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1672
1673 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1674 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1675
1676 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1677 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1678 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1679
1680 /* this marks the BD as one that has no individual mapping */
1681 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1682
1683 DP(NETIF_MSG_TX_QUEUED,
1684 "TSO split data size is %d (%x:%x)\n",
1685 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1686
1687 /* update tx_bd */
1688 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
1689
1690 return bd_prod;
1691}
1692
1693static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1694{
1695 if (fix > 0)
1696 csum = (u16) ~csum_fold(csum_sub(csum,
1697 csum_partial(t_header - fix, fix, 0)));
1698
1699 else if (fix < 0)
1700 csum = (u16) ~csum_fold(csum_add(csum,
1701 csum_partial(t_header, -fix, 0)));
1702
1703 return swab16(csum);
1704}
1705
1706static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1707{
1708 u32 rc;
1709
1710 if (skb->ip_summed != CHECKSUM_PARTIAL)
1711 rc = XMIT_PLAIN;
1712
1713 else {
1714 if (skb->protocol == htons(ETH_P_IPV6)) {
1715 rc = XMIT_CSUM_V6;
1716 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1717 rc |= XMIT_CSUM_TCP;
1718
1719 } else {
1720 rc = XMIT_CSUM_V4;
1721 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1722 rc |= XMIT_CSUM_TCP;
1723 }
1724 }
1725
1726 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
1727 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
1728
1729 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
1730 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
1731
1732 return rc;
1733}
1734
1735#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1736/* check if packet requires linearization (packet is too fragmented)
1737 no need to check fragmentation if page size > 8K (there will be no
1738 violation to FW restrictions) */
1739static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
1740 u32 xmit_type)
1741{
1742 int to_copy = 0;
1743 int hlen = 0;
1744 int first_bd_sz = 0;
1745
1746 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
1747 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
1748
1749 if (xmit_type & XMIT_GSO) {
1750 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
1751 /* Check if LSO packet needs to be copied:
1752 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
1753 int wnd_size = MAX_FETCH_BD - 3;
1754 /* Number of windows to check */
1755 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
1756 int wnd_idx = 0;
1757 int frag_idx = 0;
1758 u32 wnd_sum = 0;
1759
1760 /* Headers length */
1761 hlen = (int)(skb_transport_header(skb) - skb->data) +
1762 tcp_hdrlen(skb);
1763
1764 /* Amount of data (w/o headers) on linear part of SKB*/
1765 first_bd_sz = skb_headlen(skb) - hlen;
1766
1767 wnd_sum = first_bd_sz;
1768
1769 /* Calculate the first sum - it's special */
1770 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
1771 wnd_sum +=
1772 skb_shinfo(skb)->frags[frag_idx].size;
1773
1774 /* If there was data on linear skb data - check it */
1775 if (first_bd_sz > 0) {
1776 if (unlikely(wnd_sum < lso_mss)) {
1777 to_copy = 1;
1778 goto exit_lbl;
1779 }
1780
1781 wnd_sum -= first_bd_sz;
1782 }
1783
1784 /* Others are easier: run through the frag list and
1785 check all windows */
1786 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
1787 wnd_sum +=
1788 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
1789
1790 if (unlikely(wnd_sum < lso_mss)) {
1791 to_copy = 1;
1792 break;
1793 }
1794 wnd_sum -=
1795 skb_shinfo(skb)->frags[wnd_idx].size;
1796 }
1797 } else {
1798 /* in the non-LSO case, a packet this fragmented should
1799 always be linearized */
1800 to_copy = 1;
1801 }
1802 }
1803
1804exit_lbl:
1805 if (unlikely(to_copy))
1806 DP(NETIF_MSG_TX_QUEUED,
1807 "Linearization IS REQUIRED for %s packet. "
1808 "num_frags %d hlen %d first_bd_sz %d\n",
1809 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
1810 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
1811
1812 return to_copy;
1813}
1814#endif
1815
1816static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb,
1817 struct eth_tx_parse_bd_e2 *pbd,
1818 u32 xmit_type)
1819{
1820 pbd->parsing_data |= cpu_to_le16(skb_shinfo(skb)->gso_size) <<
1821 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT;
1822 if ((xmit_type & XMIT_GSO_V6) &&
1823 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
1824 pbd->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
1825}
1826
1827/**
1828 * Update PBD in GSO case.
1829 *
1830 * @param skb
1831 * @param tx_start_bd
1832 * @param pbd
1833 * @param xmit_type
1834 */
1835static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
1836 struct eth_tx_parse_bd_e1x *pbd,
1837 u32 xmit_type)
1838{
1839 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1840 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
1841 pbd->tcp_flags = pbd_tcp_flags(skb);
1842
1843 if (xmit_type & XMIT_GSO_V4) {
1844 pbd->ip_id = swab16(ip_hdr(skb)->id);
1845 pbd->tcp_pseudo_csum =
1846 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1847 ip_hdr(skb)->daddr,
1848 0, IPPROTO_TCP, 0));
1849
1850 } else
1851 pbd->tcp_pseudo_csum =
1852 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1853 &ipv6_hdr(skb)->daddr,
1854 0, IPPROTO_TCP, 0));
1855
1856 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
1857}
1858/**
1859 *
1860 * @param skb
1861 * @param tx_start_bd
1862 * @param pbd_e2
1863 * @param xmit_type
1864 *
1865 * @return header len
1866 */
1867static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
1868 struct eth_tx_parse_bd_e2 *pbd,
1869 u32 xmit_type)
1870{
1871 pbd->parsing_data |= cpu_to_le16(tcp_hdrlen(skb)/4) <<
1872 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT;
1873
1874 pbd->parsing_data |= cpu_to_le16(((unsigned char *)tcp_hdr(skb) -
1875 skb->data) / 2) <<
1876 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT;
1877
1878 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
1879}
1880
1881/**
1882 *
1883 * @param skb
1884 * @param tx_start_bd
1885 * @param pbd
1886 * @param xmit_type
1887 *
1888 * @return Header length
1889 */
1890static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
1891 struct eth_tx_parse_bd_e1x *pbd,
1892 u32 xmit_type)
1893{
1894 u8 hlen = (skb_network_header(skb) - skb->data) / 2;
1895
1896 /* for now NS flag is not used in Linux */
1897 pbd->global_data =
1898 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
1899 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
1900
1901 pbd->ip_hlen_w = (skb_transport_header(skb) -
1902 skb_network_header(skb)) / 2;
1903
1904 hlen += pbd->ip_hlen_w + tcp_hdrlen(skb) / 2;
1905
1906 pbd->total_hlen_w = cpu_to_le16(hlen);
1907 hlen = hlen*2;
1908
1909 if (xmit_type & XMIT_CSUM_TCP) {
1910 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
1911
1912 } else {
1913 s8 fix = SKB_CS_OFF(skb); /* signed! */
1914
1915 DP(NETIF_MSG_TX_QUEUED,
1916 "hlen %d fix %d csum before fix %x\n",
1917 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
1918
1919 /* HW bug: fixup the CSUM */
1920 pbd->tcp_pseudo_csum =
1921 bnx2x_csum_fix(skb_transport_header(skb),
1922 SKB_CS(skb), fix);
1923
1924 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
1925 pbd->tcp_pseudo_csum);
1926 }
1927
1928 return hlen;
1929}
1930/* called with netif_tx_lock
1931 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
1932 * netif_wake_queue()
1933 */
1934netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1935{
1936 struct bnx2x *bp = netdev_priv(dev);
1937 struct bnx2x_fastpath *fp;
1938 struct netdev_queue *txq;
1939 struct sw_tx_bd *tx_buf;
1940 struct eth_tx_start_bd *tx_start_bd;
1941 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
1942 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
1943 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
1944 u16 pkt_prod, bd_prod;
1945 int nbd, fp_index;
1946 dma_addr_t mapping;
1947 u32 xmit_type = bnx2x_xmit_type(bp, skb);
1948 int i;
1949 u8 hlen = 0;
1950 __le16 pkt_size = 0;
1951 struct ethhdr *eth;
1952 u8 mac_type = UNICAST_ADDRESS;
1953
1954#ifdef BNX2X_STOP_ON_ERROR
1955 if (unlikely(bp->panic))
1956 return NETDEV_TX_BUSY;
1957#endif
1958
1959 fp_index = skb_get_queue_mapping(skb);
1960 txq = netdev_get_tx_queue(dev, fp_index);
1961
1962 fp = &bp->fp[fp_index];
1963
1964 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
1965 fp->eth_q_stats.driver_xoff++;
1966 netif_tx_stop_queue(txq);
1967 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
1968 return NETDEV_TX_BUSY;
1969 }
1970
1971 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
1972 "protocol(%x,%x) gso type %x xmit_type %x\n",
1973 fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
1974 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
1975
1976 eth = (struct ethhdr *)skb->data;
1977
1978 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
1979 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
1980 if (is_broadcast_ether_addr(eth->h_dest))
1981 mac_type = BROADCAST_ADDRESS;
1982 else
1983 mac_type = MULTICAST_ADDRESS;
1984 }
1985
1986#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1987 /* First, check if we need to linearize the skb (due to FW
1988 restrictions). No need to check fragmentation if page size > 8K
1989 (there will be no violation to FW restrictions) */
1990 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
1991 /* Statistics of linearization */
1992 bp->lin_cnt++;
1993 if (skb_linearize(skb) != 0) {
1994 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
1995 "silently dropping this SKB\n");
1996 dev_kfree_skb_any(skb);
1997 return NETDEV_TX_OK;
1998 }
1999 }
2000#endif
2001
2002 /*
2003 Please read carefully. First we use one BD which we mark as start,
2004 then we have a parsing info BD (used for TSO or xsum),
2005 and only then we have the rest of the TSO BDs.
2006 (don't forget to mark the last one as last,
2007 and to unmap only AFTER you write to the BD ...)
2008	And above all, all pbd sizes are in words - NOT DWORDS!
2009 */
2010
2011 pkt_prod = fp->tx_pkt_prod++;
2012 bd_prod = TX_BD(fp->tx_bd_prod);
2013
2014 /* get a tx_buf and first BD */
2015 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
2016 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
2017
2018 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2019 SET_FLAG(tx_start_bd->general_data,
2020 ETH_TX_START_BD_ETH_ADDR_TYPE,
2021 mac_type);
2022	/* header nbd */
2023 SET_FLAG(tx_start_bd->general_data,
2024 ETH_TX_START_BD_HDR_NBDS,
2025 1);
2026
2027 /* remember the first BD of the packet */
2028 tx_buf->first_bd = fp->tx_bd_prod;
2029 tx_buf->skb = skb;
2030 tx_buf->flags = 0;
2031
2032 DP(NETIF_MSG_TX_QUEUED,
2033 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2034 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
2035
2036#ifdef BCM_VLAN
2037 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
2038 (bp->flags & HW_VLAN_TX_FLAG)) {
2039 tx_start_bd->vlan_or_ethertype =
2040 cpu_to_le16(vlan_tx_tag_get(skb));
2041 tx_start_bd->bd_flags.as_bitfield |=
2042 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
2043 } else
2044#endif
2045		tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2046
2047 /* turn on parsing and get a BD */
2048 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2049
2050 if (xmit_type & XMIT_CSUM) {
2051 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2052
2053 if (xmit_type & XMIT_CSUM_V4)
2054 tx_start_bd->bd_flags.as_bitfield |=
2055 ETH_TX_BD_FLAGS_IP_CSUM;
2056 else
2057 tx_start_bd->bd_flags.as_bitfield |=
2058 ETH_TX_BD_FLAGS_IPV6;
2059
2060 if (!(xmit_type & XMIT_CSUM_TCP))
2061 tx_start_bd->bd_flags.as_bitfield |=
2062 ETH_TX_BD_FLAGS_IS_UDP;
2063 }
2064
2065 if (CHIP_IS_E2(bp)) {
2066 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
2067 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2068 /* Set PBD in checksum offload case */
2069 if (xmit_type & XMIT_CSUM)
2070 hlen = bnx2x_set_pbd_csum_e2(bp,
2071 skb, pbd_e2, xmit_type);
2072 } else {
2073 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2074 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2075 /* Set PBD in checksum offload case */
2076 if (xmit_type & XMIT_CSUM)
2077 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
2078
2079 }
2080
2081 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2082 skb_headlen(skb), DMA_TO_DEVICE);
2083
2084 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2085 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2086 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
2087 tx_start_bd->nbd = cpu_to_le16(nbd);
2088 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2089 pkt_size = tx_start_bd->nbytes;
2090
2091 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2092 " nbytes %d flags %x vlan %x\n",
2093 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2094 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2095 tx_start_bd->bd_flags.as_bitfield,
2096 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
2097
2098 if (xmit_type & XMIT_GSO) {
2099
2100 DP(NETIF_MSG_TX_QUEUED,
2101 "TSO packet len %d hlen %d total len %d tso size %d\n",
2102 skb->len, hlen, skb_headlen(skb),
2103 skb_shinfo(skb)->gso_size);
2104
2105 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2106
2107 if (unlikely(skb_headlen(skb) > hlen))
2108 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2109 hlen, bd_prod, ++nbd);
2110 if (CHIP_IS_E2(bp))
2111 bnx2x_set_pbd_gso_e2(skb, pbd_e2, xmit_type);
2112 else
2113 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
2114 }
2115 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2116
2117 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2118 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2119
2120 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2121 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2122 if (total_pkt_bd == NULL)
2123 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2124
2125 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2126 frag->page_offset,
2127 frag->size, DMA_TO_DEVICE);
2128
2129 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2130 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2131 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2132 le16_add_cpu(&pkt_size, frag->size);
2133
2134 DP(NETIF_MSG_TX_QUEUED,
2135 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2136 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2137 le16_to_cpu(tx_data_bd->nbytes));
2138 }
2139
2140 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2141
2142 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2143
2144 /* now send a tx doorbell, counting the next BD
2145 * if the packet contains or ends with it
2146 */
2147 if (TX_BD_POFF(bd_prod) < nbd)
2148 nbd++;
2149
2150 if (total_pkt_bd != NULL)
2151 total_pkt_bd->total_pkt_bytes = pkt_size;
2152
2153	if (pbd_e1x)
2154		DP(NETIF_MSG_TX_QUEUED,
2155		   "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
2156		   " tcp_flags %x xsum %x seq %u hlen %u\n",
2157 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2158 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2159 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2160 le16_to_cpu(pbd_e1x->total_hlen_w));
2161 if (pbd_e2)
2162 DP(NETIF_MSG_TX_QUEUED,
2163 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2164 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2165 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2166 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2167 pbd_e2->parsing_data);
2168 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2169
2170 /*
2171 * Make sure that the BD data is updated before updating the producer
2172 * since FW might read the BD right after the producer is updated.
2173 * This is only applicable for weak-ordered memory model archs such
2174	 * as IA-64. The following barrier is also mandatory since FW
2175	 * assumes packets must have BDs.
2176 */
2177 wmb();
2178
2179 fp->tx_db.data.prod += nbd;
2180 barrier();
2181	DOORBELL(bp, fp->cid, fp->tx_db.raw);
2182
2183 mmiowb();
2184
2185 fp->tx_bd_prod += nbd;
2186
2187 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2188 netif_tx_stop_queue(txq);
2189
2190 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
2191 * ordering of set_bit() in netif_tx_stop_queue() and read of
2192		 * fp->tx_bd_cons */
2193 smp_mb();
2194
2195 fp->eth_q_stats.driver_xoff++;
2196 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2197 netif_tx_wake_queue(txq);
2198 }
2199 fp->tx_pkt++;
2200
2201 return NETDEV_TX_OK;
2202}
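/*
 * Editor's illustration, not driver code: the stop/wake pattern used at the
 * end of bnx2x_start_xmit().  The queue is stopped when too few BDs remain,
 * then availability is re-checked after a full barrier so a completion that
 * freed BDs between the check and the stop cannot leave the queue stopped
 * forever.  The demo_* names and the threshold are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_STOP_THRESH	20	/* stands in for MAX_SKB_FRAGS + 3 */

struct demo_ring {
	unsigned int prod, cons, size;
	bool stopped;
};

static unsigned int demo_tx_avail(const struct demo_ring *r)
{
	return r->size - (r->prod - r->cons);
}

static void demo_after_xmit(struct demo_ring *r)
{
	if (demo_tx_avail(r) < DEMO_STOP_THRESH) {
		r->stopped = true;		/* netif_tx_stop_queue() */
		__sync_synchronize();		/* stands in for smp_mb() */
		/* the completion path may have advanced cons meanwhile */
		if (demo_tx_avail(r) >= DEMO_STOP_THRESH)
			r->stopped = false;	/* netif_tx_wake_queue() */
	}
}

int main(void)
{
	struct demo_ring r = { .prod = 500, .cons = 5, .size = 512 };

	demo_after_xmit(&r);
	printf("avail=%u stopped=%d\n", demo_tx_avail(&r), r.stopped);
	return 0;
}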
2203/* called with rtnl_lock */
2204int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2205{
2206 struct sockaddr *addr = p;
2207 struct bnx2x *bp = netdev_priv(dev);
2208
2209 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2210 return -EINVAL;
2211
2212 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2213 if (netif_running(dev))
2214 bnx2x_set_eth_mac(bp, 1);
2215
2216 return 0;
2217}
2218
2219
2220int bnx2x_setup_irqs(struct bnx2x *bp)
2221{
2222 int rc = 0;
2223 if (bp->flags & USING_MSIX_FLAG) {
2224 rc = bnx2x_req_msix_irqs(bp);
2225 if (rc)
2226 return rc;
2227 } else {
2228 bnx2x_ack_int(bp);
2229 rc = bnx2x_req_irq(bp);
2230 if (rc) {
2231 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
2232 return rc;
2233 }
2234 if (bp->flags & USING_MSI_FLAG) {
2235 bp->dev->irq = bp->pdev->irq;
2236 netdev_info(bp->dev, "using MSI IRQ %d\n",
2237 bp->pdev->irq);
2238 }
2239 }
2240
2241 return 0;
2242}
2243
2244void bnx2x_free_mem_bp(struct bnx2x *bp)
2245{
2246 kfree(bp->fp);
2247 kfree(bp->msix_table);
2248 kfree(bp->ilt);
2249}
2250
2251int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2252{
2253 struct bnx2x_fastpath *fp;
2254 struct msix_entry *tbl;
2255 struct bnx2x_ilt *ilt;
2256
2257 /* fp array */
2258 fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
2259 if (!fp)
2260 goto alloc_err;
2261 bp->fp = fp;
2262
2263 /* msix table */
2264 tbl = kzalloc((bp->l2_cid_count + 1) * sizeof(*tbl),
2265 GFP_KERNEL);
2266 if (!tbl)
2267 goto alloc_err;
2268 bp->msix_table = tbl;
2269
2270 /* ilt */
2271 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
2272 if (!ilt)
2273 goto alloc_err;
2274 bp->ilt = ilt;
2275
2276 return 0;
2277alloc_err:
2278 bnx2x_free_mem_bp(bp);
2279 return -ENOMEM;
2280
2281}
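/*
 * Editor's note, illustration only: the alloc/free pair above relies on the
 * private struct starting out zeroed and on kfree(NULL) being a no-op, so
 * one cleanup helper serves every failure point.  A standalone sketch of
 * the same goto-unwind pattern with hypothetical demo_* names; free(NULL)
 * is likewise harmless.
 */
#include <stdlib.h>

struct demo_bp {
	void *fp;
	void *msix_table;
	void *ilt;
};

static void demo_free_mem(struct demo_bp *bp)
{
	free(bp->fp);		/* safe even if this alloc never happened */
	free(bp->msix_table);
	free(bp->ilt);
}

static int demo_alloc_mem(struct demo_bp *bp)
{
	bp->fp = calloc(4, 64);
	if (!bp->fp)
		goto alloc_err;
	bp->msix_table = calloc(5, 16);
	if (!bp->msix_table)
		goto alloc_err;
	bp->ilt = calloc(1, 128);
	if (!bp->ilt)
		goto alloc_err;
	return 0;

alloc_err:
	demo_free_mem(bp);
	return -1;		/* the driver returns -ENOMEM here */
}

int main(void)
{
	struct demo_bp bp = { 0 };	/* must start zeroed, like netdev private data */

	if (demo_alloc_mem(&bp))
		return 1;
	demo_free_mem(&bp);
	return 0;
}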
2282
2283/* called with rtnl_lock */
2284int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2285{
2286 struct bnx2x *bp = netdev_priv(dev);
2287 int rc = 0;
2288
2289 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2290 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2291 return -EAGAIN;
2292 }
2293
2294 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2295 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2296 return -EINVAL;
2297
2298 /* This does not race with packet allocation
2299 * because the actual alloc size is
2300 * only updated as part of load
2301 */
2302 dev->mtu = new_mtu;
2303
2304 if (netif_running(dev)) {
2305 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2306 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2307 }
2308
2309 return rc;
2310}
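/*
 * Editor's illustration, not driver code: bnx2x_change_mtu() accepts a new
 * MTU only if the frame stays within the jumbo limit and, with the 14-byte
 * Ethernet header added back, is not shorter than a minimum frame.  The
 * DEMO_* limits below are placeholders, not necessarily the driver's
 * ETH_MAX_JUMBO_PACKET_SIZE / ETH_MIN_PACKET_SIZE values.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_ETH_HLEN			14
#define DEMO_MIN_PACKET_SIZE		60	/* placeholder */
#define DEMO_MAX_JUMBO_PACKET_SIZE	9600	/* placeholder */

static bool demo_mtu_valid(int new_mtu)
{
	return new_mtu <= DEMO_MAX_JUMBO_PACKET_SIZE &&
	       new_mtu + DEMO_ETH_HLEN >= DEMO_MIN_PACKET_SIZE;
}

int main(void)
{
	printf("1500:%d 46:%d 45:%d 9700:%d\n",
	       demo_mtu_valid(1500), demo_mtu_valid(46),
	       demo_mtu_valid(45), demo_mtu_valid(9700));
	return 0;
}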
2311
2312void bnx2x_tx_timeout(struct net_device *dev)
2313{
2314 struct bnx2x *bp = netdev_priv(dev);
2315
2316#ifdef BNX2X_STOP_ON_ERROR
2317 if (!bp->panic)
2318 bnx2x_panic();
2319#endif
2320 /* This allows the netif to be shutdown gracefully before resetting */
2321 schedule_delayed_work(&bp->reset_task, 0);
2322}
2323
2324#ifdef BCM_VLAN
2325/* called with rtnl_lock */
2326void bnx2x_vlan_rx_register(struct net_device *dev,
2327 struct vlan_group *vlgrp)
2328{
2329 struct bnx2x *bp = netdev_priv(dev);
2330
2331 bp->vlgrp = vlgrp;
2332}
2333
2334#endif
2335int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2336{
2337 struct net_device *dev = pci_get_drvdata(pdev);
2338 struct bnx2x *bp;
2339
2340 if (!dev) {
2341 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2342 return -ENODEV;
2343 }
2344 bp = netdev_priv(dev);
2345
2346 rtnl_lock();
2347
2348 pci_save_state(pdev);
2349
2350 if (!netif_running(dev)) {
2351 rtnl_unlock();
2352 return 0;
2353 }
2354
2355 netif_device_detach(dev);
2356
2357 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2358
2359 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2360
2361 rtnl_unlock();
2362
2363 return 0;
2364}
2365
2366int bnx2x_resume(struct pci_dev *pdev)
2367{
2368 struct net_device *dev = pci_get_drvdata(pdev);
2369 struct bnx2x *bp;
2370 int rc;
2371
2372 if (!dev) {
2373 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2374 return -ENODEV;
2375 }
2376 bp = netdev_priv(dev);
2377
2378 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2379 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2380 return -EAGAIN;
2381 }
2382
2383 rtnl_lock();
2384
2385 pci_restore_state(pdev);
2386
2387 if (!netif_running(dev)) {
2388 rtnl_unlock();
2389 return 0;
2390 }
2391
2392 bnx2x_set_power_state(bp, PCI_D0);
2393 netif_device_attach(dev);
2394
2395 /* Since the chip was reset, clear the FW sequence number */
2396 bp->fw_seq = 0;
2397 rc = bnx2x_nic_load(bp, LOAD_OPEN);
2398
2399 rtnl_unlock();
2400
2401 return rc;
2402}