/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, uint, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

static inline bool be_multi_rxq(struct be_adapter *adapter)
{
	return (adapter->num_rx_qs > 1);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

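/*
 * Doorbell helpers: each ring is told about new entries by a write to a
 * doorbell register in the db BAR. The wmb() ensures the ring entries
 * written by the CPU are visible to the device before the doorbell
 * write that hands them over.
 */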
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;
	struct be_rx_obj *rxo;
	int i;

	memset(dev_stats, 0, sizeof(*dev_stats));
	for_all_rx_queues(adapter, rxo, i) {
		dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
		dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
		dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in linux buffers: best possible approximation */
		dev_stats->rx_dropped +=
			erx_stats->rx_drops_no_fragments[rxo->q.id];
	}

	dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
	dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
				port_stats->rx_input_fifo_overflow +
				rxf_stats->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_start_queue(netdev);
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_stop_queue(netdev);
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

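/*
 * Adaptive interrupt coalescing: the eq-delay programmed below scales
 * with the measured rx frag rate as eqd = (frags/sec / 110000) << 3,
 * clamped to [min_eqd, max_eqd]. E.g. 550,000 frags/sec yields
 * 5 << 3 = 40; anything below 220,000 frags/sec computes to less than
 * 10 and the delay is forced to 0.
 */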
/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

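/*
 * Converts a byte count over a jiffy interval into Mbits/sec.
 * Example: 250,000,000 bytes in 2 seconds (ticks == 2 * HZ) ->
 * 125,000,000 bytes/sec -> 1,000,000,000 bits/sec -> 1000.
 */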
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* bits/sec -> Mbits/sec */

	return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
		u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}

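/*
 * Example: an skb with linear data and two page frags needs
 * 1 (linear) + 2 (frags) + 1 (hdr wrb) = 4 WRBs -- already even, so no
 * dummy; with a single frag the count would be 3 and a dummy WRB is
 * added to keep the number of entries posted to the ring even.
 */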
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (cnt & 1) {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	} else
		*dummy = false;
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			pci_unmap_single(pdev, dma, wrb->frag_len,
				PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pdev, dma, wrb->frag_len,
				PCI_DMA_TODEVICE);
	}
}

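/*
 * make_tx_wrbs() posts, in order: a header WRB, one WRB per mapped
 * piece of the skb (linear part first, then page frags) and an optional
 * dummy WRB to keep the count even. On a DMA mapping error it rewinds
 * txq->head and unmaps whatever had already been mapped.
 */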
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = pci_map_single(pdev, skb->data, len,
					 PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = pci_map_page(pdev, frag->page,
				       frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(pdev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		u32 pktsize, u16 numfrags, u8 pkt_type)
{
	struct be_rx_stats *stats = &rxo->stats;

	stats->rx_compl++;
	stats->rx_frags += numfrags;
	stats->rx_bytes += pktsize;
	stats->rx_pkts++;
	if (pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
}

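/*
 * Returns true when the stack must verify the checksum itself, i.e.
 * when rx checksum offload (cso) is off or the HW could not validate
 * both the IP and L4 checksums for this completion.
 */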
static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
{
	u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;

	l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
	ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
	ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
	if (ip_version) {
		tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
		udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
	}
	ipv6_chk = (ip_version && (tcpf || udpf));

	return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, PCI_DMA_FROMDEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
			u16 num_rcvd)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, j;
	u32 pktsize, hdr_len, curr_frag_len, size;
	u8 *start;
	u8 pkt_type;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

	page_info = get_rx_page_info(adapter, rxo, rxq_idx);

	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(pktsize, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (pktsize <= rx_frag_size) {
		BUG_ON(num_rcvd != 1);
		goto done;
	}

	/* More frags present for this completion */
	size = pktsize;
	for (i = 1, j = 0; i < num_rcvd; i++) {
		size -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		page_info = get_rx_page_info(adapter, rxo, rxq_idx);

		curr_frag_len = min(size, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);

done:
	be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_eth_rx_compl *rxcp)
{
	struct sk_buff *skb;
	u32 vlanf, vid;
	u16 num_rcvd;
	u8 vtm;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	/* Is it a flush compl that has no data */
	if (unlikely(num_rcvd == 0))
		return;

	skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);

	if (do_pkt_csum(rxcp, adapter->rx_csum))
		skb_checksum_none_assert(skb);
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);

	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->function_mode & 0x400) && !vtm)
		vlanf = 0;

	if (unlikely(vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = swab16(vid);
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
	} else {
		netif_receive_skb(skb);
	}
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_eth_rx_compl *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
	u16 i, rxq_idx = 0, vid, j;
	u8 vtm;
	u8 pkt_type;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	/* Is it a flush compl that has no data */
	if (unlikely(num_rcvd == 0))
		return;

	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
	pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->function_mode & 0x400) && !vtm)
		vlanf = 0;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = pkt_size;
	for (i = 0, j = -1; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = pkt_size;
	skb->data_len = pkt_size;
	skb->truesize += pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (likely(!vlanf)) {
		napi_gro_frags(&eq_obj->napi);
	} else {
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = swab16(vid);

		if (!adapter->vlan_grp || adapter->vlans_added == 0)
			return;

		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
	}

	be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
}

static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);

	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
	rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}

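/*
 * __GFP_COMP is needed for order > 0 allocations so that get_page()/
 * put_page() on the individual rx fragments operate on the compound
 * page as a whole.
 */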
static inline struct page *be_alloc_pages(u32 size)
{
	gfp_t alloc_flags = GFP_ATOMIC;
	u32 order = get_order(size);
	if (order > 0)
		alloc_flags |= __GFP_COMP;
	return alloc_pages(alloc_flags, order);
}

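/*
 * Example: with 4K pages and the default rx_frag_size of 2048,
 * big_page_size is 4096 and each page is split into two rx fragments;
 * the page refcount is raised once per extra fragment and
 * last_page_user marks the fragment whose completion triggers the
 * pci_unmap_page() of the whole page in get_rx_page_info().
 */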
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size);
			if (unlikely(!pagep)) {
				rxo->stats.rx_post_fail++;
				break;
			}
			page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
						adapter->big_page_size,
						PCI_DMA_FROMDEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
					skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}

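/*
 * An EQ entry with evt == 0 is not yet valid; consumers zero each entry
 * after reading it and ring the EQ doorbell with the number popped so
 * the hardware can reuse those slots.
 */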
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_eth_rx_compl *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_rx_compl_reset(rxcp);
		be_cq_notify(adapter, rx_cq->id, true, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
						wrb_index, txcp);
			be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			cmpl = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
		be_tx_compl_process(adapter, end_idx);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;
	adapter->base_eq_id = adapter->tx_eq.q.id;

	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}
		be_queue_free(adapter, q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created) {
			be_eq_clean(adapter, &rxo->rx_eq);
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		}
		be_queue_free(adapter, q);
	}
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

		rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
			BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			goto err;
	}

	if (be_multi_rxq(adapter)) {
		u8 rsstable[MAX_RSS_QS];

		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

/* There are 8 evt ids per func. Returns the evt id's bit number */
static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
{
	return eq_id - adapter->base_eq_id;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i;

	isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
		(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
	if (!isr)
		return IRQ_NONE;

	if ((1 << be_evt_bit_get(adapter, adapter->tx_eq.q.id) & isr))
		event_handle(adapter, &adapter->tx_eq);

	for_all_rx_queues(adapter, rxo, i) {
		if ((1 << be_evt_bit_get(adapter, rxo->rx_eq.q.id) & isr))
			event_handle(adapter, &rxo->rx_eq);
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct be_eth_rx_compl *rxcp)
{
	int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
	int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);

	if (err)
		rxo->stats.rxcp_err++;

	return (tcp_frame && !err) ? true : false;
}

int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_eth_rx_compl *rxcp;
	u32 work_done;

	rxo->stats.rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		if (do_gro(adapter, rxo, rxcp))
			be_rx_compl_process_gro(adapter, rxo, rxcp);
		else
			be_rx_compl_process(adapter, rxo, rxcp);

		be_rx_compl_reset(rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}

/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	int tx_compl = 0, mcc_compl, status = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
				wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		tx_compl++;
	}

	mcc_compl = be_process_mcc(adapter, &status);

	napi_complete(napi);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	if (tx_compl) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		tx_stats(adapter)->be_tx_events++;
		tx_stats(adapter)->be_tx_compl += tx_compl;
	}

	return 1;
}

void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & ~ue_status_lo_mask);
	ue_status_hi = (ue_status_hi & ~ue_status_hi_mask);

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}

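/* Periodic housekeeping (runs every second): refresh stats from fw,
 * update tx/rx rate samples and the adaptive EQ delay, repost rx
 * buffers to any queue that starved, and check for unrecoverable
 * errors.
 */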
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->stats_ioctl_sent)
		be_cmd_get_stats(adapter, &adapter->stats_cmd);

	be_tx_rate_update(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo);
		}
	}

	if (!adapter->ue_detected)
		be_detect_dump_ue(adapter);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (adapter->msix_enabled) {
		pci_disable_msix(adapter->pdev);
		adapter->msix_enabled = false;
	}
}

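/* Multiple rx queues are used only when the function reports the RSS
 * capability and SR-IOV is off; the 0x400 function_mode bit appears to
 * flag a multi-channel mode that also rules out RSS.
 */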
static int be_num_rxqs_get(struct be_adapter *adapter)
{
	if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
		!adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}

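/* Enable MSI-X with one vector per rx queue plus one shared tx/mcc
 * vector.  pci_enable_msix() returns a positive value when only fewer
 * vectors are available, so retry with that count and shrink the
 * number of rx queues to match.
 */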
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
	int i, status;

	adapter->num_rx_qs = be_num_rxqs_get(adapter);

	for (i = 0; i < (adapter->num_rx_qs + 1); i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
			adapter->num_rx_qs + 1);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				status) == 0) {
			adapter->num_rx_qs = status - 1;
			dev_warn(&adapter->pdev->dev,
				"Could allocate only %d MSI-X vectors; "
				"using %d RX queues\n", status,
				adapter->num_rx_qs);
			goto done;
		}
	}
	return;
done:
	adapter->msix_enabled = true;
}

static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status;

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = !status;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
{
	return adapter->msix_entries[
		be_evt_bit_get(adapter, eq_id)].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj->q.id);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj->q.id);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
			adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	pci_disable_msix(adapter->pdev);
	adapter->msix_enabled = false;
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (adapter->msix_enabled) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VFs */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!adapter->msix_enabled) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

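/* Teardown mirror of be_open(): stop the worker and async MCC
 * processing, quiesce and free the IRQs, disable NAPI, and finally
 * wait for pending tx completions so every tx skb is freed.
 */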
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	cancel_delayed_work_sync(&adapter->work);

	be_async_mcc_disable(adapter);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	adapter->link_up = false;

	be_intr_set(adapter, false);

	if (adapter->msix_enabled) {
		vec = be_msix_vec_get(adapter, tx_eq->q.id);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, rxo->rx_eq.q.id);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}

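/* Bring-up path: post rx buffers and enable NAPI, register IRQs, arm
 * the event/completion queues (created unarmed), enable async MCC
 * processing, and query the link state.  The PF additionally restores
 * vlan and flow-control settings.
 */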
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo);
		napi_enable(&rxo->rx_eq.napi);
	}
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

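/* Arm or disarm magic-packet Wake-on-LAN: program the wol filter with
 * the netdev MAC (or all-zeroes to disable) and toggle PCI PM wake for
 * the D3hot/D3cold states accordingly.
 */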
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-LAN\n");
			pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
					cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC address using jhash.
 * MAC addresses for the VFs are assigned incrementally starting from
 * the seed.  These addresses are programmed in the ASIC by the PF and
 * the VF driver queries for its MAC address during probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id);
	}
}

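/* Create the host interface (with RSS/promiscuous capabilities on the
 * PF), one interface per VF, the tx/rx/MCC queues, and finally the VF
 * MAC addresses; each failure unwinds everything created before it.
 */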
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (be_multi_rxq(adapter)) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		while (vf < num_vfs) {
			cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
					| BE_IF_FLAGS_BROADCAST;
			status = be_cmd_if_create(adapter, cap_flags, en_flags,
					mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf + 1);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"Interface Create failed for VF %d\n",
					vf);
				goto if_destroy;
			}
			adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
			vf++;
		}
	} else {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	if (be_physfn(adapter)) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto mcc_q_destroy;
	}

	adapter->link_speed = -1;

	return 0;

mcc_q_destroy:
	if (be_physfn(adapter))
		be_vf_eth_addr_rem(adapter);
	be_mcc_queues_destroy(adapter);
rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	for (vf = 0; vf < num_vfs; vf++)
		if (adapter->vf_cfg[vf].vf_if_handle)
			be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle);
	be_cmd_if_destroy(adapter, adapter->if_handle);
do_none:
	return status;
}

static int be_clear(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
char flash_cookie[2][16] = {"*** SE FLAS",
			"H DIRECTORY *** "};

static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	return memcmp(flashed_crc, p, 4) != 0;
}

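/* Flash each component listed in the generation-specific table, in
 * 32KB chunks: intermediate chunks are sent with FLASHROM_OPER_SAVE
 * and the final chunk with FLASHROM_OPER_FLASH.  NCSI images are
 * skipped on firmware older than 3.102.148.0, and redboot is written
 * only when be_flash_redboot() sees a CRC mismatch.
 */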
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	struct flash_comp *pflashcomp;
	int num_comp;

	struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = 9;
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = 8;
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size,
			filehdr_size)))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
			yield();
		}
	}
	return 0;
}

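/* The UFI generation is taken from the first character of the file
 * header's build string: '3' => BE_GEN3, '2' => BE_GEN2.
 */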
static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

int be_load_fw(struct be_adapter *adapter, u8 *func)
{
	char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
	const struct firmware *fw;
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	int status, i = 0, num_imgs = 0;
	const u8 *p;

	strcpy(fw_file, func);

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;
	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
					&flash_cmd.dma);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
			flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

fw_exit:
	release_firmware(fw);
	return status;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= be_vlan_register,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
		NETIF_F_GRO | NETIF_F_TSO6;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;

	netdev->flags |= IFF_MULTICAST;

	adapter->rx_csum = true;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}

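/* BAR layout differs by generation and function type: CSR space
 * (BAR 2) is mapped only on the PF; GEN2 has pcicfg in BAR 1 and
 * doorbells in BAR 4, while GEN3 has pcicfg in BAR 0 and doorbells in
 * BAR 4 (PF) or BAR 0 (VF).  A VF reaches pcicfg at a fixed offset
 * inside its doorbell BAR.
 */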
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}

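/* Map the BARs and allocate DMA-coherent memory for the mailbox and
 * the multicast config command.  The mailbox buffer is over-allocated
 * by 16 bytes so that PTR_ALIGN can place it on a 16-byte boundary.
 */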
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
				mbox_mem_alloc->size, &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
			&mc_cmd_mem->dma);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	spin_lock_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
		mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		pci_free_consistent(adapter->pdev, cmd->size,
			cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	cmd->size = sizeof(struct be_cmd_req_get_stats);
	cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	if (be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED / 4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	return 0;
}

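/* Probe sequence: enable the device and map its BARs, sync with the
 * fw POST state (PF only), tell the fw the driver is ready, reset the
 * PF function to a clean state, then query the config, enable MSI-X,
 * create the interface and queues, and register the netdev.
 */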
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}

	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_physfn(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(adapter->netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);
	return 0;
}

/*
 * An FLR (function-level reset) will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	be_cmd_reset_function(adapter);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	pci_disable_device(pdev);
}

static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
		rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	if (num_vfs > 32) {
		printk(KERN_WARNING DRV_NAME
			" : Module param num_vfs must not be greater than 32."
			" Using 32\n");
		num_vfs = 32;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);