net-next-2.6.git: drivers/net/benet/be_main.c
be2net: fix net-snmp error because of wrong packet stats
/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, uint, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
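
/* Usage sketch (hypothetical values): the driver loads as the be2net
 * module, e.g. "modprobe be2net rx_frag_size=4096 num_vfs=4". Both
 * parameters are read-only via sysfs (S_IRUGO) once loaded.
 */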

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC",
        "RDMA",
        "RXF",
        "RXIPS",
        "RXULP0",
        "RXULP1",
        "RXULP2",
        "TIM",
        "TPOST",
        "TPRE",
        "TXIPS",
        "TXULP0",
        "TXULP1",
        "UC",
        "WDMA",
        "TXULP2",
        "HOST1",
        "P0_OB_LINK",
        "P1_OB_LINK",
        "HOST_GPIO",
        "MBOX",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                pci_free_consistent(adapter->pdev, mem->size,
                        mem->va, mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
        if (!mem->va)
                return -1;
        memset(mem->va, 0, mem->size);
        return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg, enabled;

        /* Check for an EEH error before touching the device registers */
        if (adapter->eeh_err)
                return;

        reg = ioread32(addr);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        iowrite32(reg, addr);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* MAC addr configuration will be done in hardware for VFs
         * by their corresponding PFs. Just copy to netdev addr here
         */
        if (!be_physfn(adapter))
                goto netdev_addr;

        status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                        adapter->if_handle, &adapter->pmac_id);
netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}

void netdev_stats_update(struct be_adapter *adapter)
{
        struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
        struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats *port_stats =
                        &rxf_stats->port[adapter->port_num];
        struct net_device_stats *dev_stats = &adapter->netdev->stats;
        struct be_erx_stats *erx_stats = &hw_stats->erx;

        dev_stats->rx_packets = drvr_stats(adapter)->be_rx_pkts;
        dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
        dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
        dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;
        dev_stats->multicast = drvr_stats(adapter)->be_rx_mcast_pkt;

        /* bad pkts received */
        dev_stats->rx_errors = port_stats->rx_crc_errors +
                port_stats->rx_alignment_symbol_errors +
                port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long +
                port_stats->rx_dropped_too_small +
                port_stats->rx_dropped_too_short +
                port_stats->rx_dropped_header_too_small +
                port_stats->rx_dropped_tcp_length +
                port_stats->rx_dropped_runt +
                port_stats->rx_tcp_checksum_errs +
                port_stats->rx_ip_checksum_errs +
                port_stats->rx_udp_checksum_errs;

        /* no space in linux buffers: best possible approximation */
        dev_stats->rx_dropped =
                erx_stats->rx_drops_no_fragments[adapter->rx_obj.q.id];

        /* detailed rx errors */
        dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
                port_stats->rx_out_range_errors +
                port_stats->rx_frame_too_long;

        /* receive ring buffer overflow */
        dev_stats->rx_over_errors = 0;

        dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

        /* frame alignment errors */
        dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
                                        port_stats->rx_input_fifo_overflow +
                                        rxf_stats->rx_drops_no_pbuf;
        /* receiver missed packets */
        dev_stats->rx_missed_errors = 0;

        /* packet transmit problems */
        dev_stats->tx_errors = 0;

        /* no space available in linux */
        dev_stats->tx_dropped = 0;

        dev_stats->collisions = 0;

        /* detailed tx_errors */
        dev_stats->tx_aborted_errors = 0;
        dev_stats->tx_carrier_errors = 0;
        dev_stats->tx_fifo_errors = 0;
        dev_stats->tx_heartbeat_errors = 0;
        dev_stats->tx_window_errors = 0;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
        struct net_device *netdev = adapter->netdev;

        /* If link came up or went down */
        if (adapter->link_up != link_up) {
                adapter->link_speed = -1;
                if (link_up) {
                        netif_start_queue(netdev);
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                } else {
                        netif_stop_queue(netdev);
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
                adapter->link_up = link_up;
        }
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter)
{
        struct be_eq_obj *rx_eq = &adapter->rx_eq;
        struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
        ulong now = jiffies;
        u32 eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_fps_jiffies)) {
                stats->rx_fps_jiffies = now;
                return;
        }

        /* Update once a second */
        if ((now - stats->rx_fps_jiffies) < HZ)
                return;

        stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
                        ((now - stats->rx_fps_jiffies) / HZ);

        stats->rx_fps_jiffies = now;
        stats->be_prev_rx_frags = stats->be_rx_frags;
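        /* Heuristic mapping of frag rate to interrupt delay: scale the
         * observed frags/sec down, clamp to the EQ's [min_eqd, max_eqd]
         * window, and treat very small results as "no delay" so a quiet
         * link stays low-latency.
         */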
        eqd = stats->be_rx_fps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd)
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

        rx_eq->cur_eqd = eqd;
}

static struct net_device_stats *be_get_stats(struct net_device *dev)
{
        return &dev->stats;
}

static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
        u64 rate = bytes;

        do_div(rate, ticks / HZ);
        rate <<= 3;                     /* bytes/sec -> bits/sec */
        do_div(rate, 1000000ul);        /* Mbits/sec */

        return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
        struct be_drvr_stats *stats = drvr_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around? */
        if (time_before(now, stats->be_tx_jiffies)) {
                stats->be_tx_jiffies = now;
                return;
        }

        /* Update tx rate once in two seconds */
        if ((now - stats->be_tx_jiffies) > 2 * HZ) {
                stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
                                                  - stats->be_tx_bytes_prev,
                                                 now - stats->be_tx_jiffies);
                stats->be_tx_jiffies = now;
                stats->be_tx_bytes_prev = stats->be_tx_bytes;
        }
}

static void be_tx_stats_update(struct be_adapter *adapter,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_drvr_stats *stats = drvr_stats(adapter);
        stats->be_tx_reqs++;
        stats->be_tx_wrbs += wrb_cnt;
        stats->be_tx_bytes += copied;
        stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (cnt & 1) {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        } else
                *dummy = false;
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
                bool vlan, u32 wrb_cnt, u32 len)
{
        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan && vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
                        hdr, vlan_tx_tag_get(skb));
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

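/* WRBs are stored in the ring in little-endian form; convert the entry
 * back to host order before decoding the fragment address and length.
 */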
static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        pci_unmap_single(pdev, dma, wrb->frag_len,
                                PCI_DMA_TODEVICE);
                else
                        pci_unmap_page(pdev, dma, wrb->frag_len,
                                PCI_DMA_TODEVICE);
        }
}

static int make_tx_wrbs(struct be_adapter *adapter,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct pci_dev *pdev = adapter->pdev;
        struct sk_buff *first_skb = skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = pci_map_single(pdev, skb->data, len,
                                         PCI_DMA_TODEVICE);
                if (pci_dma_mapping_error(pdev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = pci_map_page(pdev, frag->page,
                                       frag->page_offset,
                                       frag->size, PCI_DMA_TODEVICE);
                if (pci_dma_mapping_error(pdev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
                wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
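        /* Unwind on a mapping failure: rewind the queue head and unmap
         * every fragment mapped so far; only the first WRB can hold a
         * single-buffer mapping, the rest are page mappings.
         */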
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(pdev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *tx_obj = &adapter->tx_obj;
        struct be_queue_info *txq = &tx_obj->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(tx_obj->sent_skb_list[start]);
                tx_obj->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_queue(netdev);
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(adapter, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_eq_obj *rx_eq = &adapter->rx_eq;
        struct be_eq_obj *tx_eq = &adapter->tx_eq;

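        /* Disarm the RX/TX event queues while vlan_grp is swapped,
         * presumably so the interrupt/NAPI path does not run against a
         * half-updated pointer, then re-arm them.
         */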
        be_eq_notify(adapter, rx_eq->q.id, false, false, 0);
        be_eq_notify(adapter, tx_eq->q.id, false, false, 0);
        adapter->vlan_grp = grp;
        be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
        be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;
        vlan_group_set_device(adapter->vlan_grp, vid, NULL);

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
                                &adapter->mc_cmd_mem);
                goto done;
        }

        be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
                &adapter->mc_cmd_mem);
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                        adapter->vf_cfg[vf].vf_if_handle,
                                        adapter->vf_cfg[vf].vf_pmac_id);

        status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

static void be_rx_rate_update(struct be_adapter *adapter)
{
        struct be_drvr_stats *stats = drvr_stats(adapter);
        ulong now = jiffies;

        /* Wrapped around */
        if (time_before(now, stats->be_rx_jiffies)) {
                stats->be_rx_jiffies = now;
                return;
        }

        /* Update the rate once in two seconds */
        if ((now - stats->be_rx_jiffies) < 2 * HZ)
                return;

        stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
                                          - stats->be_rx_bytes_prev,
                                         now - stats->be_rx_jiffies);
        stats->be_rx_jiffies = now;
        stats->be_rx_bytes_prev = stats->be_rx_bytes;
}

static void be_rx_stats_update(struct be_adapter *adapter,
                u32 pktsize, u16 numfrags, u8 pkt_type)
{
        struct be_drvr_stats *stats = drvr_stats(adapter);

        stats->be_rx_compl++;
        stats->be_rx_frags += numfrags;
        stats->be_rx_bytes += pktsize;
        stats->be_rx_pkts++;

        if (pkt_type == BE_MULTICAST_PACKET)
                stats->be_rx_mcast_pkt++;
}

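/* Returns true when the checksum should not be trusted, i.e. the caller
 * must fall back to CHECKSUM_NONE: rx checksum offload is disabled (cso
 * is false) or the hardware L4/IP checksum flags do not all validate.
 */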
static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
{
        u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;

        l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
        ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
        ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
        if (ip_version) {
                tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
                udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
        }
        ipv6_chk = (ip_version && (tcpf || udpf));

        return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &adapter->rx_obj.q;

        rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
                        adapter->big_page_size, PCI_DMA_FROMDEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                        struct be_eth_rx_compl *rxcp)
{
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, num_rcvd;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter,
                        struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
                        u16 num_rcvd)
{
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct be_rx_page_info *page_info;
        u16 rxq_idx, i, j;
        u32 pktsize, hdr_len, curr_frag_len, size;
        u8 *start;
        u8 pkt_type;

        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

        page_info = get_rx_page_info(adapter, rxq_idx);

        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(pktsize, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (pktsize <= rx_frag_size) {
                BUG_ON(num_rcvd != 1);
                goto done;
        }

        /* More frags present for this completion */
        size = pktsize;
        for (i = 1, j = 0; i < num_rcvd; i++) {
                size -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                page_info = get_rx_page_info(adapter, rxq_idx);

                curr_frag_len = min(size, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);

done:
        be_rx_stats_update(adapter, pktsize, num_rcvd, pkt_type);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_eth_rx_compl *rxcp)
{
        struct sk_buff *skb;
        u32 vlanf, vid;
        u16 num_rcvd;
        u8 vtm;

        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
        /* Is it a flush compl that has no data */
        if (unlikely(num_rcvd == 0))
                return;

        skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                if (net_ratelimit())
                        dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
                be_rx_compl_discard(adapter, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, skb, rxcp, num_rcvd);

        if (do_pkt_csum(rxcp, adapter->rx_csum))
                skb->ip_summed = CHECKSUM_NONE;
        else
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, adapter->netdev);

        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

        /* vlanf could be wrongly set in some cards.
         * ignore if vtm is not set */
        if ((adapter->function_mode & 0x400) && !vtm)
                vlanf = 0;

        if (unlikely(vlanf)) {
                if (!adapter->vlan_grp || adapter->vlans_added == 0) {
                        kfree_skb(skb);
                        return;
                }
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                vid = swab16(vid);
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
        } else {
                netif_receive_skb(skb);
        }
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                        struct be_eth_rx_compl *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct be_eq_obj *eq_obj = &adapter->rx_eq;
        u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
        u16 i, rxq_idx = 0, vid, j;
        u8 vtm;
        u8 pkt_type;

        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
        /* Is it a flush compl that has no data */
        if (unlikely(num_rcvd == 0))
                return;

        pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
        pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

        /* vlanf could be wrongly set in some cards.
         * ignore if vtm is not set */
        if ((adapter->function_mode & 0x400) && !vtm)
                vlanf = 0;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxcp);
                return;
        }

        remaining = pkt_size;
        for (i = 0, j = -1; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = pkt_size;
        skb->data_len = pkt_size;
        skb->truesize += pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;

        if (likely(!vlanf)) {
                napi_gro_frags(&eq_obj->napi);
        } else {
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
                vid = swab16(vid);

                if (!adapter->vlan_grp || adapter->vlans_added == 0)
                        return;

                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
        }

        be_rx_stats_update(adapter, pkt_size, num_rcvd, pkt_type);
}

static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
{
        struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);

        if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
                return NULL;

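        /* Read the rest of the completion only after the valid bit is seen */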
        rmb();
        be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

        queue_tail_inc(&adapter->rx_obj.cq);
        return rxcp;
}

/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
        rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}

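/* Higher-order allocations are made compound (__GFP_COMP) so that
 * get_page()/put_page() on the per-fragment sub-pages correctly
 * reference-count the head page.
 */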
static inline struct page *be_alloc_pages(u32 size)
{
        gfp_t alloc_flags = GFP_ATOMIC;
        u32 order = get_order(size);
        if (order > 0)
                alloc_flags |= __GFP_COMP;
        return alloc_pages(alloc_flags, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_adapter *adapter)
{
        struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct page *pagep = NULL;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

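        /* Each "big page" is carved into rx_frag_size chunks: chunks after
         * the first take an extra reference via get_page(), and the chunk
         * that exhausts the page is flagged last_page_user so the DMA
         * mapping is released exactly once.
         */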
        page_info = &page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        pagep = be_alloc_pages(adapter->big_page_size);
                        if (unlikely(!pagep)) {
                                drvr_stats(adapter)->be_ethrx_post_fail++;
                                break;
                        }
                        page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
                                                adapter->big_page_size,
                                                PCI_DMA_FROMDEVICE);
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
                        page_info->page_offset = page_offset + rx_frag_size;
                }
                page_offset = page_info->page_offset;
                page_info->page = pagep;
                dma_unmap_addr_set(page_info, bus, page_dmaaddr);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;

                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_page_user = true;
                }

                prev_page_info = page_info;
                queue_head_inc(rxq);
                page_info = &page_info_tbl[rxq->head];
        }
        if (pagep)
                prev_page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                adapter->rx_post_starved = true;
        }
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(txcp, sizeof(*txcp));

        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

        queue_tail_inc(tx_cq);
        return txcp;
}

static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
        u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
        bool unmap_skb_hdr = true;

        sent_skb = sent_skbs[txq->tail];
        BUG_ON(!sent_skb);
        sent_skbs[txq->tail] = NULL;

        /* skip header wrb */
        queue_tail_inc(txq);

        do {
                cur_index = txq->tail;
                wrb = queue_tail_node(txq);
                unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
                                        skb_headlen(sent_skb)));
                unmap_skb_hdr = false;

                num_wrbs++;
                queue_tail_inc(txq);
        } while (cur_index != last_index);

        atomic_sub(num_wrbs, &txq->used);

        kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

        if (!eqe->evt)
                return NULL;

        rmb();
        eqe->evt = le32_to_cpu(eqe->evt);
        queue_tail_inc(&eq_obj->q);
        return eqe;
}

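/* Drain all pending entries from the EQ, re-arm it (also clearing the
 * interrupt), and schedule NAPI only if at least one event was popped.
 */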
static int event_handle(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        /* Deal with any spurious interrupts that come
         * without events
         */
        be_eq_notify(adapter, eq_obj->q.id, true, true, num);
        if (num)
                napi_schedule(&eq_obj->napi);

        return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        if (num)
                be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter)
{
        struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
        struct be_eth_rx_compl *rxcp;
        u16 tail;

        /* First cleanup pending rx completions */
        while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
                be_rx_compl_discard(adapter, rxcp);
                be_rx_compl_reset(rxcp);
                be_cq_notify(adapter, rx_cq->id, true, 1);
        }

        /* Then free posted rx buffers that were not used */
        tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
        for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
                page_info = get_rx_page_info(adapter, tail);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(atomic_read(&rxq->used));
}
1355
1356 static void be_tx_compl_clean(struct be_adapter *adapter)
1357 {
1358         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1359         struct be_queue_info *txq = &adapter->tx_obj.q;
1360         struct be_eth_tx_compl *txcp;
1361         u16 end_idx, cmpl = 0, timeo = 0;
1362         struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1363         struct sk_buff *sent_skb;
1364         bool dummy_wrb;
1365
1366         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1367         do {
1368                 while ((txcp = be_tx_compl_get(tx_cq))) {
1369                         end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1370                                         wrb_index, txcp);
1371                         be_tx_compl_process(adapter, end_idx);
1372                         cmpl++;
1373                 }
1374                 if (cmpl) {
1375                         be_cq_notify(adapter, tx_cq->id, false, cmpl);
1376                         cmpl = 0;
1377                 }
1378
1379                 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1380                         break;
1381
1382                 mdelay(1);
1383         } while (true);
1384
1385         if (atomic_read(&txq->used))
1386                 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1387                         atomic_read(&txq->used));
1388
1389         /* free posted tx for which compls will never arrive */
1390         while (atomic_read(&txq->used)) {
1391                 sent_skb = sent_skbs[txq->tail];
1392                 end_idx = txq->tail;
1393                 index_adv(&end_idx,
1394                         wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
1395                 be_tx_compl_process(adapter, end_idx);
1396         }
1397 }
1398
1399 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1400 {
1401         struct be_queue_info *q;
1402
1403         q = &adapter->mcc_obj.q;
1404         if (q->created)
1405                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1406         be_queue_free(adapter, q);
1407
1408         q = &adapter->mcc_obj.cq;
1409         if (q->created)
1410                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1411         be_queue_free(adapter, q);
1412 }
1413
1414 /* Must be called only after TX qs are created as MCC shares TX EQ */
1415 static int be_mcc_queues_create(struct be_adapter *adapter)
1416 {
1417         struct be_queue_info *q, *cq;
1418
1419         /* Alloc MCC compl queue */
1420         cq = &adapter->mcc_obj.cq;
1421         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1422                         sizeof(struct be_mcc_compl)))
1423                 goto err;
1424
1425         /* Ask BE to create MCC compl queue; share TX's eq */
1426         if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1427                 goto mcc_cq_free;
1428
1429         /* Alloc MCC queue */
1430         q = &adapter->mcc_obj.q;
1431         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1432                 goto mcc_cq_destroy;
1433
1434         /* Ask BE to create MCC queue */
1435         if (be_cmd_mccq_create(adapter, q, cq))
1436                 goto mcc_q_free;
1437
1438         return 0;
1439
1440 mcc_q_free:
1441         be_queue_free(adapter, q);
1442 mcc_cq_destroy:
1443         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1444 mcc_cq_free:
1445         be_queue_free(adapter, cq);
1446 err:
1447         return -1;
1448 }
1449
1450 static void be_tx_queues_destroy(struct be_adapter *adapter)
1451 {
1452         struct be_queue_info *q;
1453
1454         q = &adapter->tx_obj.q;
1455         if (q->created)
1456                 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1457         be_queue_free(adapter, q);
1458
1459         q = &adapter->tx_obj.cq;
1460         if (q->created)
1461                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1462         be_queue_free(adapter, q);
1463
1464         /* Clear any residual events */
1465         be_eq_clean(adapter, &adapter->tx_eq);
1466
1467         q = &adapter->tx_eq.q;
1468         if (q->created)
1469                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1470         be_queue_free(adapter, q);
1471 }
1472
1473 static int be_tx_queues_create(struct be_adapter *adapter)
1474 {
1475         struct be_queue_info *eq, *q, *cq;
1476
1477         adapter->tx_eq.max_eqd = 0;
1478         adapter->tx_eq.min_eqd = 0;
1479         adapter->tx_eq.cur_eqd = 96;
1480         adapter->tx_eq.enable_aic = false;
1481         /* Alloc Tx Event queue */
1482         eq = &adapter->tx_eq.q;
1483         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1484                 return -1;
1485
1486         /* Ask BE to create Tx Event queue */
1487         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1488                 goto tx_eq_free;
1489         adapter->base_eq_id = adapter->tx_eq.q.id;
1490
1491         /* Alloc TX eth compl queue */
1492         cq = &adapter->tx_obj.cq;
1493         if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1494                         sizeof(struct be_eth_tx_compl)))
1495                 goto tx_eq_destroy;
1496
1497         /* Ask BE to create Tx eth compl queue */
1498         if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1499                 goto tx_cq_free;
1500
1501         /* Alloc TX eth queue */
1502         q = &adapter->tx_obj.q;
1503         if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1504                 goto tx_cq_destroy;
1505
1506         /* Ask BE to create Tx eth queue */
1507         if (be_cmd_txq_create(adapter, q, cq))
1508                 goto tx_q_free;
1509         return 0;
1510
1511 tx_q_free:
1512         be_queue_free(adapter, q);
1513 tx_cq_destroy:
1514         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1515 tx_cq_free:
1516         be_queue_free(adapter, cq);
1517 tx_eq_destroy:
1518         be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1519 tx_eq_free:
1520         be_queue_free(adapter, eq);
1521         return -1;
1522 }
1523
1524 static void be_rx_queues_destroy(struct be_adapter *adapter)
1525 {
1526         struct be_queue_info *q;
1527
1528         q = &adapter->rx_obj.q;
1529         if (q->created) {
1530                 be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1531
1532                 /* After the rxq is invalidated, wait for a grace time
1533                  * of 1ms for all dma to end and the flush compl to arrive
1534                  */
1535                 mdelay(1);
1536                 be_rx_q_clean(adapter);
1537         }
1538         be_queue_free(adapter, q);
1539
1540         q = &adapter->rx_obj.cq;
1541         if (q->created)
1542                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1543         be_queue_free(adapter, q);
1544
1545         /* Clear any residual events */
1546         be_eq_clean(adapter, &adapter->rx_eq);
1547
1548         q = &adapter->rx_eq.q;
1549         if (q->created)
1550                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1551         be_queue_free(adapter, q);
1552 }
1553
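/* Unlike the TX EQ, which uses a fixed delay, the RX EQ enables adaptive
 * interrupt coalescing: the delay starts at 0 and is retuned at runtime
 * (up to BE_MAX_EQD) by be_rx_eqd_update().
 */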
1554 static int be_rx_queues_create(struct be_adapter *adapter)
1555 {
1556         struct be_queue_info *eq, *q, *cq;
1557         int rc;
1558
1559         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1560         adapter->rx_eq.max_eqd = BE_MAX_EQD;
1561         adapter->rx_eq.min_eqd = 0;
1562         adapter->rx_eq.cur_eqd = 0;
1563         adapter->rx_eq.enable_aic = true;
1564
1565         /* Alloc Rx Event queue */
1566         eq = &adapter->rx_eq.q;
1567         rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1568                                 sizeof(struct be_eq_entry));
1569         if (rc)
1570                 return rc;
1571
1572         /* Ask BE to create Rx Event queue */
1573         rc = be_cmd_eq_create(adapter, eq, adapter->rx_eq.cur_eqd);
1574         if (rc)
1575                 goto rx_eq_free;
1576
1577         /* Alloc RX eth compl queue */
1578         cq = &adapter->rx_obj.cq;
1579         rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1580                         sizeof(struct be_eth_rx_compl));
1581         if (rc)
1582                 goto rx_eq_destroy;
1583
1584         /* Ask BE to create Rx eth compl queue */
1585         rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1586         if (rc)
1587                 goto rx_cq_free;
1588
1589         /* Alloc RX eth queue */
1590         q = &adapter->rx_obj.q;
1591         rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
1592         if (rc)
1593                 goto rx_cq_destroy;
1594
1595         /* Ask BE to create Rx eth queue */
1596         rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1597                 BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
1598         if (rc)
1599                 goto rx_q_free;
1600
1601         return 0;
1602 rx_q_free:
1603         be_queue_free(adapter, q);
1604 rx_cq_destroy:
1605         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1606 rx_cq_free:
1607         be_queue_free(adapter, cq);
1608 rx_eq_destroy:
1609         be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1610 rx_eq_free:
1611         be_queue_free(adapter, eq);
1612         return rc;
1613 }
1614
1615 /* There are 8 evt ids per func. Returns the evt id's bit number */
1616 static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
1617 {
1618         return eq_id - adapter->base_eq_id;
1619 }
1620
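/* Legacy INTx handler. The line may be shared (it is requested with
 * IRQF_SHARED), so first read this function's CEV ISR register to check
 * whether the interrupt is ours before servicing both EQs.
 */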
1621 static irqreturn_t be_intx(int irq, void *dev)
1622 {
1623         struct be_adapter *adapter = dev;
1624         int isr;
1625
1626         isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1627                 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1628         if (!isr)
1629                 return IRQ_NONE;
1630
1631         event_handle(adapter, &adapter->tx_eq);
1632         event_handle(adapter, &adapter->rx_eq);
1633
1634         return IRQ_HANDLED;
1635 }
1636
1637 static irqreturn_t be_msix_rx(int irq, void *dev)
1638 {
1639         struct be_adapter *adapter = dev;
1640
1641         event_handle(adapter, &adapter->rx_eq);
1642
1643         return IRQ_HANDLED;
1644 }
1645
1646 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1647 {
1648         struct be_adapter *adapter = dev;
1649
1650         event_handle(adapter, &adapter->tx_eq);
1651
1652         return IRQ_HANDLED;
1653 }
1654
1655 static inline bool do_gro(struct be_adapter *adapter,
1656                         struct be_eth_rx_compl *rxcp)
1657 {
1658         int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
1659         int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
1660
1661         if (err)
1662                 drvr_stats(adapter)->be_rxcp_err++;
1663
1664         return tcp_frame && !err;
1665 }
1666
1667 int be_poll_rx(struct napi_struct *napi, int budget)
1668 {
1669         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1670         struct be_adapter *adapter =
1671                 container_of(rx_eq, struct be_adapter, rx_eq);
1672         struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
1673         struct be_eth_rx_compl *rxcp;
1674         u32 work_done;
1675
1676         adapter->stats.drvr_stats.be_rx_polls++;
1677         for (work_done = 0; work_done < budget; work_done++) {
1678                 rxcp = be_rx_compl_get(adapter);
1679                 if (!rxcp)
1680                         break;
1681
1682                 if (do_gro(adapter, rxcp))
1683                         be_rx_compl_process_gro(adapter, rxcp);
1684                 else
1685                         be_rx_compl_process(adapter, rxcp);
1686
1687                 be_rx_compl_reset(rxcp);
1688         }
1689
1690         /* Refill the queue */
1691         if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
1692                 be_post_rx_frags(adapter);
1693
1694         /* All consumed */
1695         if (work_done < budget) {
1696                 napi_complete(napi);
1697                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1698         } else {
1699                 /* More to be consumed; continue with interrupts disabled */
1700                 be_cq_notify(adapter, rx_cq->id, false, work_done);
1701         }
1702         return work_done;
1703 }
1704
1705 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1706  * For TX/MCC we don't honour budget; consume everything
1707  */
1708 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1709 {
1710         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1711         struct be_adapter *adapter =
1712                 container_of(tx_eq, struct be_adapter, tx_eq);
1713         struct be_queue_info *txq = &adapter->tx_obj.q;
1714         struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1715         struct be_eth_tx_compl *txcp;
1716         int tx_compl = 0, mcc_compl, status = 0;
1717         u16 end_idx;
1718
1719         while ((txcp = be_tx_compl_get(tx_cq))) {
1720                 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1721                                 wrb_index, txcp);
1722                 be_tx_compl_process(adapter, end_idx);
1723                 tx_compl++;
1724         }
1725
1726         mcc_compl = be_process_mcc(adapter, &status);
1727
1728         napi_complete(napi);
1729
1730         if (mcc_compl) {
1731                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1732                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1733         }
1734
1735         if (tx_compl) {
1736                 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1737
1738                 /* As Tx wrbs have been freed up, wake up netdev queue if
1739                  * it was stopped due to lack of tx wrbs.
1740                  */
1741                 if (netif_queue_stopped(adapter->netdev) &&
1742                         atomic_read(&txq->used) < txq->len / 2) {
1743                         netif_wake_queue(adapter->netdev);
1744                 }
1745
1746                 drvr_stats(adapter)->be_tx_events++;
1747                 drvr_stats(adapter)->be_tx_compl += tx_compl;
1748         }
1749
1750         return 1;
1751 }
1752
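/* The PCICFG ONLINE registers read as non-zero while the corresponding
 * hardware blocks are alive; a value of 0 in either one signals an
 * unrecoverable error (UE).
 */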
1753 static inline bool be_detect_ue(struct be_adapter *adapter)
1754 {
1755         u32 online0 = 0, online1 = 0;
1756
1757         pci_read_config_dword(adapter->pdev, PCICFG_ONLINE0, &online0);
1758
1759         pci_read_config_dword(adapter->pdev, PCICFG_ONLINE1, &online1);
1760
1761         if (!online0 || !online1) {
1762                 adapter->ue_detected = true;
1763                 dev_err(&adapter->pdev->dev,
1764                         "UE Detected!! online0=%x online1=%x\n",
1765                         online0, online1);
1766                 return true;
1767         }
1768
1769         return false;
1770 }
1771
1772 void be_dump_ue(struct be_adapter *adapter)
1773 {
1774         u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1775         u32 i;
1776
1777         pci_read_config_dword(adapter->pdev,
1778                                 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1779         pci_read_config_dword(adapter->pdev,
1780                                 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1781         pci_read_config_dword(adapter->pdev,
1782                                 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1783         pci_read_config_dword(adapter->pdev,
1784                                 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1785
1786         ue_status_lo &= ~ue_status_lo_mask;
1787         ue_status_hi &= ~ue_status_hi_mask;
1788
1789         if (ue_status_lo) {
1790                 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1791                         if (ue_status_lo & 1)
1792                                 dev_err(&adapter->pdev->dev,
1793                                 "UE: %s bit set\n", ue_status_low_desc[i]);
1794                 }
1795         }
1796         if (ue_status_hi) {
1797                 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1798                         if (ue_status_hi & 1)
1799                                 dev_err(&adapter->pdev->dev,
1800                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
1801                 }
1802         }
1804 }
1805
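/* Periodic housekeeping, rescheduled every second: refresh hw stats,
 * retune the RX EQ delay, update tx/rx rates, replenish a starved RX
 * queue and check for unrecoverable errors.
 */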
1806 static void be_worker(struct work_struct *work)
1807 {
1808         struct be_adapter *adapter =
1809                 container_of(work, struct be_adapter, work.work);
1810
1811         if (!adapter->stats_ioctl_sent)
1812                 be_cmd_get_stats(adapter, &adapter->stats.cmd);
1813
1814         /* Set EQ delay */
1815         be_rx_eqd_update(adapter);
1816
1817         be_tx_rate_update(adapter);
1818         be_rx_rate_update(adapter);
1819
1820         if (adapter->rx_post_starved) {
1821                 adapter->rx_post_starved = false;
1822                 be_post_rx_frags(adapter);
1823         }
1824         if (!adapter->ue_detected) {
1825                 if (be_detect_ue(adapter))
1826                         be_dump_ue(adapter);
1827         }
1828
1829         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1830 }
1831
1832 static void be_msix_disable(struct be_adapter *adapter)
1833 {
1834         if (adapter->msix_enabled) {
1835                 pci_disable_msix(adapter->pdev);
1836                 adapter->msix_enabled = false;
1837         }
1838 }
1839
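/* Best effort: if pci_enable_msix() fails, msix_enabled stays false and
 * be_irq_register() falls back to legacy INTx.
 */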
1840 static void be_msix_enable(struct be_adapter *adapter)
1841 {
1842         int i, status;
1843
1844         for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
1845                 adapter->msix_entries[i].entry = i;
1846
1847         status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1848                 BE_NUM_MSIX_VECTORS);
1849         if (status == 0)
1850                 adapter->msix_enabled = true;
1851 }
1852
1853 static void be_sriov_enable(struct be_adapter *adapter)
1854 {
1855         be_check_sriov_fn_type(adapter);
1856 #ifdef CONFIG_PCI_IOV
1857         if (be_physfn(adapter) && num_vfs) {
1858                 int status;
1859
1860                 status = pci_enable_sriov(adapter->pdev, num_vfs);
1861                 adapter->sriov_enabled = !status;
1862         }
1863 #endif
1864 }
1865
1866 static void be_sriov_disable(struct be_adapter *adapter)
1867 {
1868 #ifdef CONFIG_PCI_IOV
1869         if (adapter->sriov_enabled) {
1870                 pci_disable_sriov(adapter->pdev);
1871                 adapter->sriov_enabled = false;
1872         }
1873 #endif
1874 }
1875
1876 static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
1877 {
1878         return adapter->msix_entries[
1879                         be_evt_bit_get(adapter, eq_id)].vector;
1880 }
1881
1882 static int be_request_irq(struct be_adapter *adapter,
1883                 struct be_eq_obj *eq_obj,
1884                 void *handler, char *desc)
1885 {
1886         struct net_device *netdev = adapter->netdev;
1887         int vec;
1888
1889         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1890         vec = be_msix_vec_get(adapter, eq_obj->q.id);
1891         return request_irq(vec, handler, 0, eq_obj->desc, adapter);
1892 }
1893
1894 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj)
1895 {
1896         int vec = be_msix_vec_get(adapter, eq_obj->q.id);
1897         free_irq(vec, adapter);
1898 }
1899
1900 static int be_msix_register(struct be_adapter *adapter)
1901 {
1902         int status;
1903
1904         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx");
1905         if (status)
1906                 goto err;
1907
1908         status = be_request_irq(adapter, &adapter->rx_eq, be_msix_rx, "rx");
1909         if (status)
1910                 goto free_tx_irq;
1911
1912         return 0;
1913
1914 free_tx_irq:
1915         be_free_irq(adapter, &adapter->tx_eq);
1916 err:
1917         dev_warn(&adapter->pdev->dev,
1918                 "MSIX Request IRQ failed - err %d\n", status);
1919         be_msix_disable(adapter);
1921         return status;
1922 }
1923
1924 static int be_irq_register(struct be_adapter *adapter)
1925 {
1926         struct net_device *netdev = adapter->netdev;
1927         int status;
1928
1929         if (adapter->msix_enabled) {
1930                 status = be_msix_register(adapter);
1931                 if (status == 0)
1932                         goto done;
1933                 /* INTx is not supported for VF */
1934                 if (!be_physfn(adapter))
1935                         return status;
1936         }
1937
1938         /* INTx */
1939         netdev->irq = adapter->pdev->irq;
1940         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
1941                         adapter);
1942         if (status) {
1943                 dev_err(&adapter->pdev->dev,
1944                         "INTx request IRQ failed - err %d\n", status);
1945                 return status;
1946         }
1947 done:
1948         adapter->isr_registered = true;
1949         return 0;
1950 }
1951
1952 static void be_irq_unregister(struct be_adapter *adapter)
1953 {
1954         struct net_device *netdev = adapter->netdev;
1955
1956         if (!adapter->isr_registered)
1957                 return;
1958
1959         /* INTx */
1960         if (!adapter->msix_enabled) {
1961                 free_irq(netdev->irq, adapter);
1962                 goto done;
1963         }
1964
1965         /* MSIx */
1966         be_free_irq(adapter, &adapter->tx_eq);
1967         be_free_irq(adapter, &adapter->rx_eq);
1968 done:
1969         adapter->isr_registered = false;
1970 }
1971
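/* Teardown order matters here: stop the worker and async MCC first,
 * quiesce interrupts and NAPI, and only then drain pending TX
 * completions so every outstanding skb is freed.
 */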
1972 static int be_close(struct net_device *netdev)
1973 {
1974         struct be_adapter *adapter = netdev_priv(netdev);
1975         struct be_eq_obj *rx_eq = &adapter->rx_eq;
1976         struct be_eq_obj *tx_eq = &adapter->tx_eq;
1977         int vec;
1978
1979         cancel_delayed_work_sync(&adapter->work);
1980
1981         be_async_mcc_disable(adapter);
1982
1983         netif_stop_queue(netdev);
1984         netif_carrier_off(netdev);
1985         adapter->link_up = false;
1986
1987         be_intr_set(adapter, false);
1988
1989         if (adapter->msix_enabled) {
1990                 vec = be_msix_vec_get(adapter, tx_eq->q.id);
1991                 synchronize_irq(vec);
1992                 vec = be_msix_vec_get(adapter, rx_eq->q.id);
1993                 synchronize_irq(vec);
1994         } else {
1995                 synchronize_irq(netdev->irq);
1996         }
1997         be_irq_unregister(adapter);
1998
1999         napi_disable(&rx_eq->napi);
2000         napi_disable(&tx_eq->napi);
2001
2002         /* Wait for all pending tx completions to arrive so that
2003          * all tx skbs are freed.
2004          */
2005         be_tx_compl_clean(adapter);
2006
2007         return 0;
2008 }
2009
2010 static int be_open(struct net_device *netdev)
2011 {
2012         struct be_adapter *adapter = netdev_priv(netdev);
2013         struct be_eq_obj *rx_eq = &adapter->rx_eq;
2014         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2015         bool link_up;
2016         int status;
2017         u8 mac_speed;
2018         u16 link_speed;
2019
2020         /* First time posting */
2021         be_post_rx_frags(adapter);
2022
2023         napi_enable(&rx_eq->napi);
2024         napi_enable(&tx_eq->napi);
2025
2026         be_irq_register(adapter);
2027
2028         be_intr_set(adapter, true);
2029
2030         /* The evt queues are created in unarmed state; arm them */
2031         be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
2032         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2033
2034         /* Rx compl queue may be in unarmed state; rearm it */
2035         be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);
2036
2037         /* Now that interrupts are on we can process async mcc */
2038         be_async_mcc_enable(adapter);
2039
2040         schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
2041
2042         status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2043                         &link_speed);
2044         if (status)
2045                 goto err;
2046         be_link_status_update(adapter, link_up);
2047
2048         if (be_physfn(adapter)) {
2049                 status = be_vid_config(adapter, false, 0);
2050                 if (status)
2051                         goto err;
2052
2053                 status = be_cmd_set_flow_control(adapter,
2054                                 adapter->tx_fc, adapter->rx_fc);
2055                 if (status)
2056                         goto err;
2057         }
2058
2059         return 0;
2060 err:
2061         be_close(adapter->netdev);
2062         return -EIO;
2063 }
2064
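/* Program (or clear) the magic-packet WoL filter in firmware and set the
 * PCI wake capability for D3hot/D3cold to match.
 */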
2065 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2066 {
2067         struct be_dma_mem cmd;
2068         int status = 0;
2069         u8 mac[ETH_ALEN];
2070
2071         memset(mac, 0, ETH_ALEN);
2072
2073         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2074         cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2075         if (cmd.va == NULL)
2076                 return -1;
2077         memset(cmd.va, 0, cmd.size);
2078
2079         if (enable) {
2080                 status = pci_write_config_dword(adapter->pdev,
2081                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2082                 if (status) {
2083                         dev_err(&adapter->pdev->dev,
2084                                 "Could not enable Wake-on-lan\n");
2085                         pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
2086                                         cmd.dma);
2087                         return status;
2088                 }
2089                 status = be_cmd_enable_magic_wol(adapter,
2090                                 adapter->netdev->dev_addr, &cmd);
2091                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2092                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2093         } else {
2094                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2095                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2096                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2097         }
2098
2099         pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2100         return status;
2101 }
2102
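/* One-time bring-up: create the interface (plus one interface per VF
 * when running as the PF), then the TX, RX and MCC queue sets.
 */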
2103 static int be_setup(struct be_adapter *adapter)
2104 {
2105         struct net_device *netdev = adapter->netdev;
2106         u32 cap_flags, en_flags, vf = 0;
2107         int status;
2108         u8 mac[ETH_ALEN];
2109
2110         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2111
2112         if (be_physfn(adapter)) {
2113                 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2114                                 BE_IF_FLAGS_PROMISCUOUS |
2115                                 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2116                 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2117         }
2118
2119         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2120                         netdev->dev_addr, false/* pmac_invalid */,
2121                         &adapter->if_handle, &adapter->pmac_id, 0);
2122         if (status != 0)
2123                 goto do_none;
2124
2125         if (be_physfn(adapter)) {
2126                 while (vf < num_vfs) {
2127                         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
2128                                         | BE_IF_FLAGS_BROADCAST;
2129                         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2130                                         mac, true,
2131                                         &adapter->vf_cfg[vf].vf_if_handle,
2132                                         NULL, vf+1);
2133                         if (status) {
2134                                 dev_err(&adapter->pdev->dev,
2135                                 "Interface Create failed for VF %d\n", vf);
2136                                 goto if_destroy;
2137                         }
2138                         adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
2139                         vf++;
2140                 }
2141         } else {
2142                 status = be_cmd_mac_addr_query(adapter, mac,
2143                         MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2144                 if (!status) {
2145                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2146                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2147                 }
2148         }
2149
2150         status = be_tx_queues_create(adapter);
2151         if (status != 0)
2152                 goto if_destroy;
2153
2154         status = be_rx_queues_create(adapter);
2155         if (status != 0)
2156                 goto tx_qs_destroy;
2157
2158         status = be_mcc_queues_create(adapter);
2159         if (status != 0)
2160                 goto rx_qs_destroy;
2161
2162         adapter->link_speed = -1;
2163
2164         return 0;
2165
2166 rx_qs_destroy:
2167         be_rx_queues_destroy(adapter);
2168 tx_qs_destroy:
2169         be_tx_queues_destroy(adapter);
2170 if_destroy:
2171         for (vf = 0; vf < num_vfs; vf++)
2172                 if (adapter->vf_cfg[vf].vf_if_handle)
2173                         be_cmd_if_destroy(adapter,
2174                                         adapter->vf_cfg[vf].vf_if_handle);
2175         be_cmd_if_destroy(adapter, adapter->if_handle);
2176 do_none:
2177         return status;
2178 }
2179
2180 static int be_clear(struct be_adapter *adapter)
2181 {
2182         be_mcc_queues_destroy(adapter);
2183         be_rx_queues_destroy(adapter);
2184         be_tx_queues_destroy(adapter);
2185
2186         be_cmd_if_destroy(adapter, adapter->if_handle);
2187
2188         /* tell fw we're done with firing cmds */
2189         be_cmd_fw_clean(adapter);
2190         return 0;
2191 }
2192
2194 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2195 char flash_cookie[2][16] =      {"*** SE FLAS",
2196                                 "H DIRECTORY *** "};
2197
2198 static bool be_flash_redboot(struct be_adapter *adapter,
2199                         const u8 *p, u32 img_start, int image_size,
2200                         int hdr_size)
2201 {
2202         u32 crc_offset;
2203         u8 flashed_crc[4];
2204         int status;
2205
2206         crc_offset = hdr_size + img_start + image_size - 4;
2207
2208         p += crc_offset;
2209
2210         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2211                         (image_size - 4));
2212         if (status) {
2213                 dev_err(&adapter->pdev->dev,
2214                 "could not get crc from flash, not flashing redboot\n");
2215                 return false;
2216         }
2217
2218         /*update redboot only if crc does not match*/
2219         if (!memcmp(flashed_crc, p, 4))
2220                 return false;
2221         else
2222                 return true;
2223 }
2224
2225 static int be_flash_data(struct be_adapter *adapter,
2226                         const struct firmware *fw,
2227                         struct be_dma_mem *flash_cmd, int num_of_images)
2229 {
2230         int status = 0, i, filehdr_size = 0;
2231         u32 total_bytes = 0, flash_op;
2232         int num_bytes;
2233         const u8 *p = fw->data;
2234         struct be_cmd_write_flashrom *req = flash_cmd->va;
2235         struct flash_comp *pflashcomp;
2236         int num_comp;
2237
2238         struct flash_comp gen3_flash_types[9] = {
2239                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2240                         FLASH_IMAGE_MAX_SIZE_g3},
2241                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2242                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2243                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2244                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2245                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2246                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2247                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2248                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2249                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2250                         FLASH_IMAGE_MAX_SIZE_g3},
2251                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2252                         FLASH_IMAGE_MAX_SIZE_g3},
2253                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2254                         FLASH_IMAGE_MAX_SIZE_g3},
2255                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2256                         FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2257         };
2258         struct flash_comp gen2_flash_types[8] = {
2259                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2260                         FLASH_IMAGE_MAX_SIZE_g2},
2261                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2262                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2263                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2264                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2265                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2266                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2267                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2268                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2269                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2270                         FLASH_IMAGE_MAX_SIZE_g2},
2271                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2272                         FLASH_IMAGE_MAX_SIZE_g2},
2273                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2274                          FLASH_IMAGE_MAX_SIZE_g2}
2275         };
2276
2277         if (adapter->generation == BE_GEN3) {
2278                 pflashcomp = gen3_flash_types;
2279                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2280                 num_comp = 9;
2281         } else {
2282                 pflashcomp = gen2_flash_types;
2283                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2284                 num_comp = 8;
2285         }
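        /* Write each selected component in 32KB chunks: intermediate
         * chunks use FLASHROM_OPER_SAVE and the final chunk uses
         * FLASHROM_OPER_FLASH to commit the image.
         */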
2286         for (i = 0; i < num_comp; i++) {
2287                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2288                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2289                         continue;
2290                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2291                         (!be_flash_redboot(adapter, fw->data,
2292                          pflashcomp[i].offset, pflashcomp[i].size,
2293                          filehdr_size)))
2294                         continue;
2295                 p = fw->data;
2296                 p += filehdr_size + pflashcomp[i].offset
2297                         + (num_of_images * sizeof(struct image_hdr));
2298                 if (p + pflashcomp[i].size > fw->data + fw->size)
2299                         return -1;
2300                 total_bytes = pflashcomp[i].size;
2301                 while (total_bytes) {
2302                         if (total_bytes > 32*1024)
2303                                 num_bytes = 32*1024;
2304                         else
2305                                 num_bytes = total_bytes;
2306                         total_bytes -= num_bytes;
2307
2308                         if (!total_bytes)
2309                                 flash_op = FLASHROM_OPER_FLASH;
2310                         else
2311                                 flash_op = FLASHROM_OPER_SAVE;
2312                         memcpy(req->params.data_buf, p, num_bytes);
2313                         p += num_bytes;
2314                         status = be_cmd_write_flashrom(adapter, flash_cmd,
2315                                 pflashcomp[i].optype, flash_op, num_bytes);
2316                         if (status) {
2317                                 dev_err(&adapter->pdev->dev,
2318                                         "cmd to write to flash rom failed.\n");
2319                                 return -1;
2320                         }
2321                         yield();
2322                 }
2323         }
2324         return 0;
2325 }
2326
2327 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2328 {
2329         if (fhdr == NULL)
2330                 return 0;
2331         if (fhdr->build[0] == '3')
2332                 return BE_GEN3;
2333         else if (fhdr->build[0] == '2')
2334                 return BE_GEN2;
2335         else
2336                 return 0;
2337 }
2338
2339 int be_load_fw(struct be_adapter *adapter, u8 *func)
2340 {
2341         char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2342         const struct firmware *fw;
2343         struct flash_file_hdr_g2 *fhdr;
2344         struct flash_file_hdr_g3 *fhdr3;
2345         struct image_hdr *img_hdr_ptr = NULL;
2346         struct be_dma_mem flash_cmd;
2347         int status, i = 0, num_imgs = 0;
2348         const u8 *p;
2349
2350         strcpy(fw_file, func);
2351
2352         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2353         if (status)
2354                 goto fw_exit;
2355
2356         p = fw->data;
2357         fhdr = (struct flash_file_hdr_g2 *) p;
2358         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2359
2360         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2361         flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
2362                                         &flash_cmd.dma);
2363         if (!flash_cmd.va) {
2364                 status = -ENOMEM;
2365                 dev_err(&adapter->pdev->dev,
2366                         "Memory allocation failure while flashing\n");
2367                 goto fw_exit;
2368         }
2369
2370         if ((adapter->generation == BE_GEN3) &&
2371                         (get_ufigen_type(fhdr) == BE_GEN3)) {
2372                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2373                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2374                 for (i = 0; i < num_imgs; i++) {
2375                         img_hdr_ptr = (struct image_hdr *) (fw->data +
2376                                         (sizeof(struct flash_file_hdr_g3) +
2377                                          i * sizeof(struct image_hdr)));
2378                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2379                                 status = be_flash_data(adapter, fw, &flash_cmd,
2380                                                         num_imgs);
2381                 }
2382         } else if ((adapter->generation == BE_GEN2) &&
2383                         (get_ufigen_type(fhdr) == BE_GEN2)) {
2384                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2385         } else {
2386                 dev_err(&adapter->pdev->dev,
2387                         "UFI and Interface are not compatible for flashing\n");
2388                 status = -1;
2389         }
2390
2391         pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
2392                                 flash_cmd.dma);
2393         if (status) {
2394                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2395                 goto fw_exit;
2396         }
2397
2398         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2399
2400 fw_exit:
2401         release_firmware(fw);
2402         return status;
2403 }
2404
2405 static const struct net_device_ops be_netdev_ops = {
2406         .ndo_open               = be_open,
2407         .ndo_stop               = be_close,
2408         .ndo_start_xmit         = be_xmit,
2409         .ndo_get_stats          = be_get_stats,
2410         .ndo_set_rx_mode        = be_set_multicast_list,
2411         .ndo_set_mac_address    = be_mac_addr_set,
2412         .ndo_change_mtu         = be_change_mtu,
2413         .ndo_validate_addr      = eth_validate_addr,
2414         .ndo_vlan_rx_register   = be_vlan_register,
2415         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
2416         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
2417         .ndo_set_vf_mac         = be_set_vf_mac,
2418         .ndo_set_vf_vlan        = be_set_vf_vlan,
2419         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
2420         .ndo_get_vf_config      = be_get_vf_config
2421 };
2422
2423 static void be_netdev_init(struct net_device *netdev)
2424 {
2425         struct be_adapter *adapter = netdev_priv(netdev);
2426
2427         netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2428                 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
2429                 NETIF_F_GRO | NETIF_F_TSO6;
2430
2431         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
2432
2433         netdev->flags |= IFF_MULTICAST;
2434
2435         adapter->rx_csum = true;
2436
2437         /* Default settings for Rx and Tx flow control */
2438         adapter->rx_fc = true;
2439         adapter->tx_fc = true;
2440
2441         netif_set_gso_max_size(netdev, 65535);
2442
2443         BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2444
2445         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2446
2447         netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
2448                 BE_NAPI_WEIGHT);
2449         netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2450                 BE_NAPI_WEIGHT);
2451
2452         netif_carrier_off(netdev);
2453         netif_stop_queue(netdev);
2454 }
2455
2456 static void be_unmap_pci_bars(struct be_adapter *adapter)
2457 {
2458         if (adapter->csr)
2459                 iounmap(adapter->csr);
2460         if (adapter->db)
2461                 iounmap(adapter->db);
2462         if (adapter->pcicfg && be_physfn(adapter))
2463                 iounmap(adapter->pcicfg);
2464 }
2465
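/* BAR layout differs by chip generation and function type: the CSR BAR
 * (2) exists only on the PF; BE2 keeps pcicfg in BAR 1 with the doorbell
 * in BAR 4, while BE3 keeps pcicfg in BAR 0 and, for VFs, exposes pcicfg
 * at a fixed offset inside the doorbell BAR.
 */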
2466 static int be_map_pci_bars(struct be_adapter *adapter)
2467 {
2468         u8 __iomem *addr;
2469         int pcicfg_reg, db_reg;
2470
2471         if (be_physfn(adapter)) {
2472                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2473                                 pci_resource_len(adapter->pdev, 2));
2474                 if (addr == NULL)
2475                         return -ENOMEM;
2476                 adapter->csr = addr;
2477         }
2478
2479         if (adapter->generation == BE_GEN2) {
2480                 pcicfg_reg = 1;
2481                 db_reg = 4;
2482         } else {
2483                 pcicfg_reg = 0;
2484                 if (be_physfn(adapter))
2485                         db_reg = 4;
2486                 else
2487                         db_reg = 0;
2488         }
2489         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2490                                 pci_resource_len(adapter->pdev, db_reg));
2491         if (addr == NULL)
2492                 goto pci_map_err;
2493         adapter->db = addr;
2494
2495         if (be_physfn(adapter)) {
2496                 addr = ioremap_nocache(
2497                                 pci_resource_start(adapter->pdev, pcicfg_reg),
2498                                 pci_resource_len(adapter->pdev, pcicfg_reg));
2499                 if (addr == NULL)
2500                         goto pci_map_err;
2501                 adapter->pcicfg = addr;
2502         } else
2503                 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2504
2505         return 0;
2506 pci_map_err:
2507         be_unmap_pci_bars(adapter);
2508         return -ENOMEM;
2509 }
2510
2512 static void be_ctrl_cleanup(struct be_adapter *adapter)
2513 {
2514         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2515
2516         be_unmap_pci_bars(adapter);
2517
2518         if (mem->va)
2519                 pci_free_consistent(adapter->pdev, mem->size,
2520                         mem->va, mem->dma);
2521
2522         mem = &adapter->mc_cmd_mem;
2523         if (mem->va)
2524                 pci_free_consistent(adapter->pdev, mem->size,
2525                         mem->va, mem->dma);
2526 }
2527
2528 static int be_ctrl_init(struct be_adapter *adapter)
2529 {
2530         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2531         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2532         struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2533         int status;
2534
2535         status = be_map_pci_bars(adapter);
2536         if (status)
2537                 goto done;
2538
2539         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2540         mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
2541                                 mbox_mem_alloc->size, &mbox_mem_alloc->dma);
2542         if (!mbox_mem_alloc->va) {
2543                 status = -ENOMEM;
2544                 goto unmap_pci_bars;
2545         }
2546
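        /* The extra 16 bytes allocated above let us round the mailbox va
         * and dma addresses up to a 16-byte boundary with PTR_ALIGN.
         */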
2547         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2548         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2549         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2550         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2551
2552         mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2553         mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
2554                         &mc_cmd_mem->dma);
2555         if (mc_cmd_mem->va == NULL) {
2556                 status = -ENOMEM;
2557                 goto free_mbox;
2558         }
2559         memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2560
2561         spin_lock_init(&adapter->mbox_lock);
2562         spin_lock_init(&adapter->mcc_lock);
2563         spin_lock_init(&adapter->mcc_cq_lock);
2564
2565         init_completion(&adapter->flash_compl);
2566         pci_save_state(adapter->pdev);
2567         return 0;
2568
2569 free_mbox:
2570         pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
2571                 mbox_mem_alloc->va, mbox_mem_alloc->dma);
2572
2573 unmap_pci_bars:
2574         be_unmap_pci_bars(adapter);
2575
2576 done:
2577         return status;
2578 }
2579
2580 static void be_stats_cleanup(struct be_adapter *adapter)
2581 {
2582         struct be_stats_obj *stats = &adapter->stats;
2583         struct be_dma_mem *cmd = &stats->cmd;
2584
2585         if (cmd->va)
2586                 pci_free_consistent(adapter->pdev, cmd->size,
2587                         cmd->va, cmd->dma);
2588 }
2589
2590 static int be_stats_init(struct be_adapter *adapter)
2591 {
2592         struct be_stats_obj *stats = &adapter->stats;
2593         struct be_dma_mem *cmd = &stats->cmd;
2594
2595         cmd->size = sizeof(struct be_cmd_req_get_stats);
2596         cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
2597         if (cmd->va == NULL)
2598                 return -1;
2599         memset(cmd->va, 0, cmd->size);
2600         return 0;
2601 }
2602
2603 static void __devexit be_remove(struct pci_dev *pdev)
2604 {
2605         struct be_adapter *adapter = pci_get_drvdata(pdev);
2606
2607         if (!adapter)
2608                 return;
2609
2610         unregister_netdev(adapter->netdev);
2611
2612         be_clear(adapter);
2613
2614         be_stats_cleanup(adapter);
2615
2616         be_ctrl_cleanup(adapter);
2617
2618         be_sriov_disable(adapter);
2619
2620         be_msix_disable(adapter);
2621
2622         pci_set_drvdata(pdev, NULL);
2623         pci_release_regions(pdev);
2624         pci_disable_device(pdev);
2625
2626         free_netdev(adapter->netdev);
2627 }
2628
2629 static int be_get_config(struct be_adapter *adapter)
2630 {
2631         int status;
2632         u8 mac[ETH_ALEN];
2633
2634         status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2635         if (status)
2636                 return status;
2637
2638         status = be_cmd_query_fw_cfg(adapter,
2639                                 &adapter->port_num, &adapter->function_mode);
2640         if (status)
2641                 return status;
2642
2643         memset(mac, 0, ETH_ALEN);
2644
2645         if (be_physfn(adapter)) {
2646                 status = be_cmd_mac_addr_query(adapter, mac,
2647                         MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2648
2649                 if (status)
2650                         return status;
2651
2652                 if (!is_valid_ether_addr(mac))
2653                         return -EADDRNOTAVAIL;
2654
2655                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2656                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2657         }
2658
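        /* When bit 0x400 of function_mode is set, only a quarter of the
         * vlan table is available to this function.
         */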
2659         if (adapter->function_mode & 0x400)
2660                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2661         else
2662                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2663
2664         return 0;
2665 }
2666
2667 static int __devinit be_probe(struct pci_dev *pdev,
2668                         const struct pci_device_id *pdev_id)
2669 {
2670         int status = 0;
2671         struct be_adapter *adapter;
2672         struct net_device *netdev;
2673
2675         status = pci_enable_device(pdev);
2676         if (status)
2677                 goto do_none;
2678
2679         status = pci_request_regions(pdev, DRV_NAME);
2680         if (status)
2681                 goto disable_dev;
2682         pci_set_master(pdev);
2683
2684         netdev = alloc_etherdev(sizeof(struct be_adapter));
2685         if (netdev == NULL) {
2686                 status = -ENOMEM;
2687                 goto rel_reg;
2688         }
2689         adapter = netdev_priv(netdev);
2690
2691         switch (pdev->device) {
2692         case BE_DEVICE_ID1:
2693         case OC_DEVICE_ID1:
2694                 adapter->generation = BE_GEN2;
2695                 break;
2696         case BE_DEVICE_ID2:
2697         case OC_DEVICE_ID2:
2698                 adapter->generation = BE_GEN3;
2699                 break;
2700         default:
2701                 adapter->generation = 0;
2702         }
2703
2704         adapter->pdev = pdev;
2705         pci_set_drvdata(pdev, adapter);
2706         adapter->netdev = netdev;
2707         be_netdev_init(netdev);
2708         SET_NETDEV_DEV(netdev, &pdev->dev);
2709
2710         be_msix_enable(adapter);
2711
2712         status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2713         if (!status) {
2714                 netdev->features |= NETIF_F_HIGHDMA;
2715         } else {
2716                 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2717                 if (status) {
2718                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
2719                         goto free_netdev;
2720                 }
2721         }
2722
2723         be_sriov_enable(adapter);
2724
2725         status = be_ctrl_init(adapter);
2726         if (status)
2727                 goto free_netdev;
2728
2729         /* sync up with fw's ready state */
2730         if (be_physfn(adapter)) {
2731                 status = be_cmd_POST(adapter);
2732                 if (status)
2733                         goto ctrl_clean;
2734         }
2735
2736         /* tell fw we're ready to fire cmds */
2737         status = be_cmd_fw_init(adapter);
2738         if (status)
2739                 goto ctrl_clean;
2740
2741         if (be_physfn(adapter)) {
2742                 status = be_cmd_reset_function(adapter);
2743                 if (status)
2744                         goto ctrl_clean;
2745         }
2746
2747         status = be_stats_init(adapter);
2748         if (status)
2749                 goto ctrl_clean;
2750
2751         status = be_get_config(adapter);
2752         if (status)
2753                 goto stats_clean;
2754
2755         INIT_DELAYED_WORK(&adapter->work, be_worker);
2756
2757         status = be_setup(adapter);
2758         if (status)
2759                 goto stats_clean;
2760
2761         status = register_netdev(netdev);
2762         if (status != 0)
2763                 goto unsetup;
2764
2765         dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
2766         return 0;
2767
2768 unsetup:
2769         be_clear(adapter);
2770 stats_clean:
2771         be_stats_cleanup(adapter);
2772 ctrl_clean:
2773         be_ctrl_cleanup(adapter);
2774 free_netdev:
2775         be_msix_disable(adapter);
2776         be_sriov_disable(adapter);
2777         free_netdev(adapter->netdev);
2778         pci_set_drvdata(pdev, NULL);
2779 rel_reg:
2780         pci_release_regions(pdev);
2781 disable_dev:
2782         pci_disable_device(pdev);
2783 do_none:
2784         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
2785         return status;
2786 }
2787
2788 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
2789 {
2790         struct be_adapter *adapter = pci_get_drvdata(pdev);
2791         struct net_device *netdev = adapter->netdev;
2792
2793         if (adapter->wol)
2794                 be_setup_wol(adapter, true);
2795
2796         netif_device_detach(netdev);
2797         if (netif_running(netdev)) {
2798                 rtnl_lock();
2799                 be_close(netdev);
2800                 rtnl_unlock();
2801         }
2802         be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
2803         be_clear(adapter);
2804
2805         pci_save_state(pdev);
2806         pci_disable_device(pdev);
2807         pci_set_power_state(pdev, pci_choose_state(pdev, state));
2808         return 0;
2809 }
2810
2811 static int be_resume(struct pci_dev *pdev)
2812 {
2813         int status = 0;
2814         struct be_adapter *adapter = pci_get_drvdata(pdev);
2815         struct net_device *netdev = adapter->netdev;
2816
2817         netif_device_detach(netdev);
2818
2819         status = pci_enable_device(pdev);
2820         if (status)
2821                 return status;
2822
2823         pci_set_power_state(pdev, PCI_D0);
2824         pci_restore_state(pdev);
2825
2826         /* tell fw we're ready to fire cmds */
2827         status = be_cmd_fw_init(adapter);
2828         if (status)
2829                 return status;
2830
2831         be_setup(adapter);
2832         if (netif_running(netdev)) {
2833                 rtnl_lock();
2834                 be_open(netdev);
2835                 rtnl_unlock();
2836         }
2837         netif_device_attach(netdev);
2838
2839         if (adapter->wol)
2840                 be_setup_wol(adapter, false);
2841         return 0;
2842 }
2843
2844 /*
2845  * An FLR will stop BE from DMAing any data.
2846  */
2847 static void be_shutdown(struct pci_dev *pdev)
2848 {
2849         struct be_adapter *adapter = pci_get_drvdata(pdev);
2850         struct net_device *netdev = adapter->netdev;
2851
2852         netif_device_detach(netdev);
2853
2854         be_cmd_reset_function(adapter);
2855
2856         if (adapter->wol)
2857                 be_setup_wol(adapter, true);
2858
2859         pci_disable_device(pdev);
2860 }
2861
2862 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
2863                                 pci_channel_state_t state)
2864 {
2865         struct be_adapter *adapter = pci_get_drvdata(pdev);
2866         struct net_device *netdev = adapter->netdev;
2867
2868         dev_err(&adapter->pdev->dev, "EEH error detected\n");
2869
2870         adapter->eeh_err = true;
2871
2872         netif_device_detach(netdev);
2873
2874         if (netif_running(netdev)) {
2875                 rtnl_lock();
2876                 be_close(netdev);
2877                 rtnl_unlock();
2878         }
2879         be_clear(adapter);
2880
2881         if (state == pci_channel_io_perm_failure)
2882                 return PCI_ERS_RESULT_DISCONNECT;
2883
2884         pci_disable_device(pdev);
2885
2886         return PCI_ERS_RESULT_NEED_RESET;
2887 }
2888
2889 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
2890 {
2891         struct be_adapter *adapter = pci_get_drvdata(pdev);
2892         int status;
2893
2894         dev_info(&adapter->pdev->dev, "EEH reset\n");
2895         adapter->eeh_err = false;
2896
2897         status = pci_enable_device(pdev);
2898         if (status)
2899                 return PCI_ERS_RESULT_DISCONNECT;
2900
2901         pci_set_master(pdev);
2902         pci_set_power_state(pdev, PCI_D0);
2903         pci_restore_state(pdev);
2904
2905         /* Check if card is ok and fw is ready */
2906         status = be_cmd_POST(adapter);
2907         if (status)
2908                 return PCI_ERS_RESULT_DISCONNECT;
2909
2910         return PCI_ERS_RESULT_RECOVERED;
2911 }
2912
2913 static void be_eeh_resume(struct pci_dev *pdev)
2914 {
2915         int status = 0;
2916         struct be_adapter *adapter = pci_get_drvdata(pdev);
2917         struct net_device *netdev = adapter->netdev;
2918
2919         dev_info(&adapter->pdev->dev, "EEH resume\n");
2920
2921         pci_save_state(pdev);
2922
2923         /* tell fw we're ready to fire cmds */
2924         status = be_cmd_fw_init(adapter);
2925         if (status)
2926                 goto err;
2927
2928         status = be_setup(adapter);
2929         if (status)
2930                 goto err;
2931
2932         if (netif_running(netdev)) {
2933                 status = be_open(netdev);
2934                 if (status)
2935                         goto err;
2936         }
2937         netif_device_attach(netdev);
2938         return;
2939 err:
2940         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
2941 }
2942
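/* EEH recovery sequence: error_detected (detach and clean up), then
 * slot_reset (re-enable the device and re-run POST), then resume
 * (re-init firmware, re-create queues and reattach the netdev).
 */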
2943 static struct pci_error_handlers be_eeh_handlers = {
2944         .error_detected = be_eeh_err_detected,
2945         .slot_reset = be_eeh_reset,
2946         .resume = be_eeh_resume,
2947 };
2948
2949 static struct pci_driver be_driver = {
2950         .name = DRV_NAME,
2951         .id_table = be_dev_ids,
2952         .probe = be_probe,
2953         .remove = be_remove,
2954         .suspend = be_suspend,
2955         .resume = be_resume,
2956         .shutdown = be_shutdown,
2957         .err_handler = &be_eeh_handlers
2958 };
2959
2960 static int __init be_init_module(void)
2961 {
2962         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
2963             rx_frag_size != 2048) {
2964                 printk(KERN_WARNING DRV_NAME
2965                         " : Module param rx_frag_size must be 2048/4096/8192."
2966                         " Using 2048\n");
2967                 rx_frag_size = 2048;
2968         }
2969
2970         if (num_vfs > 32) {
2971                 printk(KERN_WARNING DRV_NAME
2972                         " : Module param num_vfs must not be greater than 32."
2973                         " Using 32\n");
2974                 num_vfs = 32;
2975         }
2976
2977         return pci_register_driver(&be_driver);
2978 }
2979 module_init(be_init_module);
2980
2981 static void __exit be_exit_module(void)
2982 {
2983         pci_unregister_driver(&be_driver);
2984 }
2985 module_exit(be_exit_module);