[net-next-2.6.git] drivers/net/qlge/qlge_main.c
1 /*
2  * QLogic qlge NIC HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  * See LICENSE.qlge for copyright and licensing details.
5  * Author:     Linux qlge network device driver by
6  *                      Ron Mercer <ron.mercer@qlogic.com>
7  */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/types.h>
11 #include <linux/module.h>
12 #include <linux/list.h>
13 #include <linux/pci.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/pagemap.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/dmapool.h>
19 #include <linux/mempool.h>
20 #include <linux/spinlock.h>
21 #include <linux/kthread.h>
22 #include <linux/interrupt.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/ipv6.h>
28 #include <net/ipv6.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/if_arp.h>
32 #include <linux/if_ether.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/ethtool.h>
36 #include <linux/skbuff.h>
37 #include <linux/if_vlan.h>
38 #include <linux/delay.h>
39 #include <linux/mm.h>
40 #include <linux/vmalloc.h>
41 #include <net/ip6_checksum.h>
42
43 #include "qlge.h"
44
45 char qlge_driver_name[] = DRV_NAME;
46 const char qlge_driver_version[] = DRV_VERSION;
47
48 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
49 MODULE_DESCRIPTION(DRV_STRING " ");
50 MODULE_LICENSE("GPL");
51 MODULE_VERSION(DRV_VERSION);
52
53 static const u32 default_msg =
54     NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
55 /* NETIF_MSG_TIMER |    */
56     NETIF_MSG_IFDOWN |
57     NETIF_MSG_IFUP |
58     NETIF_MSG_RX_ERR |
59     NETIF_MSG_TX_ERR |
60 /*  NETIF_MSG_TX_QUEUED | */
61 /*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
62 /* NETIF_MSG_PKTDATA | */
63     NETIF_MSG_HW | NETIF_MSG_WOL | 0;
64
65 static int debug = 0x00007fff;  /* defaults above */
66 module_param(debug, int, 0);
67 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
68
69 #define MSIX_IRQ 0
70 #define MSI_IRQ 1
71 #define LEG_IRQ 2
72 static int qlge_irq_type = MSIX_IRQ;
73 module_param(qlge_irq_type, int, MSIX_IRQ);
74 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
75
76 static int qlge_mpi_coredump;
77 module_param(qlge_mpi_coredump, int, 0);
78 MODULE_PARM_DESC(qlge_mpi_coredump,
79                 "Option to enable MPI firmware dump. "
80                 "Default is OFF - Do Not allocate memory. ");
81
82 static int qlge_force_coredump;
83 module_param(qlge_force_coredump, int, 0);
84 MODULE_PARM_DESC(qlge_force_coredump,
85                 "Option to allow force of firmware core dump. "
86                 "Default is OFF - Do not allow.");
87
88 static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
89         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
90         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
91         /* required last entry */
92         {0,}
93 };
94
95 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
96
97 /* This hardware semaphore provides exclusive access to
98  * resources shared between the NIC driver, MPI firmware,
99  * FCoE firmware, and the FC driver.
100  */
101 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
102 {
103         u32 sem_bits = 0;
104
105         switch (sem_mask) {
106         case SEM_XGMAC0_MASK:
107                 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
108                 break;
109         case SEM_XGMAC1_MASK:
110                 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
111                 break;
112         case SEM_ICB_MASK:
113                 sem_bits = SEM_SET << SEM_ICB_SHIFT;
114                 break;
115         case SEM_MAC_ADDR_MASK:
116                 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
117                 break;
118         case SEM_FLASH_MASK:
119                 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
120                 break;
121         case SEM_PROBE_MASK:
122                 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
123                 break;
124         case SEM_RT_IDX_MASK:
125                 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
126                 break;
127         case SEM_PROC_REG_MASK:
128                 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
129                 break;
130         default:
131                 netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
132                 return -EINVAL;
133         }
134
135         ql_write32(qdev, SEM, sem_bits | sem_mask);
136         return !(ql_read32(qdev, SEM) & sem_bits);
137 }
138
139 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
140 {
141         unsigned int wait_count = 30;
142         do {
143                 if (!ql_sem_trylock(qdev, sem_mask))
144                         return 0;
145                 udelay(100);
146         } while (--wait_count);
147         return -ETIMEDOUT;
148 }
149
150 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
151 {
152         ql_write32(qdev, SEM, sem_mask);
153         ql_read32(qdev, SEM);   /* flush */
154 }
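
/* Typical usage: callers bracket access to a shared hardware resource
 * with ql_sem_spinlock()/ql_sem_unlock(), e.g. (see ql_set_mac_addr()
 * further down in this file):
 *
 *	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 *	if (status)
 *		return status;
 *	... access the MAC address registers ...
 *	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 */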
155
156 /* This function waits for a specific bit to come ready
157  * in a given register.  It is used mostly by the initialization
158  * process, but is also used by kernel thread APIs such as
159  * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
160  */
161 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
162 {
163         u32 temp;
164         int count = UDELAY_COUNT;
165
166         while (count) {
167                 temp = ql_read32(qdev, reg);
168
169                 /* check for errors */
170                 if (temp & err_bit) {
171                         netif_alert(qdev, probe, qdev->ndev,
172                                     "register 0x%.08x access error, value = 0x%.08x!.\n",
173                                     reg, temp);
174                         return -EIO;
175                 } else if (temp & bit)
176                         return 0;
177                 udelay(UDELAY_DELAY);
178                 count--;
179         }
180         netif_alert(qdev, probe, qdev->ndev,
181                     "Timed out waiting for reg %x to come ready.\n", reg);
182         return -ETIMEDOUT;
183 }
184
185 /* The CFG register is used to download TX and RX control blocks
186  * to the chip. This function waits for an operation to complete.
187  */
188 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
189 {
190         int count = UDELAY_COUNT;
191         u32 temp;
192
193         while (count) {
194                 temp = ql_read32(qdev, CFG);
195                 if (temp & CFG_LE)
196                         return -EIO;
197                 if (!(temp & bit))
198                         return 0;
199                 udelay(UDELAY_DELAY);
200                 count--;
201         }
202         return -ETIMEDOUT;
203 }
204
205
206 /* Used to issue init control blocks to hw. Maps control block,
207  * sets address, triggers download, waits for completion.
208  */
209 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
210                  u16 q_id)
211 {
212         u64 map;
213         int status = 0;
214         int direction;
215         u32 mask;
216         u32 value;
217
218         direction =
219             (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
220             PCI_DMA_FROMDEVICE;
221
222         map = pci_map_single(qdev->pdev, ptr, size, direction);
223         if (pci_dma_mapping_error(qdev->pdev, map)) {
224                 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
225                 return -ENOMEM;
226         }
227
228         status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
229         if (status)
230                 return status;
231
232         status = ql_wait_cfg(qdev, bit);
233         if (status) {
234                 netif_err(qdev, ifup, qdev->ndev,
235                           "Timed out waiting for CFG to come ready.\n");
236                 goto exit;
237         }
238
239         ql_write32(qdev, ICB_L, (u32) map);
240         ql_write32(qdev, ICB_H, (u32) (map >> 32));
241
242         mask = CFG_Q_MASK | (bit << 16);
243         value = bit | (q_id << CFG_Q_SHIFT);
244         ql_write32(qdev, CFG, (mask | value));
245
246         /*
247          * Wait for the bit to clear after signaling hw.
248          */
249         status = ql_wait_cfg(qdev, bit);
250 exit:
251         ql_sem_unlock(qdev, SEM_ICB_MASK);      /* does flush too */
252         pci_unmap_single(qdev->pdev, map, size, direction);
253         return status;
254 }
255
256 /* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
257 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
258                         u32 *value)
259 {
260         u32 offset = 0;
261         int status;
262
263         switch (type) {
264         case MAC_ADDR_TYPE_MULTI_MAC:
265         case MAC_ADDR_TYPE_CAM_MAC:
266                 {
267                         status =
268                             ql_wait_reg_rdy(qdev,
269                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
270                         if (status)
271                                 goto exit;
272                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
273                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
274                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
275                         status =
276                             ql_wait_reg_rdy(qdev,
277                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
278                         if (status)
279                                 goto exit;
280                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
281                         status =
282                             ql_wait_reg_rdy(qdev,
283                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
284                         if (status)
285                                 goto exit;
286                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
287                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
288                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
289                         status =
290                             ql_wait_reg_rdy(qdev,
291                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
292                         if (status)
293                                 goto exit;
294                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
295                         if (type == MAC_ADDR_TYPE_CAM_MAC) {
296                                 status =
297                                     ql_wait_reg_rdy(qdev,
298                                         MAC_ADDR_IDX, MAC_ADDR_MW, 0);
299                                 if (status)
300                                         goto exit;
301                                 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
302                                            (index << MAC_ADDR_IDX_SHIFT) | /* index */
303                                            MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
304                                 status =
305                                     ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
306                                                     MAC_ADDR_MR, 0);
307                                 if (status)
308                                         goto exit;
309                                 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
310                         }
311                         break;
312                 }
313         case MAC_ADDR_TYPE_VLAN:
314         case MAC_ADDR_TYPE_MULTI_FLTR:
315         default:
316                 netif_crit(qdev, ifup, qdev->ndev,
317                            "Address type %d not yet supported.\n", type);
318                 status = -EPERM;
319         }
320 exit:
321         return status;
322 }
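
/* Note: a CAM MAC entry occupies three consecutive words: the lower and
 * upper halves of the address plus an output/routing word.  That is why
 * the reader above fetches a third word for MAC_ADDR_TYPE_CAM_MAC, and
 * why the writer below programs three words for that type.
 */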
323
324 /* Set up a MAC, multicast or VLAN address for the
325  * inbound frame matching.
326  */
327 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
328                                u16 index)
329 {
330         u32 offset = 0;
331         int status = 0;
332
333         switch (type) {
334         case MAC_ADDR_TYPE_MULTI_MAC:
335                 {
336                         u32 upper = (addr[0] << 8) | addr[1];
337                         u32 lower = (addr[2] << 24) | (addr[3] << 16) |
338                                         (addr[4] << 8) | (addr[5]);
339
340                         status =
341                                 ql_wait_reg_rdy(qdev,
342                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
343                         if (status)
344                                 goto exit;
345                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
346                                 (index << MAC_ADDR_IDX_SHIFT) |
347                                 type | MAC_ADDR_E);
348                         ql_write32(qdev, MAC_ADDR_DATA, lower);
349                         status =
350                                 ql_wait_reg_rdy(qdev,
351                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
352                         if (status)
353                                 goto exit;
354                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
355                                 (index << MAC_ADDR_IDX_SHIFT) |
356                                 type | MAC_ADDR_E);
357
358                         ql_write32(qdev, MAC_ADDR_DATA, upper);
359                         status =
360                                 ql_wait_reg_rdy(qdev,
361                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
362                         if (status)
363                                 goto exit;
364                         break;
365                 }
366         case MAC_ADDR_TYPE_CAM_MAC:
367                 {
368                         u32 cam_output;
369                         u32 upper = (addr[0] << 8) | addr[1];
370                         u32 lower =
371                             (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
372                             (addr[5]);
373
374                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
375                                      "Adding %s address %pM at index %d in the CAM.\n",
376                                      type == MAC_ADDR_TYPE_MULTI_MAC ?
377                                      "MULTICAST" : "UNICAST",
378                                      addr, index);
379
380                         status =
381                             ql_wait_reg_rdy(qdev,
382                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
383                         if (status)
384                                 goto exit;
385                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
386                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
387                                    type);       /* type */
388                         ql_write32(qdev, MAC_ADDR_DATA, lower);
389                         status =
390                             ql_wait_reg_rdy(qdev,
391                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
392                         if (status)
393                                 goto exit;
394                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
395                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
396                                    type);       /* type */
397                         ql_write32(qdev, MAC_ADDR_DATA, upper);
398                         status =
399                             ql_wait_reg_rdy(qdev,
400                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
401                         if (status)
402                                 goto exit;
403                         ql_write32(qdev, MAC_ADDR_IDX, (offset) |       /* offset */
404                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
405                                    type);       /* type */
406                         /* This field should also include the queue id
407                            and possibly the function id.  Right now we hardcode
408                            the route field to NIC core.
409                          */
410                         cam_output = (CAM_OUT_ROUTE_NIC |
411                                       (qdev->
412                                        func << CAM_OUT_FUNC_SHIFT) |
413                                         (0 << CAM_OUT_CQ_ID_SHIFT));
414                         if (qdev->vlgrp)
415                                 cam_output |= CAM_OUT_RV;
416                         /* route to NIC core */
417                         ql_write32(qdev, MAC_ADDR_DATA, cam_output);
418                         break;
419                 }
420         case MAC_ADDR_TYPE_VLAN:
421                 {
422                         u32 enable_bit = *((u32 *) &addr[0]);
423                         /* For VLAN, the addr actually holds a bit that
424                          * either enables or disables the vlan id we are
425                          * addressing. It's either MAC_ADDR_E on or off.
426                          * That's bit-27 we're talking about.
427                          */
428                         netif_info(qdev, ifup, qdev->ndev,
429                                    "%s VLAN ID %d %s the CAM.\n",
430                                    enable_bit ? "Adding" : "Removing",
431                                    index,
432                                    enable_bit ? "to" : "from");
433
434                         status =
435                             ql_wait_reg_rdy(qdev,
436                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
437                         if (status)
438                                 goto exit;
439                         ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
440                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
441                                    type |       /* type */
442                                    enable_bit); /* enable/disable */
443                         break;
444                 }
445         case MAC_ADDR_TYPE_MULTI_FLTR:
446         default:
447                 netif_crit(qdev, ifup, qdev->ndev,
448                            "Address type %d not yet supported.\n", type);
449                 status = -EPERM;
450         }
451 exit:
452         return status;
453 }
454
455 /* Set or clear MAC address in hardware. We sometimes
456  * have to clear it to prevent wrong frame routing,
457  * especially in a bonding environment.
458  */
459 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
460 {
461         int status;
462         char zero_mac_addr[ETH_ALEN];
463         char *addr;
464
465         if (set) {
466                 addr = &qdev->current_mac_addr[0];
467                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
468                              "Set Mac addr %pM\n", addr);
469         } else {
470                 memset(zero_mac_addr, 0, ETH_ALEN);
471                 addr = &zero_mac_addr[0];
472                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
473                              "Clearing MAC address\n");
474         }
475         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
476         if (status)
477                 return status;
478         status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
479                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
480         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
481         if (status)
482                 netif_err(qdev, ifup, qdev->ndev,
483                           "Failed to init mac address.\n");
484         return status;
485 }
486
487 void ql_link_on(struct ql_adapter *qdev)
488 {
489         netif_err(qdev, link, qdev->ndev, "Link is up.\n");
490         netif_carrier_on(qdev->ndev);
491         ql_set_mac_addr(qdev, 1);
492 }
493
494 void ql_link_off(struct ql_adapter *qdev)
495 {
496         netif_err(qdev, link, qdev->ndev, "Link is down.\n");
497         netif_carrier_off(qdev->ndev);
498         ql_set_mac_addr(qdev, 0);
499 }
500
501 /* Get a specific frame routing value from the CAM.
502  * Used for debug and reg dump.
503  */
504 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
505 {
506         int status = 0;
507
508         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
509         if (status)
510                 goto exit;
511
512         ql_write32(qdev, RT_IDX,
513                    RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
514         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
515         if (status)
516                 goto exit;
517         *value = ql_read32(qdev, RT_DATA);
518 exit:
519         return status;
520 }
521
522 /* The NIC function for this chip has 16 routing indexes.  Each one can be used
523  * to route different frame types to various inbound queues.  We send broadcast/
524  * multicast/error frames to the default queue for slow handling,
525  * and CAM hit/RSS frames to the fast handling queues.
526  */
527 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
528                               int enable)
529 {
530         int status = -EINVAL; /* Return error if no mask match. */
531         u32 value = 0;
532
533         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
534                      "%s %s mask %s the routing reg.\n",
535                      enable ? "Adding" : "Removing",
536                      index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
537                      index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
538                      index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
539                      index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
540                      index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
541                      index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
542                      index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
543                      index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
544                      index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
545                      index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
546                      index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
547                      index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
548                      index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
549                      index == RT_IDX_UNUSED013 ? "UNUSED13" :
550                      index == RT_IDX_UNUSED014 ? "UNUSED14" :
551                      index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
552                      "(Bad index != RT_IDX)",
553                      enable ? "to" : "from");
554
555         switch (mask) {
556         case RT_IDX_CAM_HIT:
557                 {
558                         value = RT_IDX_DST_CAM_Q |      /* dest */
559                             RT_IDX_TYPE_NICQ |  /* type */
560                             (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
561                         break;
562                 }
563         case RT_IDX_VALID:      /* Promiscuous Mode frames. */
564                 {
565                         value = RT_IDX_DST_DFLT_Q |     /* dest */
566                             RT_IDX_TYPE_NICQ |  /* type */
567                             (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
568                         break;
569                 }
570         case RT_IDX_ERR:        /* Pass up MAC,IP,TCP/UDP error frames. */
571                 {
572                         value = RT_IDX_DST_DFLT_Q |     /* dest */
573                             RT_IDX_TYPE_NICQ |  /* type */
574                             (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
575                         break;
576                 }
577         case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
578                 {
579                         value = RT_IDX_DST_DFLT_Q | /* dest */
580                                 RT_IDX_TYPE_NICQ | /* type */
581                                 (RT_IDX_IP_CSUM_ERR_SLOT <<
582                                 RT_IDX_IDX_SHIFT); /* index */
583                         break;
584                 }
585         case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
586                 {
587                         value = RT_IDX_DST_DFLT_Q | /* dest */
588                                 RT_IDX_TYPE_NICQ | /* type */
589                                 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
590                                 RT_IDX_IDX_SHIFT); /* index */
591                         break;
592                 }
593         case RT_IDX_BCAST:      /* Pass up Broadcast frames to default Q. */
594                 {
595                         value = RT_IDX_DST_DFLT_Q |     /* dest */
596                             RT_IDX_TYPE_NICQ |  /* type */
597                             (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
598                         break;
599                 }
600         case RT_IDX_MCAST:      /* Pass up All Multicast frames. */
601                 {
602                         value = RT_IDX_DST_DFLT_Q |     /* dest */
603                             RT_IDX_TYPE_NICQ |  /* type */
604                             (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
605                         break;
606                 }
607         case RT_IDX_MCAST_MATCH:        /* Pass up matched Multicast frames. */
608                 {
609                         value = RT_IDX_DST_DFLT_Q |     /* dest */
610                             RT_IDX_TYPE_NICQ |  /* type */
611                             (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
612                         break;
613                 }
614         case RT_IDX_RSS_MATCH:  /* Pass up matched RSS frames. */
615                 {
616                         value = RT_IDX_DST_RSS |        /* dest */
617                             RT_IDX_TYPE_NICQ |  /* type */
618                             (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
619                         break;
620                 }
621         case 0:         /* Clear the E-bit on an entry. */
622                 {
623                         value = RT_IDX_DST_DFLT_Q |     /* dest */
624                             RT_IDX_TYPE_NICQ |  /* type */
625                             (index << RT_IDX_IDX_SHIFT);/* index */
626                         break;
627                 }
628         default:
629                 netif_err(qdev, ifup, qdev->ndev,
630                           "Mask type %d not yet supported.\n", mask);
631                 status = -EPERM;
632                 goto exit;
633         }
634
635         if (value) {
636                 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
637                 if (status)
638                         goto exit;
639                 value |= (enable ? RT_IDX_E : 0);
640                 ql_write32(qdev, RT_IDX, value);
641                 ql_write32(qdev, RT_DATA, enable ? mask : 0);
642         }
643 exit:
644         return status;
645 }
646
647 static void ql_enable_interrupts(struct ql_adapter *qdev)
648 {
649         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
650 }
651
652 static void ql_disable_interrupts(struct ql_adapter *qdev)
653 {
654         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
655 }
656
657 /* If we're running with multiple MSI-X vectors then we enable on the fly.
658  * Otherwise, we may have multiple outstanding workers and don't want to
659  * enable until the last one finishes. In this case, the irq_cnt gets
660  * incremented every time we queue a worker and decremented every time
661  * a worker finishes.  Once it hits zero we enable the interrupt.
662  */
663 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
664 {
665         u32 var = 0;
666         unsigned long hw_flags = 0;
667         struct intr_context *ctx = qdev->intr_context + intr;
668
669         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
670                 /* Always enable if we're in MSI-X multi-interrupt mode and
671                  * it's not the default (zeroth) interrupt.
672                  */
673                 ql_write32(qdev, INTR_EN,
674                            ctx->intr_en_mask);
675                 var = ql_read32(qdev, STS);
676                 return var;
677         }
678
679         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
680         if (atomic_dec_and_test(&ctx->irq_cnt)) {
681                 ql_write32(qdev, INTR_EN,
682                            ctx->intr_en_mask);
683                 var = ql_read32(qdev, STS);
684         }
685         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
686         return var;
687 }
688
689 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
690 {
691         u32 var = 0;
692         struct intr_context *ctx;
693
694         /* HW disables for us if we're in MSI-X multi-interrupt mode and
695          * it's not the default (zeroth) interrupt.
696          */
697         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
698                 return 0;
699
700         ctx = qdev->intr_context + intr;
701         spin_lock(&qdev->hw_lock);
702         if (!atomic_read(&ctx->irq_cnt)) {
703                 ql_write32(qdev, INTR_EN,
704                 ctx->intr_dis_mask);
705                 var = ql_read32(qdev, STS);
706         }
707         atomic_inc(&ctx->irq_cnt);
708         spin_unlock(&qdev->hw_lock);
709         return var;
710 }
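
/* Note on pairing: outside of MSI-X multi-vector operation every call to
 * ql_disable_completion_interrupt() bumps irq_cnt and every call to
 * ql_enable_completion_interrupt() drops it, so the interrupt is only
 * re-enabled once the count reaches zero.  This is why
 * ql_enable_all_completion_interrupts() below pre-charges the count to 1
 * before calling enable.
 */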
711
712 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
713 {
714         int i;
715         for (i = 0; i < qdev->intr_count; i++) {
716                 /* The enable call does an atomic_dec_and_test
717                  * and enables only if the result is zero.
718                  * So we precharge it here.
719                  */
720                 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
721                         i == 0))
722                         atomic_set(&qdev->intr_context[i].irq_cnt, 1);
723                 ql_enable_completion_interrupt(qdev, i);
724         }
725
726 }
727
728 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
729 {
730         int status, i;
731         u16 csum = 0;
732         __le16 *flash = (__le16 *)&qdev->flash;
733
734         status = strncmp((char *)&qdev->flash, str, 4);
735         if (status) {
736                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
737                 return  status;
738         }
739
740         for (i = 0; i < size; i++)
741                 csum += le16_to_cpu(*flash++);
742
743         if (csum)
744                 netif_err(qdev, ifup, qdev->ndev,
745                           "Invalid flash checksum, csum = 0x%.04x.\n", csum);
746
747         return csum;
748 }
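
/* The check above requires the __le16 words of the flash parameter block
 * (presumably including a stored checksum word) to sum to zero modulo
 * 2^16; any non-zero result is reported as a checksum error.
 */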
749
750 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
751 {
752         int status = 0;
753         /* wait for reg to come ready */
754         status = ql_wait_reg_rdy(qdev,
755                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
756         if (status)
757                 goto exit;
758         /* set up for reg read */
759         ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
760         /* wait for reg to come ready */
761         status = ql_wait_reg_rdy(qdev,
762                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
763         if (status)
764                 goto exit;
765         /* This data is stored on flash as an array of
766          * __le32.  Since ql_read32() returns cpu endian
767          * we need to swap it back.
768          */
769         *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
770 exit:
771         return status;
772 }
773
774 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
775 {
776         u32 i, size;
777         int status;
778         __le32 *p = (__le32 *)&qdev->flash;
779         u32 offset;
780         u8 mac_addr[6];
781
782         /* Get flash offset for function and adjust
783          * for dword access.
784          */
785         if (!qdev->port)
786                 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
787         else
788                 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
789
790         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
791                 return -ETIMEDOUT;
792
793         size = sizeof(struct flash_params_8000) / sizeof(u32);
794         for (i = 0; i < size; i++, p++) {
795                 status = ql_read_flash_word(qdev, i+offset, p);
796                 if (status) {
797                         netif_err(qdev, ifup, qdev->ndev,
798                                   "Error reading flash.\n");
799                         goto exit;
800                 }
801         }
802
803         status = ql_validate_flash(qdev,
804                         sizeof(struct flash_params_8000) / sizeof(u16),
805                         "8000");
806         if (status) {
807                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
808                 status = -EINVAL;
809                 goto exit;
810         }
811
812         /* Extract either manufacturer or BOFM modified
813          * MAC address.
814          */
815         if (qdev->flash.flash_params_8000.data_type1 == 2)
816                 memcpy(mac_addr,
817                         qdev->flash.flash_params_8000.mac_addr1,
818                         qdev->ndev->addr_len);
819         else
820                 memcpy(mac_addr,
821                         qdev->flash.flash_params_8000.mac_addr,
822                         qdev->ndev->addr_len);
823
824         if (!is_valid_ether_addr(mac_addr)) {
825                 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
826                 status = -EINVAL;
827                 goto exit;
828         }
829
830         memcpy(qdev->ndev->dev_addr,
831                 mac_addr,
832                 qdev->ndev->addr_len);
833
834 exit:
835         ql_sem_unlock(qdev, SEM_FLASH_MASK);
836         return status;
837 }
838
839 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
840 {
841         int i;
842         int status;
843         __le32 *p = (__le32 *)&qdev->flash;
844         u32 offset = 0;
845         u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
846
847         /* Second function's parameters follow the first
848          * function's.
849          */
850         if (qdev->port)
851                 offset = size;
852
853         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
854                 return -ETIMEDOUT;
855
856         for (i = 0; i < size; i++, p++) {
857                 status = ql_read_flash_word(qdev, i+offset, p);
858                 if (status) {
859                         netif_err(qdev, ifup, qdev->ndev,
860                                   "Error reading flash.\n");
861                         goto exit;
862                 }
863
864         }
865
866         status = ql_validate_flash(qdev,
867                         sizeof(struct flash_params_8012) / sizeof(u16),
868                         "8012");
869         if (status) {
870                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
871                 status = -EINVAL;
872                 goto exit;
873         }
874
875         if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
876                 status = -EINVAL;
877                 goto exit;
878         }
879
880         memcpy(qdev->ndev->dev_addr,
881                 qdev->flash.flash_params_8012.mac_addr,
882                 qdev->ndev->addr_len);
883
884 exit:
885         ql_sem_unlock(qdev, SEM_FLASH_MASK);
886         return status;
887 }
888
889 /* xgmac registers are located behind the xgmac_addr and xgmac_data
890  * register pair.  Each read/write requires us to wait for the ready
891  * bit before reading/writing the data.
892  */
893 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
894 {
895         int status;
896         /* wait for reg to come ready */
897         status = ql_wait_reg_rdy(qdev,
898                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
899         if (status)
900                 return status;
901         /* write the data to the data reg */
902         ql_write32(qdev, XGMAC_DATA, data);
903         /* trigger the write */
904         ql_write32(qdev, XGMAC_ADDR, reg);
905         return status;
906 }
907
908 /* xgmac registers are located behind the xgmac_addr and xgmac_data
909  * register pair.  Each read/write requires us to wait for the ready
910  * bit before reading/writing the data.
911  */
912 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
913 {
914         int status = 0;
915         /* wait for reg to come ready */
916         status = ql_wait_reg_rdy(qdev,
917                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
918         if (status)
919                 goto exit;
920         /* set up for reg read */
921         ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
922         /* wait for reg to come ready */
923         status = ql_wait_reg_rdy(qdev,
924                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
925         if (status)
926                 goto exit;
927         /* get the data */
928         *data = ql_read32(qdev, XGMAC_DATA);
929 exit:
930         return status;
931 }
932
933 /* This is used for reading the 64-bit statistics regs. */
934 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
935 {
936         int status = 0;
937         u32 hi = 0;
938         u32 lo = 0;
939
940         status = ql_read_xgmac_reg(qdev, reg, &lo);
941         if (status)
942                 goto exit;
943
944         status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
945         if (status)
946                 goto exit;
947
948         *data = (u64) lo | ((u64) hi << 32);
949
950 exit:
951         return status;
952 }
953
954 static int ql_8000_port_initialize(struct ql_adapter *qdev)
955 {
956         int status;
957         /*
958          * Get MPI firmware version for driver banner
959          * and ethool info.
960          */
961         status = ql_mb_about_fw(qdev);
962         if (status)
963                 goto exit;
964         status = ql_mb_get_fw_state(qdev);
965         if (status)
966                 goto exit;
967         /* Wake up a worker to get/set the TX/RX frame sizes. */
968         queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
969 exit:
970         return status;
971 }
972
973 /* Take the MAC Core out of reset.
974  * Enable statistics counting.
975  * Take the transmitter/receiver out of reset.
976  * This functionality may be done in the MPI firmware at a
977  * later date.
978  */
979 static int ql_8012_port_initialize(struct ql_adapter *qdev)
980 {
981         int status = 0;
982         u32 data;
983
984         if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
985                 /* Another function has the semaphore, so
986                  * wait for the port init bit to come ready.
987                  */
988                 netif_info(qdev, link, qdev->ndev,
989                            "Another function has the semaphore, so wait for the port init bit to come ready.\n");
990                 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
991                 if (status) {
992                         netif_crit(qdev, link, qdev->ndev,
993                                    "Port initialize timed out.\n");
994                 }
995                 return status;
996         }
997
998         netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
999         /* Set the core reset. */
1000         status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
1001         if (status)
1002                 goto end;
1003         data |= GLOBAL_CFG_RESET;
1004         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1005         if (status)
1006                 goto end;
1007
1008         /* Clear the core reset and turn on jumbo for receiver. */
1009         data &= ~GLOBAL_CFG_RESET;      /* Clear core reset. */
1010         data |= GLOBAL_CFG_JUMBO;       /* Turn on jumbo. */
1011         data |= GLOBAL_CFG_TX_STAT_EN;
1012         data |= GLOBAL_CFG_RX_STAT_EN;
1013         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1014         if (status)
1015                 goto end;
1016
1017         /* Enable the transmitter and clear its reset. */
1018         status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
1019         if (status)
1020                 goto end;
1021         data &= ~TX_CFG_RESET;  /* Clear the TX MAC reset. */
1022         data |= TX_CFG_EN;      /* Enable the transmitter. */
1023         status = ql_write_xgmac_reg(qdev, TX_CFG, data);
1024         if (status)
1025                 goto end;
1026
1027         /* Enable the receiver and clear its reset. */
1028         status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1029         if (status)
1030                 goto end;
1031         data &= ~RX_CFG_RESET;  /* Clear the RX MAC reset. */
1032         data |= RX_CFG_EN;      /* Enable the receiver. */
1033         status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1034         if (status)
1035                 goto end;
1036
1037         /* Turn on jumbo. */
1038         status =
1039             ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1040         if (status)
1041                 goto end;
1042         status =
1043             ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1044         if (status)
1045                 goto end;
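
        /* (0x2580 is 9600 decimal, presumably the jumbo maximum frame
         * length being programmed into MAC_TX_PARAMS/MAC_RX_PARAMS.)
         */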
1046
1047         /* Signal to the world that the port is enabled.        */
1048         ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1049 end:
1050         ql_sem_unlock(qdev, qdev->xg_sem_mask);
1051         return status;
1052 }
1053
1054 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1055 {
1056         return PAGE_SIZE << qdev->lbq_buf_order;
1057 }
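
/* Large receive buffers are carved out of a single "master" page of
 * ql_lbq_block_size() bytes (PAGE_SIZE << lbq_buf_order).  Each chunk of
 * lbq_buf_size bytes handed to a descriptor holds its own reference on
 * that page (get_page() is taken for all but the final chunk, which keeps
 * the original allocation reference), and the DMA mapping of the master
 * page is torn down only when its last chunk is consumed; see
 * ql_get_curr_lchunk() and ql_get_next_chunk() below.
 */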
1058
1059 /* Get the next large buffer. */
1060 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1061 {
1062         struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1063         rx_ring->lbq_curr_idx++;
1064         if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1065                 rx_ring->lbq_curr_idx = 0;
1066         rx_ring->lbq_free_cnt++;
1067         return lbq_desc;
1068 }
1069
1070 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1071                 struct rx_ring *rx_ring)
1072 {
1073         struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1074
1075         pci_dma_sync_single_for_cpu(qdev->pdev,
1076                                         dma_unmap_addr(lbq_desc, mapaddr),
1077                                     rx_ring->lbq_buf_size,
1078                                         PCI_DMA_FROMDEVICE);
1079
1080         /* If it's the last chunk of our master page then
1081          * we unmap it.
1082          */
1083         if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1084                                         == ql_lbq_block_size(qdev))
1085                 pci_unmap_page(qdev->pdev,
1086                                 lbq_desc->p.pg_chunk.map,
1087                                 ql_lbq_block_size(qdev),
1088                                 PCI_DMA_FROMDEVICE);
1089         return lbq_desc;
1090 }
1091
1092 /* Get the next small buffer. */
1093 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1094 {
1095         struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1096         rx_ring->sbq_curr_idx++;
1097         if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1098                 rx_ring->sbq_curr_idx = 0;
1099         rx_ring->sbq_free_cnt++;
1100         return sbq_desc;
1101 }
1102
1103 /* Update an rx ring index. */
1104 static void ql_update_cq(struct rx_ring *rx_ring)
1105 {
1106         rx_ring->cnsmr_idx++;
1107         rx_ring->curr_entry++;
1108         if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1109                 rx_ring->cnsmr_idx = 0;
1110                 rx_ring->curr_entry = rx_ring->cq_base;
1111         }
1112 }
1113
1114 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1115 {
1116         ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1117 }
1118
1119 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1120                                                 struct bq_desc *lbq_desc)
1121 {
1122         if (!rx_ring->pg_chunk.page) {
1123                 u64 map;
1124                 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1125                                                 GFP_ATOMIC,
1126                                                 qdev->lbq_buf_order);
1127                 if (unlikely(!rx_ring->pg_chunk.page)) {
1128                         netif_err(qdev, drv, qdev->ndev,
1129                                   "page allocation failed.\n");
1130                         return -ENOMEM;
1131                 }
1132                 rx_ring->pg_chunk.offset = 0;
1133                 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1134                                         0, ql_lbq_block_size(qdev),
1135                                         PCI_DMA_FROMDEVICE);
1136                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1137                         __free_pages(rx_ring->pg_chunk.page,
1138                                         qdev->lbq_buf_order);
1139                         netif_err(qdev, drv, qdev->ndev,
1140                                   "PCI mapping failed.\n");
1141                         return -ENOMEM;
1142                 }
1143                 rx_ring->pg_chunk.map = map;
1144                 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1145         }
1146
1147         /* Copy the current master pg_chunk info
1148          * to the current descriptor.
1149          */
1150         lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1151
1152         /* Adjust the master page chunk for next
1153          * buffer get.
1154          */
1155         rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1156         if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1157                 rx_ring->pg_chunk.page = NULL;
1158                 lbq_desc->p.pg_chunk.last_flag = 1;
1159         } else {
1160                 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1161                 get_page(rx_ring->pg_chunk.page);
1162                 lbq_desc->p.pg_chunk.last_flag = 0;
1163         }
1164         return 0;
1165 }
1166 /* Process (refill) a large buffer queue. */
1167 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1168 {
1169         u32 clean_idx = rx_ring->lbq_clean_idx;
1170         u32 start_idx = clean_idx;
1171         struct bq_desc *lbq_desc;
1172         u64 map;
1173         int i;
1174
1175         while (rx_ring->lbq_free_cnt > 32) {
1176                 for (i = 0; i < 16; i++) {
1177                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1178                                      "lbq: try cleaning clean_idx = %d.\n",
1179                                      clean_idx);
1180                         lbq_desc = &rx_ring->lbq[clean_idx];
1181                         if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1182                                 netif_err(qdev, ifup, qdev->ndev,
1183                                           "Could not get a page chunk.\n");
1184                                 return;
1185                         }
1186
1187                         map = lbq_desc->p.pg_chunk.map +
1188                                 lbq_desc->p.pg_chunk.offset;
1189                         dma_unmap_addr_set(lbq_desc, mapaddr, map);
1190                         dma_unmap_len_set(lbq_desc, maplen,
1191                                         rx_ring->lbq_buf_size);
1192                         *lbq_desc->addr = cpu_to_le64(map);
1193
1194                         pci_dma_sync_single_for_device(qdev->pdev, map,
1195                                                 rx_ring->lbq_buf_size,
1196                                                 PCI_DMA_FROMDEVICE);
1197                         clean_idx++;
1198                         if (clean_idx == rx_ring->lbq_len)
1199                                 clean_idx = 0;
1200                 }
1201
1202                 rx_ring->lbq_clean_idx = clean_idx;
1203                 rx_ring->lbq_prod_idx += 16;
1204                 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1205                         rx_ring->lbq_prod_idx = 0;
1206                 rx_ring->lbq_free_cnt -= 16;
1207         }
1208
1209         if (start_idx != clean_idx) {
1210                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1211                              "lbq: updating prod idx = %d.\n",
1212                              rx_ring->lbq_prod_idx);
1213                 ql_write_db_reg(rx_ring->lbq_prod_idx,
1214                                 rx_ring->lbq_prod_idx_db_reg);
1215         }
1216 }
1217
1218 /* Process (refill) a small buffer queue. */
1219 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1220 {
1221         u32 clean_idx = rx_ring->sbq_clean_idx;
1222         u32 start_idx = clean_idx;
1223         struct bq_desc *sbq_desc;
1224         u64 map;
1225         int i;
1226
1227         while (rx_ring->sbq_free_cnt > 16) {
1228                 for (i = 0; i < 16; i++) {
1229                         sbq_desc = &rx_ring->sbq[clean_idx];
1230                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1231                                      "sbq: try cleaning clean_idx = %d.\n",
1232                                      clean_idx);
1233                         if (sbq_desc->p.skb == NULL) {
1234                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1235                                              qdev->ndev,
1236                                              "sbq: getting new skb for index %d.\n",
1237                                              sbq_desc->index);
1238                                 sbq_desc->p.skb =
1239                                     netdev_alloc_skb(qdev->ndev,
1240                                                      SMALL_BUFFER_SIZE);
1241                                 if (sbq_desc->p.skb == NULL) {
1242                                         netif_err(qdev, probe, qdev->ndev,
1243                                                   "Couldn't get an skb.\n");
1244                                         rx_ring->sbq_clean_idx = clean_idx;
1245                                         return;
1246                                 }
1247                                 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1248                                 map = pci_map_single(qdev->pdev,
1249                                                      sbq_desc->p.skb->data,
1250                                                      rx_ring->sbq_buf_size,
1251                                                      PCI_DMA_FROMDEVICE);
1252                                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1253                                         netif_err(qdev, ifup, qdev->ndev,
1254                                                   "PCI mapping failed.\n");
1255                                         rx_ring->sbq_clean_idx = clean_idx;
1256                                         dev_kfree_skb_any(sbq_desc->p.skb);
1257                                         sbq_desc->p.skb = NULL;
1258                                         return;
1259                                 }
1260                                 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1261                                 dma_unmap_len_set(sbq_desc, maplen,
1262                                                   rx_ring->sbq_buf_size);
1263                                 *sbq_desc->addr = cpu_to_le64(map);
1264                         }
1265
1266                         clean_idx++;
1267                         if (clean_idx == rx_ring->sbq_len)
1268                                 clean_idx = 0;
1269                 }
1270                 rx_ring->sbq_clean_idx = clean_idx;
1271                 rx_ring->sbq_prod_idx += 16;
1272                 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1273                         rx_ring->sbq_prod_idx = 0;
1274                 rx_ring->sbq_free_cnt -= 16;
1275         }
1276
1277         if (start_idx != clean_idx) {
1278                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1279                              "sbq: updating prod idx = %d.\n",
1280                              rx_ring->sbq_prod_idx);
1281                 ql_write_db_reg(rx_ring->sbq_prod_idx,
1282                                 rx_ring->sbq_prod_idx_db_reg);
1283         }
1284 }
1285
1286 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1287                                     struct rx_ring *rx_ring)
1288 {
1289         ql_update_sbq(qdev, rx_ring);
1290         ql_update_lbq(qdev, rx_ring);
1291 }
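
/* Both refill paths above work in bursts of 16 descriptors: buffers are
 * replenished while the free counts show enough room, and the hardware is
 * notified by writing the new producer index to the queue's doorbell
 * register only if something was actually refilled.
 */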
1292
1293 /* Unmaps tx buffers.  Can be called from send() if a pci mapping
1294  * fails at some stage, or from the interrupt when a tx completes.
1295  */
1296 static void ql_unmap_send(struct ql_adapter *qdev,
1297                           struct tx_ring_desc *tx_ring_desc, int mapped)
1298 {
1299         int i;
1300         for (i = 0; i < mapped; i++) {
1301                 if (i == 0 || (i == 7 && mapped > 7)) {
1302                         /*
1303                          * Unmap the skb->data area, or the
1304                          * external sglist (AKA the Outbound
1305                          * Address List (OAL)).
1306                          * If it's the zeroth element, then it's
1307                          * the skb->data area.  If it's the 7th
1308                          * element and there are more than 6 frags,
1309                          * then it's an OAL.
1310                          */
1311                         if (i == 7) {
1312                                 netif_printk(qdev, tx_done, KERN_DEBUG,
1313                                              qdev->ndev,
1314                                              "unmapping OAL area.\n");
1315                         }
1316                         pci_unmap_single(qdev->pdev,
1317                                          dma_unmap_addr(&tx_ring_desc->map[i],
1318                                                         mapaddr),
1319                                          dma_unmap_len(&tx_ring_desc->map[i],
1320                                                        maplen),
1321                                          PCI_DMA_TODEVICE);
1322                 } else {
1323                         netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1324                                      "unmapping frag %d.\n", i);
1325                         pci_unmap_page(qdev->pdev,
1326                                        dma_unmap_addr(&tx_ring_desc->map[i],
1327                                                       mapaddr),
1328                                        dma_unmap_len(&tx_ring_desc->map[i],
1329                                                      maplen), PCI_DMA_TODEVICE);
1330                 }
1331         }
1332
1333 }
1334
1335 /* Map the buffers for this transmit.  This will return
1336  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1337  */
1338 static int ql_map_send(struct ql_adapter *qdev,
1339                        struct ob_mac_iocb_req *mac_iocb_ptr,
1340                        struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1341 {
1342         int len = skb_headlen(skb);
1343         dma_addr_t map;
1344         int frag_idx, err, map_idx = 0;
1345         struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1346         int frag_cnt = skb_shinfo(skb)->nr_frags;
1347
1348         if (frag_cnt) {
1349                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1350                              "frag_cnt = %d.\n", frag_cnt);
1351         }
1352         /*
1353          * Map the skb buffer first.
1354          */
1355         map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1356
1357         err = pci_dma_mapping_error(qdev->pdev, map);
1358         if (err) {
1359                 netif_err(qdev, tx_queued, qdev->ndev,
1360                           "PCI mapping failed with error: %d\n", err);
1361
1362                 return NETDEV_TX_BUSY;
1363         }
1364
1365         tbd->len = cpu_to_le32(len);
1366         tbd->addr = cpu_to_le64(map);
1367         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1368         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1369         map_idx++;
1370
1371         /*
1372          * This loop fills the remainder of the 8 address descriptors
1373          * in the IOCB.  If there are more than 7 fragments, then the
1374          * eighth address desc will point to an external list (OAL).
1375          * When this happens, the remainder of the frags will be stored
1376          * in this list.
1377          */
1378         for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1379                 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1380                 tbd++;
1381                 if (frag_idx == 6 && frag_cnt > 7) {
1382                         /* Let's tack on an sglist.
1383                          * Our control block will now
1384                          * look like this:
1385                          * iocb->seg[0] = skb->data
1386                          * iocb->seg[1] = frag[0]
1387                          * iocb->seg[2] = frag[1]
1388                          * iocb->seg[3] = frag[2]
1389                          * iocb->seg[4] = frag[3]
1390                          * iocb->seg[5] = frag[4]
1391                          * iocb->seg[6] = frag[5]
1392                          * iocb->seg[7] = ptr to OAL (external sglist)
1393                          * oal->seg[0] = frag[6]
1394                          * oal->seg[1] = frag[7]
1395                          * oal->seg[2] = frag[8]
1396                          * oal->seg[3] = frag[9]
1397                          * oal->seg[4] = frag[10]
1398                          *      etc...
1399                          */
1400                         /* Tack on the OAL in the eighth segment of IOCB. */
1401                         map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1402                                              sizeof(struct oal),
1403                                              PCI_DMA_TODEVICE);
1404                         err = pci_dma_mapping_error(qdev->pdev, map);
1405                         if (err) {
1406                                 netif_err(qdev, tx_queued, qdev->ndev,
1407                                           "PCI mapping outbound address list with error: %d\n",
1408                                           err);
1409                                 goto map_error;
1410                         }
1411
1412                         tbd->addr = cpu_to_le64(map);
1413                         /*
1414                          * The length is the number of fragments
1415                          * that remain to be mapped times the size
1416                          * of one OAL entry (struct tx_buf_desc).
1417                          */
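                        /* Example: with 10 frags this executes at
                         * frag_idx == 6, so 4 descriptors remain and
                         * len = 4 * sizeof(struct tx_buf_desc), with
                         * TX_DESC_C or'd in as done below.
                         */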
1418                         tbd->len =
1419                             cpu_to_le32((sizeof(struct tx_buf_desc) *
1420                                          (frag_cnt - frag_idx)) | TX_DESC_C);
1421                         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1422                                            map);
1423                         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1424                                           sizeof(struct oal));
1425                         tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1426                         map_idx++;
1427                 }
1428
1429                 map =
1430                     pci_map_page(qdev->pdev, frag->page,
1431                                  frag->page_offset, frag->size,
1432                                  PCI_DMA_TODEVICE);
1433
1434                 err = pci_dma_mapping_error(qdev->pdev, map);
1435                 if (err) {
1436                         netif_err(qdev, tx_queued, qdev->ndev,
1437                                   "PCI mapping frags failed with error: %d.\n",
1438                                   err);
1439                         goto map_error;
1440                 }
1441
1442                 tbd->addr = cpu_to_le64(map);
1443                 tbd->len = cpu_to_le32(frag->size);
1444                 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1445                 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1446                                   frag->size);
1447
1448         }
1449         /* Save the number of segments we've mapped. */
1450         tx_ring_desc->map_cnt = map_idx;
1451         /* Terminate the last segment. */
1452         tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1453         return NETDEV_TX_OK;
1454
1455 map_error:
1456         /*
1457          * If the first frag mapping failed, map_idx will be 1, which
1458          * makes ql_unmap_send() unmap only the skb->data area.
1459          * Otherwise we pass in the number of segments that mapped
1460          * successfully so they can be unmapped.
1461          */
1462         ql_unmap_send(qdev, tx_ring_desc, map_idx);
1463         return NETDEV_TX_BUSY;
1464 }
1465
1466 /* Process an inbound completion from an rx ring. */
1467 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1468                                         struct rx_ring *rx_ring,
1469                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1470                                         u32 length,
1471                                         u16 vlan_id)
1472 {
1473         struct sk_buff *skb;
1474         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1475         struct skb_frag_struct *rx_frag;
1476         int nr_frags;
1477         struct napi_struct *napi = &rx_ring->napi;
1478
1479         napi->dev = qdev->ndev;
1480
1481         skb = napi_get_frags(napi);
1482         if (!skb) {
1483                 netif_err(qdev, drv, qdev->ndev,
1484                           "Couldn't get an skb, exiting.\n");
1485                 rx_ring->rx_dropped++;
1486                 put_page(lbq_desc->p.pg_chunk.page);
1487                 return;
1488         }
1489         prefetch(lbq_desc->p.pg_chunk.va);
1490         rx_frag = skb_shinfo(skb)->frags;
1491         nr_frags = skb_shinfo(skb)->nr_frags;
1492         rx_frag += nr_frags;
1493         rx_frag->page = lbq_desc->p.pg_chunk.page;
1494         rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
1495         rx_frag->size = length;
1496
1497         skb->len += length;
1498         skb->data_len += length;
1499         skb->truesize += length;
1500         skb_shinfo(skb)->nr_frags++;
1501
1502         rx_ring->rx_packets++;
1503         rx_ring->rx_bytes += length;
1504         skb->ip_summed = CHECKSUM_UNNECESSARY;
1505         skb_record_rx_queue(skb, rx_ring->cq_id);
1506         if (qdev->vlgrp && (vlan_id != 0xffff))
1507                 vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
1508         else
1509                 napi_gro_frags(napi);
1510 }
1511
1512 /* Process an inbound completion from an rx ring. */
1513 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1514                                         struct rx_ring *rx_ring,
1515                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1516                                         u32 length,
1517                                         u16 vlan_id)
1518 {
1519         struct net_device *ndev = qdev->ndev;
1520         struct sk_buff *skb = NULL;
1521         void *addr;
1522         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1523         struct napi_struct *napi = &rx_ring->napi;
1524
1525         skb = netdev_alloc_skb(ndev, length);
1526         if (!skb) {
1527                 netif_err(qdev, drv, qdev->ndev,
1528                           "Couldn't get an skb, need to unwind!\n");
1529                 rx_ring->rx_dropped++;
1530                 put_page(lbq_desc->p.pg_chunk.page);
1531                 return;
1532         }
1533
1534         addr = lbq_desc->p.pg_chunk.va;
1535         prefetch(addr);
1536
1537
1538         /* Frame error, so drop the packet. */
1539         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1540                 netif_info(qdev, drv, qdev->ndev,
1541                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1542                 rx_ring->rx_errors++;
1543                 goto err_out;
1544         }
1545
1546         /* The max framesize filter on this chip is set higher than
1547          * MTU since FCoE uses 2k frames.
1548          */
1549         if (length > ndev->mtu + ETH_HLEN) {
1550                 netif_err(qdev, drv, qdev->ndev,
1551                           "Frame too long, dropping.\n");
1552                 rx_ring->rx_dropped++;
1553                 goto err_out;
1554         }
1555         memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1556         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1557                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1558                      length);
1559         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1560                                 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1561                                 length-ETH_HLEN);
1562         skb->len += length-ETH_HLEN;
1563         skb->data_len += length-ETH_HLEN;
1564         skb->truesize += length-ETH_HLEN;
1565
1566         rx_ring->rx_packets++;
1567         rx_ring->rx_bytes += skb->len;
1568         skb->protocol = eth_type_trans(skb, ndev);
1569         skb_checksum_none_assert(skb);
1570
1571         if (qdev->rx_csum &&
1572                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1573                 /* TCP frame. */
1574                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1575                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1576                                      "TCP checksum done!\n");
1577                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1578                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1579                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1580                         /* Unfragmented ipv4 UDP frame. */
1581                         struct iphdr *iph = (struct iphdr *) skb->data;
1582                         if (!(iph->frag_off &
1583                                 cpu_to_be16(IP_MF|IP_OFFSET))) {
1584                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1585                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1586                                              qdev->ndev,
1587                                              "UDP checksum done!\n");
1588                         }
1589                 }
1590         }
1591
1592         skb_record_rx_queue(skb, rx_ring->cq_id);
1593         if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1594                 if (qdev->vlgrp && (vlan_id != 0xffff))
1595                         vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
1596                 else
1597                         napi_gro_receive(napi, skb);
1598         } else {
1599                 if (qdev->vlgrp && (vlan_id != 0xffff))
1600                         vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1601                 else
1602                         netif_receive_skb(skb);
1603         }
1604         return;
1605 err_out:
1606         dev_kfree_skb_any(skb);
1607         put_page(lbq_desc->p.pg_chunk.page);
1608 }
1609
1610 /* Process an inbound completion from an rx ring. */
1611 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1612                                         struct rx_ring *rx_ring,
1613                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1614                                         u32 length,
1615                                         u16 vlan_id)
1616 {
1617         struct net_device *ndev = qdev->ndev;
1618         struct sk_buff *skb = NULL;
1619         struct sk_buff *new_skb = NULL;
1620         struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1621
1622         skb = sbq_desc->p.skb;
1623         /* Allocate new_skb and copy */
1624         new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1625         if (new_skb == NULL) {
1626                 netif_err(qdev, probe, qdev->ndev,
1627                           "No skb available, drop the packet.\n");
1628                 rx_ring->rx_dropped++;
1629                 return;
1630         }
1631         skb_reserve(new_skb, NET_IP_ALIGN);
1632         memcpy(skb_put(new_skb, length), skb->data, length);
1633         skb = new_skb;
1634
1635         /* Frame error, so drop the packet. */
1636         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1637                 netif_info(qdev, drv, qdev->ndev,
1638                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1639                 dev_kfree_skb_any(skb);
1640                 rx_ring->rx_errors++;
1641                 return;
1642         }
1643
1644         /* loopback self test for ethtool */
1645         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1646                 ql_check_lb_frame(qdev, skb);
1647                 dev_kfree_skb_any(skb);
1648                 return;
1649         }
1650
1651         /* The max framesize filter on this chip is set higher than
1652          * MTU since FCoE uses 2k frames.
1653          */
1654         if (skb->len > ndev->mtu + ETH_HLEN) {
1655                 dev_kfree_skb_any(skb);
1656                 rx_ring->rx_dropped++;
1657                 return;
1658         }
1659
1660         prefetch(skb->data);
1661         skb->dev = ndev;
1662         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1663                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1664                              "%s Multicast.\n",
1665                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1666                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1667                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1668                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1669                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1670                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1671         }
1672         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1673                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1674                              "Promiscuous Packet.\n");
1675
1676         rx_ring->rx_packets++;
1677         rx_ring->rx_bytes += skb->len;
1678         skb->protocol = eth_type_trans(skb, ndev);
1679         skb_checksum_none_assert(skb);
1680
1681         /* If rx checksum is on, and there are no
1682          * csum or frame errors.
1683          */
1684         if (qdev->rx_csum &&
1685                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1686                 /* TCP frame. */
1687                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1688                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1689                                      "TCP checksum done!\n");
1690                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1691                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1692                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1693                         /* Unfragmented ipv4 UDP frame. */
1694                         struct iphdr *iph = (struct iphdr *) skb->data;
1695                         if (!(iph->frag_off &
1696                                 ntohs(IP_MF|IP_OFFSET))) {
1697                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1698                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1699                                              qdev->ndev,
1700                                              "UDP checksum done!\n");
1701                         }
1702                 }
1703         }
1704
1705         skb_record_rx_queue(skb, rx_ring->cq_id);
1706         if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1707                 if (qdev->vlgrp && (vlan_id != 0xffff))
1708                         vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1709                                                 vlan_id, skb);
1710                 else
1711                         napi_gro_receive(&rx_ring->napi, skb);
1712         } else {
1713                 if (qdev->vlgrp && (vlan_id != 0xffff))
1714                         vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1715                 else
1716                         netif_receive_skb(skb);
1717         }
1718 }
1719
1720 static void ql_realign_skb(struct sk_buff *skb, int len)
1721 {
1722         void *temp_addr = skb->data;
1723
1724         /* Undo the skb_reserve(skb,32) we did before
1725          * giving to hardware, and realign data on
1726          * a 2-byte boundary.
1727          */
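        /* Worked example, assuming the common NET_IP_ALIGN of 2 and the
         * QLGE_SB_PAD of 32 noted above: data and tail move back by
         * 32 - 2 = 30 bytes before the copy below puts the received
         * bytes back in place.
         */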
1728         skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1729         skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1730         skb_copy_to_linear_data(skb, temp_addr,
1731                 (unsigned int)len);
1732 }
1733
1734 /*
1735  * This function builds an skb for the given inbound
1736  * completion.  It will be rewritten for readability in the near
1737  * future, but for now it works well.
1738  */
1739 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1740                                        struct rx_ring *rx_ring,
1741                                        struct ib_mac_iocb_rsp *ib_mac_rsp)
1742 {
1743         struct bq_desc *lbq_desc;
1744         struct bq_desc *sbq_desc;
1745         struct sk_buff *skb = NULL;
1746         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1747        u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1748
1749         /*
1750          * Handle the header buffer if present.
1751          */
1752         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1753             ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1754                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1755                              "Header of %d bytes in small buffer.\n", hdr_len);
1756                 /*
1757                  * Headers fit nicely into a small buffer.
1758                  */
1759                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1760                 pci_unmap_single(qdev->pdev,
1761                                 dma_unmap_addr(sbq_desc, mapaddr),
1762                                 dma_unmap_len(sbq_desc, maplen),
1763                                 PCI_DMA_FROMDEVICE);
1764                 skb = sbq_desc->p.skb;
1765                 ql_realign_skb(skb, hdr_len);
1766                 skb_put(skb, hdr_len);
1767                 sbq_desc->p.skb = NULL;
1768         }
1769
1770         /*
1771          * Handle the data buffer(s).
1772          */
1773         if (unlikely(!length)) {        /* Is there data too? */
1774                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1775                              "No Data buffer in this packet.\n");
1776                 return skb;
1777         }
1778
1779         if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1780                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1781                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1782                                      "Headers in small, data of %d bytes in small, combine them.\n",
1783                                      length);
1784                         /*
1785                          * Data is less than small buffer size so it's
1786                          * stuffed in a small buffer.
1787                          * For this case we append the data
1788                          * from the "data" small buffer to the "header" small
1789                          * buffer.
1790                          */
1791                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1792                         pci_dma_sync_single_for_cpu(qdev->pdev,
1793                                                     dma_unmap_addr
1794                                                     (sbq_desc, mapaddr),
1795                                                     dma_unmap_len
1796                                                     (sbq_desc, maplen),
1797                                                     PCI_DMA_FROMDEVICE);
1798                         memcpy(skb_put(skb, length),
1799                                sbq_desc->p.skb->data, length);
1800                         pci_dma_sync_single_for_device(qdev->pdev,
1801                                                        dma_unmap_addr
1802                                                        (sbq_desc,
1803                                                         mapaddr),
1804                                                        dma_unmap_len
1805                                                        (sbq_desc,
1806                                                         maplen),
1807                                                        PCI_DMA_FROMDEVICE);
1808                 } else {
1809                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1810                                      "%d bytes in a single small buffer.\n",
1811                                      length);
1812                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1813                         skb = sbq_desc->p.skb;
1814                         ql_realign_skb(skb, length);
1815                         skb_put(skb, length);
1816                         pci_unmap_single(qdev->pdev,
1817                                          dma_unmap_addr(sbq_desc,
1818                                                         mapaddr),
1819                                          dma_unmap_len(sbq_desc,
1820                                                        maplen),
1821                                          PCI_DMA_FROMDEVICE);
1822                         sbq_desc->p.skb = NULL;
1823                 }
1824         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1825                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1826                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1827                                      "Header in small, %d bytes in large. Chain large to small!\n",
1828                                      length);
1829                         /*
1830                          * The data is in a single large buffer.  We
1831                          * chain it to the header buffer's skb and let
1832                          * it rip.
1833                          */
1834                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1835                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1836                                      "Chaining page at offset = %d, for %d bytes  to skb.\n",
1837                                      lbq_desc->p.pg_chunk.offset, length);
1838                         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1839                                                 lbq_desc->p.pg_chunk.offset,
1840                                                 length);
1841                         skb->len += length;
1842                         skb->data_len += length;
1843                         skb->truesize += length;
1844                 } else {
1845                         /*
1846                          * The headers and data are in a single large buffer. We
1847                          * chain it to a new skb and pull the ethernet header. This
1848                          * can happen with jumbo mtu on a non-TCP/UDP frame.
1849                          */
1850                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1851                         skb = netdev_alloc_skb(qdev->ndev, length);
1852                         if (skb == NULL) {
1853                                 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1854                                              "No skb available, drop the packet.\n");
1855                                 return NULL;
1856                         }
1857                         pci_unmap_page(qdev->pdev,
1858                                        dma_unmap_addr(lbq_desc,
1859                                                       mapaddr),
1860                                        dma_unmap_len(lbq_desc, maplen),
1861                                        PCI_DMA_FROMDEVICE);
1862                         skb_reserve(skb, NET_IP_ALIGN);
1863                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1864                                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1865                                      length);
1866                         skb_fill_page_desc(skb, 0,
1867                                                 lbq_desc->p.pg_chunk.page,
1868                                                 lbq_desc->p.pg_chunk.offset,
1869                                                 length);
1870                         skb->len += length;
1871                         skb->data_len += length;
1872                         skb->truesize += length;
1873                         length -= length;
1874                         __pskb_pull_tail(skb,
1875                                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1876                                 VLAN_ETH_HLEN : ETH_HLEN);
1877                 }
1878         } else {
1879                 /*
1880                  * The data is in a chain of large buffers
1881                  * pointed to by a small buffer.  We loop
1882                  * through and chain them to our small header
1883                  * buffer's skb.
1884                  * frags:  There are 18 max frags and our small
1885                  *         buffer will hold 32 of them. The thing is,
1886                  *         we'll use 3 max for our 9000 byte jumbo
1887                  *         frames.  If the MTU goes up we could
1888                  *         eventually be in trouble.
1889                  */
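                /* Illustration only (rx_ring->lbq_buf_size sets the real
                 * chunk size): with 4 KiB chunks a 9000-byte frame needs
                 * 3 chunks, which matches the "3 max" estimate above.
                 */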
1890                 int size, i = 0;
1891                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1892                 pci_unmap_single(qdev->pdev,
1893                                  dma_unmap_addr(sbq_desc, mapaddr),
1894                                  dma_unmap_len(sbq_desc, maplen),
1895                                  PCI_DMA_FROMDEVICE);
1896                 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1897                         /*
1898                          * This is a non-TCP/UDP IP frame, so
1899                          * the headers aren't split into a small
1900                          * buffer.  We have to use the small buffer
1901                          * that contains our sg list as our skb to
1902                          * send upstairs. Copy the sg list here to
1903                          * a local buffer and use it to find the
1904                          * pages to chain.
1905                          */
1906                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1907                                      "%d bytes of headers & data in chain of large.\n",
1908                                      length);
1909                         skb = sbq_desc->p.skb;
1910                         sbq_desc->p.skb = NULL;
1911                         skb_reserve(skb, NET_IP_ALIGN);
1912                 }
1913                 while (length > 0) {
1914                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1915                         size = (length < rx_ring->lbq_buf_size) ? length :
1916                                 rx_ring->lbq_buf_size;
1917
1918                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1919                                      "Adding page %d to skb for %d bytes.\n",
1920                                      i, size);
1921                         skb_fill_page_desc(skb, i,
1922                                                 lbq_desc->p.pg_chunk.page,
1923                                                 lbq_desc->p.pg_chunk.offset,
1924                                                 size);
1925                         skb->len += size;
1926                         skb->data_len += size;
1927                         skb->truesize += size;
1928                         length -= size;
1929                         i++;
1930                 }
1931                 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1932                                 VLAN_ETH_HLEN : ETH_HLEN);
1933         }
1934         return skb;
1935 }
1936
1937 /* Process an inbound completion from an rx ring. */
1938 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1939                                    struct rx_ring *rx_ring,
1940                                    struct ib_mac_iocb_rsp *ib_mac_rsp,
1941                                    u16 vlan_id)
1942 {
1943         struct net_device *ndev = qdev->ndev;
1944         struct sk_buff *skb = NULL;
1945
1946         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1947
1948         skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1949         if (unlikely(!skb)) {
1950                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1951                              "No skb available, drop packet.\n");
1952                 rx_ring->rx_dropped++;
1953                 return;
1954         }
1955
1956         /* Frame error, so drop the packet. */
1957         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1958                 netif_info(qdev, drv, qdev->ndev,
1959                           "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1960                 dev_kfree_skb_any(skb);
1961                 rx_ring->rx_errors++;
1962                 return;
1963         }
1964
1965         /* The max framesize filter on this chip is set higher than
1966          * MTU since FCoE uses 2k frames.
1967          */
1968         if (skb->len > ndev->mtu + ETH_HLEN) {
1969                 dev_kfree_skb_any(skb);
1970                 rx_ring->rx_dropped++;
1971                 return;
1972         }
1973
1974         /* loopback self test for ethtool */
1975         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1976                 ql_check_lb_frame(qdev, skb);
1977                 dev_kfree_skb_any(skb);
1978                 return;
1979         }
1980
1981         prefetch(skb->data);
1982         skb->dev = ndev;
1983         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1984                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1985                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1986                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1987                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1988                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1989                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1990                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1991                 rx_ring->rx_multicast++;
1992         }
1993         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1994                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1995                              "Promiscuous Packet.\n");
1996         }
1997
1998         skb->protocol = eth_type_trans(skb, ndev);
1999         skb_checksum_none_assert(skb);
2000
2001         /* If rx checksum is on, and there are no
2002          * csum or frame errors.
2003          */
2004         if (qdev->rx_csum &&
2005                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2006                 /* TCP frame. */
2007                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
2008                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2009                                      "TCP checksum done!\n");
2010                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2011                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2012                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2013                         /* Unfragmented ipv4 UDP frame. */
2014                         struct iphdr *iph = (struct iphdr *) skb->data;
2015                         if (!(iph->frag_off &
2016                                 ntohs(IP_MF|IP_OFFSET))) {
2017                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2018                                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2019                                              "UDP checksum done!\n");
2020                         }
2021                 }
2022         }
2023
2024         rx_ring->rx_packets++;
2025         rx_ring->rx_bytes += skb->len;
2026         skb_record_rx_queue(skb, rx_ring->cq_id);
2027         if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2028                 if (qdev->vlgrp &&
2029                         (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2030                         (vlan_id != 0))
2031                         vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
2032                                 vlan_id, skb);
2033                 else
2034                         napi_gro_receive(&rx_ring->napi, skb);
2035         } else {
2036                 if (qdev->vlgrp &&
2037                         (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2038                         (vlan_id != 0))
2039                         vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
2040                 else
2041                         netif_receive_skb(skb);
2042         }
2043 }
2044
2045 /* Process an inbound completion from an rx ring. */
2046 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2047                                         struct rx_ring *rx_ring,
2048                                         struct ib_mac_iocb_rsp *ib_mac_rsp)
2049 {
2050         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2051         u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2052                         ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2053                         IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
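        /* 0xffff is used as a "no VLAN tag" sentinel: the rx handlers
         * above only take the VLAN-accelerated receive paths when
         * vlan_id != 0xffff (the split-header path also requires the
         * IB_MAC_IOCB_RSP_V flag and a non-zero vlan_id).
         */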
2054
2055         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2056
2057         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2058                 /* The data and headers are split into
2059                  * separate buffers.
2060                  */
2061                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2062                                                 vlan_id);
2063         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2064                 /* The data fit in a single small buffer.
2065                  * Allocate a new skb, copy the data and
2066                  * return the buffer to the free pool.
2067                  */
2068                 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2069                                                 length, vlan_id);
2070         } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2071                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2072                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2073                 /* TCP packet in a page chunk that's been checksummed.
2074                  * Tack it on to our GRO skb and let it go.
2075                  */
2076                 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2077                                                 length, vlan_id);
2078         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2079                 /* Non-TCP packet in a page chunk. Allocate an
2080                  * skb, tack it on frags, and send it up.
2081                  */
2082                 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2083                                                 length, vlan_id);
2084         } else {
2085                 /* Non-TCP/UDP large frames that span multiple buffers
2086                  * can be processed correctly by the split frame logic.
2087                  */
2088                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2089                                                 vlan_id);
2090         }
2091
2092         return (unsigned long)length;
2093 }
2094
2095 /* Process an outbound completion from an rx ring. */
2096 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2097                                    struct ob_mac_iocb_rsp *mac_rsp)
2098 {
2099         struct tx_ring *tx_ring;
2100         struct tx_ring_desc *tx_ring_desc;
2101
2102         QL_DUMP_OB_MAC_RSP(mac_rsp);
2103         tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2104         tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2105         ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2106         tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2107         tx_ring->tx_packets++;
2108         dev_kfree_skb(tx_ring_desc->skb);
2109         tx_ring_desc->skb = NULL;
2110
2111         if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2112                                         OB_MAC_IOCB_RSP_S |
2113                                         OB_MAC_IOCB_RSP_L |
2114                                         OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2115                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2116                         netif_warn(qdev, tx_done, qdev->ndev,
2117                                    "Total descriptor length did not match transfer length.\n");
2118                 }
2119                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2120                         netif_warn(qdev, tx_done, qdev->ndev,
2121                                    "Frame too short to be valid, not sent.\n");
2122                 }
2123                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2124                         netif_warn(qdev, tx_done, qdev->ndev,
2125                                    "Frame too long, but sent anyway.\n");
2126                 }
2127                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2128                         netif_warn(qdev, tx_done, qdev->ndev,
2129                                    "PCI backplane error. Frame not sent.\n");
2130                 }
2131         }
2132         atomic_inc(&tx_ring->tx_count);
2133 }
2134
2135 /* Fire up a handler to reset the MPI processor. */
2136 void ql_queue_fw_error(struct ql_adapter *qdev)
2137 {
2138         ql_link_off(qdev);
2139         queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2140 }
2141
2142 void ql_queue_asic_error(struct ql_adapter *qdev)
2143 {
2144         ql_link_off(qdev);
2145         ql_disable_interrupts(qdev);
2146         /* Clear adapter up bit to signal the recovery
2147          * process that it shouldn't kill the reset worker
2148          * thread
2149          */
2150         clear_bit(QL_ADAPTER_UP, &qdev->flags);
2151         queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2152 }
2153
2154 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2155                                     struct ib_ae_iocb_rsp *ib_ae_rsp)
2156 {
2157         switch (ib_ae_rsp->event) {
2158         case MGMT_ERR_EVENT:
2159                 netif_err(qdev, rx_err, qdev->ndev,
2160                           "Management Processor Fatal Error.\n");
2161                 ql_queue_fw_error(qdev);
2162                 return;
2163
2164         case CAM_LOOKUP_ERR_EVENT:
2165                 netif_err(qdev, link, qdev->ndev,
2166                           "Multiple CAM hits occurred during lookup.\n");
2167                 netif_err(qdev, drv, qdev->ndev,
2168                           "This event shouldn't occur.\n");
2169                 ql_queue_asic_error(qdev);
2170                 return;
2171
2172         case SOFT_ECC_ERROR_EVENT:
2173                 netif_err(qdev, rx_err, qdev->ndev,
2174                           "Soft ECC error detected.\n");
2175                 ql_queue_asic_error(qdev);
2176                 break;
2177
2178         case PCI_ERR_ANON_BUF_RD:
2179                 netif_err(qdev, rx_err, qdev->ndev,
2180                           "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
2181                           ib_ae_rsp->q_id);
2182                 ql_queue_asic_error(qdev);
2183                 break;
2184
2185         default:
2186                 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2187                           ib_ae_rsp->event);
2188                 ql_queue_asic_error(qdev);
2189                 break;
2190         }
2191 }
2192
2193 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2194 {
2195         struct ql_adapter *qdev = rx_ring->qdev;
2196         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2197         struct ob_mac_iocb_rsp *net_rsp = NULL;
2198         int count = 0;
2199
2200         struct tx_ring *tx_ring;
2201         /* While there are entries in the completion queue. */
2202         while (prod != rx_ring->cnsmr_idx) {
2203
2204                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2205                              "cq_id = %d, prod = %d, cnsmr = %d.\n",
2206                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2207
2208                 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2209                 rmb();
2210                 switch (net_rsp->opcode) {
2211
2212                 case OPCODE_OB_MAC_TSO_IOCB:
2213                 case OPCODE_OB_MAC_IOCB:
2214                         ql_process_mac_tx_intr(qdev, net_rsp);
2215                         break;
2216                 default:
2217                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2218                                      "Hit default case, not handled! Ignoring the completion, opcode = %x.\n",
2219                                      net_rsp->opcode);
2220                 }
2221                 count++;
2222                 ql_update_cq(rx_ring);
2223                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2224         }
2225         if (!net_rsp)
2226                 return 0;
2227         ql_write_cq_idx(rx_ring);
2228         tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2229         if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2230                 if (atomic_read(&tx_ring->queue_stopped) &&
2231                     (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2232                         /*
2233                          * The queue got stopped because the tx_ring was full.
2234                          * Wake it up, because it's now at least 25% empty.
2235                          */
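                        /* e.g. with a hypothetical 1024-entry ring this
                         * rewakes the queue once more than 256
                         * descriptors (wq_len / 4) are free again.
                         */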
2236                         netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2237         }
2238
2239         return count;
2240 }
2241
2242 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2243 {
2244         struct ql_adapter *qdev = rx_ring->qdev;
2245         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2246         struct ql_net_rsp_iocb *net_rsp;
2247         int count = 0;
2248
2249         /* While there are entries in the completion queue. */
2250         while (prod != rx_ring->cnsmr_idx) {
2251
2252                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2253                              "cq_id = %d, prod = %d, cnsmr = %d.\n",
2254                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2255
2256                 net_rsp = rx_ring->curr_entry;
2257                 rmb();
2258                 switch (net_rsp->opcode) {
2259                 case OPCODE_IB_MAC_IOCB:
2260                         ql_process_mac_rx_intr(qdev, rx_ring,
2261                                                (struct ib_mac_iocb_rsp *)
2262                                                net_rsp);
2263                         break;
2264
2265                 case OPCODE_IB_AE_IOCB:
2266                         ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2267                                                 net_rsp);
2268                         break;
2269                 default:
2270                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2271                                      "Hit default case, not handled! Dropping the packet, opcode = %x.\n",
2272                                      net_rsp->opcode);
2273                         break;
2274                 }
2275                 count++;
2276                 ql_update_cq(rx_ring);
2277                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2278                 if (count == budget)
2279                         break;
2280         }
2281         ql_update_buffer_queues(qdev, rx_ring);
2282         ql_write_cq_idx(rx_ring);
2283         return count;
2284 }
2285
2286 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2287 {
2288         struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2289         struct ql_adapter *qdev = rx_ring->qdev;
2290         struct rx_ring *trx_ring;
2291         int i, work_done = 0;
2292         struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2293
2294         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2295                      "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2296
2297         /* Service the TX rings first.  They start
2298          * right after the RSS rings. */
2299         for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2300                 trx_ring = &qdev->rx_ring[i];
2301                 /* If this TX completion ring belongs to this vector and
2302                  * it's not empty then service it.
2303                  */
2304                 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2305                         (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2306                                         trx_ring->cnsmr_idx)) {
2307                         netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2308                                      "%s: Servicing TX completion ring %d.\n",
2309                                      __func__, trx_ring->cq_id);
2310                         ql_clean_outbound_rx_ring(trx_ring);
2311                 }
2312         }
2313
2314         /*
2315          * Now service the RSS ring if it's active.
2316          */
2317         if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2318                                         rx_ring->cnsmr_idx) {
2319                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2320                              "%s: Servicing RX completion ring %d.\n",
2321                              __func__, rx_ring->cq_id);
2322                 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2323         }
2324
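        /* Standard NAPI contract: only complete the poll and re-enable
         * the completion interrupt when less than the full budget was
         * consumed; otherwise the core will poll again.
         */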
2325         if (work_done < budget) {
2326                 napi_complete(napi);
2327                 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2328         }
2329         return work_done;
2330 }
2331
2332 static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
2333 {
2334         struct ql_adapter *qdev = netdev_priv(ndev);
2335
2336         qdev->vlgrp = grp;
2337         if (grp) {
2338                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2339                              "Turning on VLAN in NIC_RCV_CFG.\n");
2340                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2341                            NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2342         } else {
2343                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2344                              "Turning off VLAN in NIC_RCV_CFG.\n");
2345                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2346         }
2347 }
2348
2349 static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2350 {
2351         struct ql_adapter *qdev = netdev_priv(ndev);
2352         u32 enable_bit = MAC_ADDR_E;
2353         int status;
2354
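        /* Adding a VID programs MAC_ADDR_E (the enable bit) into the
         * VLAN entry for that VID; qlge_vlan_rx_kill_vid() below clears
         * the same entry by writing a zero enable bit.
         */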
2355         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2356         if (status)
2357                 return;
2358         if (ql_set_mac_addr_reg
2359             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2360                 netif_err(qdev, ifup, qdev->ndev,
2361                           "Failed to init vlan address.\n");
2362         }
2363         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2364 }
2365
2366 static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2367 {
2368         struct ql_adapter *qdev = netdev_priv(ndev);
2369         u32 enable_bit = 0;
2370         int status;
2371
2372         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2373         if (status)
2374                 return;
2375
2376         if (ql_set_mac_addr_reg
2377             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2378                 netif_err(qdev, ifup, qdev->ndev,
2379                           "Failed to clear vlan address.\n");
2380         }
2381         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2382
2383 }
2384
2385 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2386 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2387 {
2388         struct rx_ring *rx_ring = dev_id;
2389         napi_schedule(&rx_ring->napi);
2390         return IRQ_HANDLED;
2391 }
2392
2393 /* This handles a fatal error, MPI activity, and the default
2394  * rx_ring in an MSI-X multiple vector environment.
2395  * In an MSI/Legacy environment it also processes the rest of
2396  * the rx_rings.
2397  */
2398 static irqreturn_t qlge_isr(int irq, void *dev_id)
2399 {
2400         struct rx_ring *rx_ring = dev_id;
2401         struct ql_adapter *qdev = rx_ring->qdev;
2402         struct intr_context *intr_context = &qdev->intr_context[0];
2403         u32 var;
2404         int work_done = 0;
2405
2406         spin_lock(&qdev->hw_lock);
2407         if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2408                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2409                              "Shared Interrupt, Not ours!\n");
2410                 spin_unlock(&qdev->hw_lock);
2411                 return IRQ_NONE;
2412         }
2413         spin_unlock(&qdev->hw_lock);
2414
2415         var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2416
2417         /*
2418          * Check for fatal error.
2419          */
2420         if (var & STS_FE) {
2421                 ql_queue_asic_error(qdev);
2422                 netif_err(qdev, intr, qdev->ndev,
2423                           "Got fatal error, STS = %x.\n", var);
2424                 var = ql_read32(qdev, ERR_STS);
2425                 netif_err(qdev, intr, qdev->ndev,
2426                           "Resetting chip. Error Status Register = 0x%x\n", var);
2427                 return IRQ_HANDLED;
2428         }
2429
2430         /*
2431          * Check MPI processor activity.
2432          */
2433         if ((var & STS_PI) &&
2434                 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2435                 /*
2436                  * We've got an async event or mailbox completion.
2437                  * Handle it and clear the source of the interrupt.
2438                  */
2439                 netif_err(qdev, intr, qdev->ndev,
2440                           "Got MPI processor interrupt.\n");
2441                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2442                 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2443                 queue_delayed_work_on(smp_processor_id(),
2444                                 qdev->workqueue, &qdev->mpi_work, 0);
2445                 work_done++;
2446         }
2447
2448         /*
2449          * Get the bit-mask that shows the active queues for this
2450          * pass.  Compare it to the queues that this irq services
2451          * and call napi if there's a match.
2452          */
2453         var = ql_read32(qdev, ISR1);
2454         if (var & intr_context->irq_mask) {
2455                 netif_info(qdev, intr, qdev->ndev,
2456                            "Waking handler for rx_ring[0].\n");
2457                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2458                 napi_schedule(&rx_ring->napi);
2459                 work_done++;
2460         }
2461         ql_enable_completion_interrupt(qdev, intr_context->intr);
2462         return work_done ? IRQ_HANDLED : IRQ_NONE;
2463 }
2464
2465 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2466 {
2467
2468         if (skb_is_gso(skb)) {
2469                 int err;
2470                 if (skb_header_cloned(skb)) {
2471                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2472                         if (err)
2473                                 return err;
2474                 }
2475
2476                 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2477                 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2478                 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2479                 mac_iocb_ptr->total_hdrs_len =
2480                     cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2481                 mac_iocb_ptr->net_trans_offset =
2482                     cpu_to_le16(skb_network_offset(skb) |
2483                                 skb_transport_offset(skb)
2484                                 << OB_MAC_TRANSPORT_HDR_SHIFT);
2485                 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2486                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
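                /* Seed the TCP checksum with the pseudo-header sum
                 * (length field zero) so the hardware can fold in the
                 * per-segment length and data checksum for each LSO
                 * segment it generates.
                 */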
2487                 if (likely(skb->protocol == htons(ETH_P_IP))) {
2488                         struct iphdr *iph = ip_hdr(skb);
2489                         iph->check = 0;
2490                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2491                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2492                                                                  iph->daddr, 0,
2493                                                                  IPPROTO_TCP,
2494                                                                  0);
2495                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2496                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2497                         tcp_hdr(skb)->check =
2498                             ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2499                                              &ipv6_hdr(skb)->daddr,
2500                                              0, IPPROTO_TCP, 0);
2501                 }
2502                 return 1;
2503         }
2504         return 0;
2505 }
2506
2507 static void ql_hw_csum_setup(struct sk_buff *skb,
2508                              struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2509 {
2510         int len;
2511         struct iphdr *iph = ip_hdr(skb);
2512         __sum16 *check;
2513         mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2514         mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2515         mac_iocb_ptr->net_trans_offset =
2516                 cpu_to_le16(skb_network_offset(skb) |
2517                 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2518
2519         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2520         len = (ntohs(iph->tot_len) - (iph->ihl << 2));
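        /* len is the L4 length (IP total length minus the IP header);
         * it is folded into the pseudo-header checksum written to *check
         * below, so the chip only has to checksum the TCP/UDP header and
         * payload bytes themselves.
         */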
2521         if (likely(iph->protocol == IPPROTO_TCP)) {
2522                 check = &(tcp_hdr(skb)->check);
2523                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2524                 mac_iocb_ptr->total_hdrs_len =
2525                     cpu_to_le16(skb_transport_offset(skb) +
2526                                 (tcp_hdr(skb)->doff << 2));
2527         } else {
2528                 check = &(udp_hdr(skb)->check);
2529                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2530                 mac_iocb_ptr->total_hdrs_len =
2531                     cpu_to_le16(skb_transport_offset(skb) +
2532                                 sizeof(struct udphdr));
2533         }
2534         *check = ~csum_tcpudp_magic(iph->saddr,
2535                                     iph->daddr, len, iph->protocol, 0);
2536 }
2537
2538 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2539 {
2540         struct tx_ring_desc *tx_ring_desc;
2541         struct ob_mac_iocb_req *mac_iocb_ptr;
2542         struct ql_adapter *qdev = netdev_priv(ndev);
2543         int tso;
2544         struct tx_ring *tx_ring;
2545         u32 tx_ring_idx = (u32) skb->queue_mapping;
2546
2547         tx_ring = &qdev->tx_ring[tx_ring_idx];
2548
2549         if (skb_padto(skb, ETH_ZLEN))
2550                 return NETDEV_TX_OK;
2551
2552         if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2553                 netif_info(qdev, tx_queued, qdev->ndev,
2554                            "%s: shutting down tx queue %d due to lack of resources.\n",
2555                            __func__, tx_ring_idx);
2556                 netif_stop_subqueue(ndev, tx_ring->wq_id);
2557                 atomic_inc(&tx_ring->queue_stopped);
2558                 tx_ring->tx_errors++;
2559                 return NETDEV_TX_BUSY;
2560         }
2561         tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2562         mac_iocb_ptr = tx_ring_desc->queue_entry;
2563         memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2564
2565         mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2566         mac_iocb_ptr->tid = tx_ring_desc->index;
2567         /* We use the upper 32-bits to store the tx queue for this IO.
2568          * When we get the completion we can use it to establish the context.
2569          */
2570         mac_iocb_ptr->txq_idx = tx_ring_idx;
2571         tx_ring_desc->skb = skb;
2572
2573         mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2574
2575         if (vlan_tx_tag_present(skb)) {
2576                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2577                              "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2578                 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2579                 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2580         }
2581         tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2582         if (tso < 0) {
2583                 dev_kfree_skb_any(skb);
2584                 return NETDEV_TX_OK;
2585         } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2586                 ql_hw_csum_setup(skb,
2587                                  (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2588         }
2589         if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2590                         NETDEV_TX_OK) {
2591                 netif_err(qdev, tx_queued, qdev->ndev,
2592                           "Could not map the segments.\n");
2593                 tx_ring->tx_errors++;
2594                 return NETDEV_TX_BUSY;
2595         }
2596         QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2597         tx_ring->prod_idx++;
2598         if (tx_ring->prod_idx == tx_ring->wq_len)
2599                 tx_ring->prod_idx = 0;
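        /* Make sure the IOCB is written to memory before the doorbell
         * write below makes the new producer index visible to the chip.
         */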
2600         wmb();
2601
2602         ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2603         netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2604                      "tx queued, slot %d, len %d\n",
2605                      tx_ring->prod_idx, skb->len);
2606
2607         atomic_dec(&tx_ring->tx_count);
2608         return NETDEV_TX_OK;
2609 }
2610
2611
2612 static void ql_free_shadow_space(struct ql_adapter *qdev)
2613 {
2614         if (qdev->rx_ring_shadow_reg_area) {
2615                 pci_free_consistent(qdev->pdev,
2616                                     PAGE_SIZE,
2617                                     qdev->rx_ring_shadow_reg_area,
2618                                     qdev->rx_ring_shadow_reg_dma);
2619                 qdev->rx_ring_shadow_reg_area = NULL;
2620         }
2621         if (qdev->tx_ring_shadow_reg_area) {
2622                 pci_free_consistent(qdev->pdev,
2623                                     PAGE_SIZE,
2624                                     qdev->tx_ring_shadow_reg_area,
2625                                     qdev->tx_ring_shadow_reg_dma);
2626                 qdev->tx_ring_shadow_reg_area = NULL;
2627         }
2628 }
2629
2630 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2631 {
2632         qdev->rx_ring_shadow_reg_area =
2633             pci_alloc_consistent(qdev->pdev,
2634                                  PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2635         if (qdev->rx_ring_shadow_reg_area == NULL) {
2636                 netif_err(qdev, ifup, qdev->ndev,
2637                           "Allocation of RX shadow space failed.\n");
2638                 return -ENOMEM;
2639         }
2640         memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2641         qdev->tx_ring_shadow_reg_area =
2642             pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2643                                  &qdev->tx_ring_shadow_reg_dma);
2644         if (qdev->tx_ring_shadow_reg_area == NULL) {
2645                 netif_err(qdev, ifup, qdev->ndev,
2646                           "Allocation of TX shadow space failed.\n");
2647                 goto err_wqp_sh_area;
2648         }
2649         memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2650         return 0;
2651
2652 err_wqp_sh_area:
2653         pci_free_consistent(qdev->pdev,
2654                             PAGE_SIZE,
2655                             qdev->rx_ring_shadow_reg_area,
2656                             qdev->rx_ring_shadow_reg_dma);
2657         return -ENOMEM;
2658 }
2659
2660 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2661 {
2662         struct tx_ring_desc *tx_ring_desc;
2663         int i;
2664         struct ob_mac_iocb_req *mac_iocb_ptr;
2665
2666         mac_iocb_ptr = tx_ring->wq_base;
2667         tx_ring_desc = tx_ring->q;
2668         for (i = 0; i < tx_ring->wq_len; i++) {
2669                 tx_ring_desc->index = i;
2670                 tx_ring_desc->skb = NULL;
2671                 tx_ring_desc->queue_entry = mac_iocb_ptr;
2672                 mac_iocb_ptr++;
2673                 tx_ring_desc++;
2674         }
2675         atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2676         atomic_set(&tx_ring->queue_stopped, 0);
2677 }
2678
2679 static void ql_free_tx_resources(struct ql_adapter *qdev,
2680                                  struct tx_ring *tx_ring)
2681 {
2682         if (tx_ring->wq_base) {
2683                 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2684                                     tx_ring->wq_base, tx_ring->wq_base_dma);
2685                 tx_ring->wq_base = NULL;
2686         }
2687         kfree(tx_ring->q);
2688         tx_ring->q = NULL;
2689 }
2690
2691 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2692                                  struct tx_ring *tx_ring)
2693 {
2694         tx_ring->wq_base =
2695             pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2696                                  &tx_ring->wq_base_dma);
2697
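        /* The work queue base must satisfy the hardware's WQ_ADDR_ALIGN
         * alignment requirement.
         */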
2698         if ((tx_ring->wq_base == NULL) ||
2699             tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
2700                 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2701                 return -ENOMEM;
2702         }
2703         tx_ring->q =
2704             kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2705         if (tx_ring->q == NULL)
2706                 goto err;
2707
2708         return 0;
2709 err:
2710         pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2711                             tx_ring->wq_base, tx_ring->wq_base_dma);
2712         return -ENOMEM;
2713 }
2714
2715 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2716 {
2717         struct bq_desc *lbq_desc;
2718
2719         uint32_t  curr_idx, clean_idx;
2720
2721         curr_idx = rx_ring->lbq_curr_idx;
2722         clean_idx = rx_ring->lbq_clean_idx;
2723         while (curr_idx != clean_idx) {
2724                 lbq_desc = &rx_ring->lbq[curr_idx];
2725
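                /* Only the descriptor flagged as the last user of a page
                 * chunk carries the DMA mapping for the whole block, so
                 * unmap it here.
                 */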
2726                 if (lbq_desc->p.pg_chunk.last_flag) {
2727                         pci_unmap_page(qdev->pdev,
2728                                 lbq_desc->p.pg_chunk.map,
2729                                 ql_lbq_block_size(qdev),
2730                                        PCI_DMA_FROMDEVICE);
2731                         lbq_desc->p.pg_chunk.last_flag = 0;
2732                 }
2733
2734                 put_page(lbq_desc->p.pg_chunk.page);
2735                 lbq_desc->p.pg_chunk.page = NULL;
2736
2737                 if (++curr_idx == rx_ring->lbq_len)
2738                         curr_idx = 0;
2739
2740         }
2741 }
2742
2743 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2744 {
2745         int i;
2746         struct bq_desc *sbq_desc;
2747
2748         for (i = 0; i < rx_ring->sbq_len; i++) {
2749                 sbq_desc = &rx_ring->sbq[i];
2750                 if (sbq_desc == NULL) {
2751                         netif_err(qdev, ifup, qdev->ndev,
2752                                   "sbq_desc %d is NULL.\n", i);
2753                         return;
2754                 }
2755                 if (sbq_desc->p.skb) {
2756                         pci_unmap_single(qdev->pdev,
2757                                          dma_unmap_addr(sbq_desc, mapaddr),
2758                                          dma_unmap_len(sbq_desc, maplen),
2759                                          PCI_DMA_FROMDEVICE);
2760                         dev_kfree_skb(sbq_desc->p.skb);
2761                         sbq_desc->p.skb = NULL;
2762                 }
2763         }
2764 }
2765
2766 /* Free all large and small rx buffers associated
2767  * with the completion queues for this device.
2768  */
2769 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2770 {
2771         int i;
2772         struct rx_ring *rx_ring;
2773
2774         for (i = 0; i < qdev->rx_ring_count; i++) {
2775                 rx_ring = &qdev->rx_ring[i];
2776                 if (rx_ring->lbq)
2777                         ql_free_lbq_buffers(qdev, rx_ring);
2778                 if (rx_ring->sbq)
2779                         ql_free_sbq_buffers(qdev, rx_ring);
2780         }
2781 }
2782
2783 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2784 {
2785         struct rx_ring *rx_ring;
2786         int i;
2787
2788         for (i = 0; i < qdev->rx_ring_count; i++) {
2789                 rx_ring = &qdev->rx_ring[i];
2790                 if (rx_ring->type != TX_Q)
2791                         ql_update_buffer_queues(qdev, rx_ring);
2792         }
2793 }
2794
2795 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2796                                 struct rx_ring *rx_ring)
2797 {
2798         int i;
2799         struct bq_desc *lbq_desc;
2800         __le64 *bq = rx_ring->lbq_base;
2801
2802         memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2803         for (i = 0; i < rx_ring->lbq_len; i++) {
2804                 lbq_desc = &rx_ring->lbq[i];
2805                 memset(lbq_desc, 0, sizeof(*lbq_desc));
2806                 lbq_desc->index = i;
2807                 lbq_desc->addr = bq;
2808                 bq++;
2809         }
2810 }
2811
2812 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2813                                 struct rx_ring *rx_ring)
2814 {
2815         int i;
2816         struct bq_desc *sbq_desc;
2817         __le64 *bq = rx_ring->sbq_base;
2818
2819         memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2820         for (i = 0; i < rx_ring->sbq_len; i++) {
2821                 sbq_desc = &rx_ring->sbq[i];
2822                 memset(sbq_desc, 0, sizeof(*sbq_desc));
2823                 sbq_desc->index = i;
2824                 sbq_desc->addr = bq;
2825                 bq++;
2826         }
2827 }
2828
2829 static void ql_free_rx_resources(struct ql_adapter *qdev,
2830                                  struct rx_ring *rx_ring)
2831 {
2832         /* Free the small buffer queue. */
2833         if (rx_ring->sbq_base) {
2834                 pci_free_consistent(qdev->pdev,
2835                                     rx_ring->sbq_size,
2836                                     rx_ring->sbq_base, rx_ring->sbq_base_dma);
2837                 rx_ring->sbq_base = NULL;
2838         }
2839
2840         /* Free the small buffer queue control blocks. */
2841         kfree(rx_ring->sbq);
2842         rx_ring->sbq = NULL;
2843
2844         /* Free the large buffer queue. */
2845         if (rx_ring->lbq_base) {
2846                 pci_free_consistent(qdev->pdev,
2847                                     rx_ring->lbq_size,
2848                                     rx_ring->lbq_base, rx_ring->lbq_base_dma);
2849                 rx_ring->lbq_base = NULL;
2850         }
2851
2852         /* Free the large buffer queue control blocks. */
2853         kfree(rx_ring->lbq);
2854         rx_ring->lbq = NULL;
2855
2856         /* Free the rx queue. */
2857         if (rx_ring->cq_base) {
2858                 pci_free_consistent(qdev->pdev,
2859                                     rx_ring->cq_size,
2860                                     rx_ring->cq_base, rx_ring->cq_base_dma);
2861                 rx_ring->cq_base = NULL;
2862         }
2863 }
2864
2865 /* Allocate queues and buffers for this completion queue based
2866  * on the values in the parameter structure. */
2867 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2868                                  struct rx_ring *rx_ring)
2869 {
2870
2871         /*
2872          * Allocate the completion queue for this rx_ring.
2873          */
2874         rx_ring->cq_base =
2875             pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2876                                  &rx_ring->cq_base_dma);
2877
2878         if (rx_ring->cq_base == NULL) {
2879                 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2880                 return -ENOMEM;
2881         }
2882
2883         if (rx_ring->sbq_len) {
2884                 /*
2885                  * Allocate small buffer queue.
2886                  */
2887                 rx_ring->sbq_base =
2888                     pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2889                                          &rx_ring->sbq_base_dma);
2890
2891                 if (rx_ring->sbq_base == NULL) {
2892                         netif_err(qdev, ifup, qdev->ndev,
2893                                   "Small buffer queue allocation failed.\n");
2894                         goto err_mem;
2895                 }
2896
2897                 /*
2898                  * Allocate small buffer queue control blocks.
2899                  */
2900                 rx_ring->sbq =
2901                     kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2902                             GFP_KERNEL);
2903                 if (rx_ring->sbq == NULL) {
2904                         netif_err(qdev, ifup, qdev->ndev,
2905                                   "Small buffer queue control block allocation failed.\n");
2906                         goto err_mem;
2907                 }
2908
2909                 ql_init_sbq_ring(qdev, rx_ring);
2910         }
2911
2912         if (rx_ring->lbq_len) {
2913                 /*
2914                  * Allocate large buffer queue.
2915                  */
2916                 rx_ring->lbq_base =
2917                     pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2918                                          &rx_ring->lbq_base_dma);
2919
2920                 if (rx_ring->lbq_base == NULL) {
2921                         netif_err(qdev, ifup, qdev->ndev,
2922                                   "Large buffer queue allocation failed.\n");
2923                         goto err_mem;
2924                 }
2925                 /*
2926                  * Allocate large buffer queue control blocks.
2927                  */
2928                 rx_ring->lbq =
2929                     kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2930                             GFP_KERNEL);
2931                 if (rx_ring->lbq == NULL) {
2932                         netif_err(qdev, ifup, qdev->ndev,
2933                                   "Large buffer queue control block allocation failed.\n");
2934                         goto err_mem;
2935                 }
2936
2937                 ql_init_lbq_ring(qdev, rx_ring);
2938         }
2939
2940         return 0;
2941
2942 err_mem:
2943         ql_free_rx_resources(qdev, rx_ring);
2944         return -ENOMEM;
2945 }
2946
2947 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2948 {
2949         struct tx_ring *tx_ring;
2950         struct tx_ring_desc *tx_ring_desc;
2951         int i, j;
2952
2953         /*
2954          * Loop through all queues and free
2955          * any resources.
2956          */
2957         for (j = 0; j < qdev->tx_ring_count; j++) {
2958                 tx_ring = &qdev->tx_ring[j];
2959                 for (i = 0; i < tx_ring->wq_len; i++) {
2960                         tx_ring_desc = &tx_ring->q[i];
2961                         if (tx_ring_desc && tx_ring_desc->skb) {
2962                                 netif_err(qdev, ifdown, qdev->ndev,
2963                                           "Freeing lost SKB %p, from queue %d, index %d.\n",
2964                                           tx_ring_desc->skb, j,
2965                                           tx_ring_desc->index);
2966                                 ql_unmap_send(qdev, tx_ring_desc,
2967                                               tx_ring_desc->map_cnt);
2968                                 dev_kfree_skb(tx_ring_desc->skb);
2969                                 tx_ring_desc->skb = NULL;
2970                         }
2971                 }
2972         }
2973 }
2974
2975 static void ql_free_mem_resources(struct ql_adapter *qdev)
2976 {
2977         int i;
2978
2979         for (i = 0; i < qdev->tx_ring_count; i++)
2980                 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2981         for (i = 0; i < qdev->rx_ring_count; i++)
2982                 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2983         ql_free_shadow_space(qdev);
2984 }
2985
2986 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2987 {
2988         int i;
2989
2990         /* Allocate space for our shadow registers and such. */
2991         if (ql_alloc_shadow_space(qdev))
2992                 return -ENOMEM;
2993
2994         for (i = 0; i < qdev->rx_ring_count; i++) {
2995                 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2996                         netif_err(qdev, ifup, qdev->ndev,
2997                                   "RX resource allocation failed.\n");
2998                         goto err_mem;
2999                 }
3000         }
3001         /* Allocate tx queue resources */
3002         for (i = 0; i < qdev->tx_ring_count; i++) {
3003                 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3004                         netif_err(qdev, ifup, qdev->ndev,
3005                                   "TX resource allocation failed.\n");
3006                         goto err_mem;
3007                 }
3008         }
3009         return 0;
3010
3011 err_mem:
3012         ql_free_mem_resources(qdev);
3013         return -ENOMEM;
3014 }
3015
3016 /* Set up the rx ring control block and pass it to the chip.
3017  * The control block is defined as
3018  * "Completion Queue Initialization Control Block", or cqicb.
3019  */
3020 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3021 {
3022         struct cqicb *cqicb = &rx_ring->cqicb;
3023         void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3024                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3025         u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3026                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3027         void __iomem *doorbell_area =
3028             qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3029         int err = 0;
3030         u16 bq_len;
3031         u64 tmp;
3032         __le64 *base_indirect_ptr;
3033         int page_entries;
3034
3035         /* Set up the shadow registers for this ring. */
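        /* The per-ring shadow area holds the completion queue producer
         * index, followed by the lbq and sbq indirect page lists.
         */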
3036         rx_ring->prod_idx_sh_reg = shadow_reg;
3037         rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3038         *rx_ring->prod_idx_sh_reg = 0;
3039         shadow_reg += sizeof(u64);
3040         shadow_reg_dma += sizeof(u64);
3041         rx_ring->lbq_base_indirect = shadow_reg;
3042         rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3043         shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3044         shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3045         rx_ring->sbq_base_indirect = shadow_reg;
3046         rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3047
3048         /* PCI doorbell mem area + 0x00 for consumer index register */
3049         rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3050         rx_ring->cnsmr_idx = 0;
3051         rx_ring->curr_entry = rx_ring->cq_base;
3052
3053         /* PCI doorbell mem area + 0x04 for valid register */
3054         rx_ring->valid_db_reg = doorbell_area + 0x04;
3055
3056         /* PCI doorbell mem area + 0x18 for large buffer consumer */
3057         rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3058
3059         /* PCI doorbell mem area + 0x1c */
3060         rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3061
3062         memset((void *)cqicb, 0, sizeof(struct cqicb));
3063         cqicb->msix_vect = rx_ring->irq;
3064
3065         bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3066         cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3067
3068         cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3069
3070         cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3071
3072         /*
3073          * Set up the control block load flags.
3074          */
3075         cqicb->flags = FLAGS_LC |       /* Load queue base address */
3076             FLAGS_LV |          /* Load MSI-X vector */
3077             FLAGS_LI;           /* Load irq delay values */
3078         if (rx_ring->lbq_len) {
3079                 cqicb->flags |= FLAGS_LL;       /* Load lbq values */
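                /* Build the list of DB_PAGE_SIZE chunks the hardware walks
                 * to find the large buffer queue pages.
                 */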
3080                 tmp = (u64)rx_ring->lbq_base_dma;
3081                 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
3082                 page_entries = 0;
3083                 do {
3084                         *base_indirect_ptr = cpu_to_le64(tmp);
3085                         tmp += DB_PAGE_SIZE;
3086                         base_indirect_ptr++;
3087                         page_entries++;
3088                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3089                 cqicb->lbq_addr =
3090                     cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3091                 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3092                         (u16) rx_ring->lbq_buf_size;
3093                 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3094                 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3095                         (u16) rx_ring->lbq_len;
3096                 cqicb->lbq_len = cpu_to_le16(bq_len);
3097                 rx_ring->lbq_prod_idx = 0;
3098                 rx_ring->lbq_curr_idx = 0;
3099                 rx_ring->lbq_clean_idx = 0;
3100                 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3101         }
3102         if (rx_ring->sbq_len) {
3103                 cqicb->flags |= FLAGS_LS;       /* Load sbq values */
3104                 tmp = (u64)rx_ring->sbq_base_dma;
3105                 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
3106                 page_entries = 0;
3107                 do {
3108                         *base_indirect_ptr = cpu_to_le64(tmp);
3109                         tmp += DB_PAGE_SIZE;
3110                         base_indirect_ptr++;
3111                         page_entries++;
3112                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3113                 cqicb->sbq_addr =
3114                     cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3115                 cqicb->sbq_buf_size =
3116                     cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3117                 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3118                         (u16) rx_ring->sbq_len;
3119                 cqicb->sbq_len = cpu_to_le16(bq_len);
3120                 rx_ring->sbq_prod_idx = 0;
3121                 rx_ring->sbq_curr_idx = 0;
3122                 rx_ring->sbq_clean_idx = 0;
3123                 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3124         }
3125         switch (rx_ring->type) {
3126         case TX_Q:
3127                 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3128                 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3129                 break;
3130         case RX_Q:
3131                 /* Inbound completion handling rx_rings run in
3132                  * separate NAPI contexts.
3133                  */
3134                 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3135                                64);
3136                 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3137                 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3138                 break;
3139         default:
3140                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3141                              "Invalid rx_ring->type = %d.\n", rx_ring->type);
3142         }
3143         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3144                      "Initializing rx work queue.\n");
3145         err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3146                            CFG_LCQ, rx_ring->cq_id);
3147         if (err) {
3148                 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3149                 return err;
3150         }
3151         return err;
3152 }
3153
3154 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3155 {
3156         struct wqicb *wqicb = (struct wqicb *)tx_ring;
3157         void __iomem *doorbell_area =
3158             qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3159         void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3160             (tx_ring->wq_id * sizeof(u64));
3161         u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3162             (tx_ring->wq_id * sizeof(u64));
3163         int err = 0;
3164
3165         /*
3166          * Assign doorbell registers for this tx_ring.
3167          */
3168         /* TX PCI doorbell mem area for tx producer index */
3169         tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3170         tx_ring->prod_idx = 0;
3171         /* TX PCI doorbell mem area + 0x04 */
3172         tx_ring->valid_db_reg = doorbell_area + 0x04;
3173
3174         /*
3175          * Assign shadow registers for this tx_ring.
3176          */
3177         tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3178         tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3179
3180         wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3181         wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3182                                    Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3183         wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3184         wqicb->rid = 0;
3185         wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3186
3187         wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3188
3189         ql_init_tx_ring(qdev, tx_ring);
3190
3191         err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3192                            (u16) tx_ring->wq_id);
3193         if (err) {
3194                 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3195                 return err;
3196         }
3197         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3198                      "Successfully loaded WQICB.\n");
3199         return err;
3200 }
3201
3202 static void ql_disable_msix(struct ql_adapter *qdev)
3203 {
3204         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3205                 pci_disable_msix(qdev->pdev);
3206                 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3207                 kfree(qdev->msi_x_entry);
3208                 qdev->msi_x_entry = NULL;
3209         } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3210                 pci_disable_msi(qdev->pdev);
3211                 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3212         }
3213 }
3214
3215 /* We start by trying to get the number of vectors
3216  * stored in qdev->intr_count. If we don't get that
3217  * many then we reduce the count and try again.
3218  */
3219 static void ql_enable_msix(struct ql_adapter *qdev)
3220 {
3221         int i, err;
3222
3223         /* Get the MSIX vectors. */
3224         if (qlge_irq_type == MSIX_IRQ) {
3225                 /* Try to alloc space for the msix struct,
3226                  * if it fails then go to MSI/legacy.
3227                  */
3228                 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3229                                             sizeof(struct msix_entry),
3230                                             GFP_KERNEL);
3231                 if (!qdev->msi_x_entry) {
3232                         qlge_irq_type = MSI_IRQ;
3233                         goto msi;
3234                 }
3235
3236                 for (i = 0; i < qdev->intr_count; i++)
3237                         qdev->msi_x_entry[i].entry = i;
3238
3239                 /* Loop to get our vectors.  We start with
3240                  * what we want and settle for what we get.
3241                  */
3242                 do {
3243                         err = pci_enable_msix(qdev->pdev,
3244                                 qdev->msi_x_entry, qdev->intr_count);
3245                         if (err > 0)
3246                                 qdev->intr_count = err;
3247                 } while (err > 0);
3248
3249                 if (err < 0) {
3250                         kfree(qdev->msi_x_entry);
3251                         qdev->msi_x_entry = NULL;
3252                         netif_warn(qdev, ifup, qdev->ndev,
3253                                    "MSI-X Enable failed, trying MSI.\n");
3254                         qdev->intr_count = 1;
3255                         qlge_irq_type = MSI_IRQ;
3256                 } else if (err == 0) {
3257                         set_bit(QL_MSIX_ENABLED, &qdev->flags);
3258                         netif_info(qdev, ifup, qdev->ndev,
3259                                    "MSI-X Enabled, got %d vectors.\n",
3260                                    qdev->intr_count);
3261                         return;
3262                 }
3263         }
3264 msi:
3265         qdev->intr_count = 1;
3266         if (qlge_irq_type == MSI_IRQ) {
3267                 if (!pci_enable_msi(qdev->pdev)) {
3268                         set_bit(QL_MSI_ENABLED, &qdev->flags);
3269                         netif_info(qdev, ifup, qdev->ndev,
3270                                    "Running with MSI interrupts.\n");
3271                         return;
3272                 }
3273         }
3274         qlge_irq_type = LEG_IRQ;
3275         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3276                      "Running with legacy interrupts.\n");
3277 }
3278
3279 /* Each vector services 1 RSS ring and 1 or more
3280  * TX completion rings.  This function loops through
3281  * the TX completion rings and assigns the vector that
3282  * will service it.  An example would be if there are
3283  * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3284  * This would mean that vector 0 would service RSS ring 0
3285  * and TX completion rings 0,1,2 and 3.  Vector 1 would
3286  * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3287  */
3288 static void ql_set_tx_vect(struct ql_adapter *qdev)
3289 {
3290         int i, j, vect;
3291         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3292
3293         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3294                 /* Assign irq vectors to TX rx_rings.*/
3295                 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3296                                          i < qdev->rx_ring_count; i++) {
3297                         if (j == tx_rings_per_vector) {
3298                                 vect++;
3299                                 j = 0;
3300                         }
3301                         qdev->rx_ring[i].irq = vect;
3302                         j++;
3303                 }
3304         } else {
3305                 /* For a single vector all rings have an irq
3306                  * of zero.
3307                  */
3308                 for (i = 0; i < qdev->rx_ring_count; i++)
3309                         qdev->rx_ring[i].irq = 0;
3310         }
3311 }
3312
3313 /* Set the interrupt mask for this vector.  Each vector
3314  * will service 1 RSS ring and 1 or more TX completion
3315  * rings.  This function sets up a bit mask per vector
3316  * that indicates which rings it services.
3317  */
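/* For example, with 2 vectors and 8 TX completion rings, vector 0's mask
 * covers RSS ring 0 plus TX completion rings 0-3, and vector 1's mask
 * covers RSS ring 1 plus TX completion rings 4-7.
 */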
3318 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3319 {
3320         int j, vect = ctx->intr;
3321         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3322
3323         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3324                 /* Add the RSS ring serviced by this vector
3325                  * to the mask.
3326                  */
3327                 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3328                 /* Add the TX ring(s) serviced by this vector
3329                  * to the mask. */
3330                 for (j = 0; j < tx_rings_per_vector; j++) {
3331                         ctx->irq_mask |=
3332                         (1 << qdev->rx_ring[qdev->rss_ring_count +
3333                         (vect * tx_rings_per_vector) + j].cq_id);
3334                 }
3335         } else {
3336                 /* For a single vector we just set the bit for each
3337                  * queue's ID in the mask.
3338                  */
3339                 for (j = 0; j < qdev->rx_ring_count; j++)
3340                         ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3341         }
3342 }
3343
3344 /*
3345  * Here we build the intr_context structures based on
3346  * our rx_ring count and intr vector count.
3347  * The intr_context structure is used to hook each vector
3348  * to possibly different handlers.
3349  */
3350 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3351 {
3352         int i = 0;
3353         struct intr_context *intr_context = &qdev->intr_context[0];
3354
3355         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3356                 /* Each rx_ring has its
3357                  * own intr_context since we have separate
3358                  * vectors for each queue.
3359                  */
3360                 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3361                         qdev->rx_ring[i].irq = i;
3362                         intr_context->intr = i;
3363                         intr_context->qdev = qdev;
3364                         /* Set up this vector's bit-mask that indicates
3365                          * which queues it services.
3366                          */
3367                         ql_set_irq_mask(qdev, intr_context);
3368                         /*
3369                          * We set up each vector's enable/disable/read bits so
3370                          * there are no bit/mask calculations in the critical path.
3371                          */
3372                         intr_context->intr_en_mask =
3373                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3374                             INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3375                             | i;
3376                         intr_context->intr_dis_mask =
3377                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3378                             INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3379                             INTR_EN_IHD | i;
3380                         intr_context->intr_read_mask =
3381                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3382                             INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3383                             i;
3384                         if (i == 0) {
3385                                 /* The first vector/queue handles
3386                                  * broadcast/multicast, fatal errors,
3387                                  * and firmware events.  This in addition
3388                                  * to normal inbound NAPI processing.
3389                                  */
3390                                 intr_context->handler = qlge_isr;
3391                                 sprintf(intr_context->name, "%s-rx-%d",
3392                                         qdev->ndev->name, i);
3393                         } else {
3394                                 /*
3395                                  * Inbound queues handle unicast frames only.
3396                                  */
3397                                 intr_context->handler = qlge_msix_rx_isr;
3398                                 sprintf(intr_context->name, "%s-rx-%d",
3399                                         qdev->ndev->name, i);
3400                         }
3401                 }
3402         } else {
3403                 /*
3404                  * All rx_rings use the same intr_context since
3405                  * there is only one vector.
3406                  */
3407                 intr_context->intr = 0;
3408                 intr_context->qdev = qdev;
3409                 /*
3410                  * We set up each vector's enable/disable/read bits so
3411                  * there are no bit/mask calculations in the critical path.
3412                  */
3413                 intr_context->intr_en_mask =
3414                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3415                 intr_context->intr_dis_mask =
3416                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3417                     INTR_EN_TYPE_DISABLE;
3418                 intr_context->intr_read_mask =
3419                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3420                 /*
3421                  * Single interrupt means one handler for all rings.
3422                  */
3423                 intr_context->handler = qlge_isr;
3424                 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3425                 /* Set up this vector's bit-mask that indicates
3426                  * which queues it services. In this case there is
3427                  * a single vector so it will service all RSS and
3428                  * TX completion rings.
3429                  */
3430                 ql_set_irq_mask(qdev, intr_context);
3431         }
3432         /* Tell the TX completion rings which MSIx vector
3433          * they will be using.
3434          */
3435         ql_set_tx_vect(qdev);
3436 }
3437
3438 static void ql_free_irq(struct ql_adapter *qdev)
3439 {
3440         int i;
3441         struct intr_context *intr_context = &qdev->intr_context[0];
3442
3443         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3444                 if (intr_context->hooked) {
3445                         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3446                                 free_irq(qdev->msi_x_entry[i].vector,
3447                                          &qdev->rx_ring[i]);
3448                                 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3449                                              "freeing msix interrupt %d.\n", i);
3450                         } else {
3451                                 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3452                                 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3453                                              "freeing msi interrupt %d.\n", i);
3454                         }
3455                 }
3456         }
3457         ql_disable_msix(qdev);
3458 }
3459
3460 static int ql_request_irq(struct ql_adapter *qdev)
3461 {
3462         int i;
3463         int status = 0;
3464         struct pci_dev *pdev = qdev->pdev;
3465         struct intr_context *intr_context = &qdev->intr_context[0];
3466
3467         ql_resolve_queues_to_irqs(qdev);
3468
3469         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3470                 atomic_set(&intr_context->irq_cnt, 0);
3471                 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3472                         status = request_irq(qdev->msi_x_entry[i].vector,
3473                                              intr_context->handler,
3474                                              0,
3475                                              intr_context->name,
3476                                              &qdev->rx_ring[i]);
3477                         if (status) {
3478                                 netif_err(qdev, ifup, qdev->ndev,
3479                                           "Failed request for MSIX interrupt %d.\n",
3480                                           i);
3481                                 goto err_irq;
3482                         } else {
3483                                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3484                                              "Hooked intr %d, queue type %s, with name %s.\n",
3485                                              i,
3486                                              qdev->rx_ring[i].type == DEFAULT_Q ?
3487                                              "DEFAULT_Q" :
3488                                              qdev->rx_ring[i].type == TX_Q ?
3489                                              "TX_Q" :
3490                                              qdev->rx_ring[i].type == RX_Q ?
3491                                              "RX_Q" : "",
3492                                              intr_context->name);
3493                         }
3494                 } else {
3495                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3496                                      "trying msi or legacy interrupts.\n");
3497                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3498                                      "%s: irq = %d.\n", __func__, pdev->irq);
3499                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3500                                      "%s: context->name = %s.\n", __func__,
3501                                      intr_context->name);
3502                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3503                                      "%s: dev_id = 0x%p.\n", __func__,
3504                                      &qdev->rx_ring[0]);
3505                         status =
3506                             request_irq(pdev->irq, qlge_isr,
3507                                         test_bit(QL_MSI_ENABLED,
3508                                                  &qdev->
3509                                                  flags) ? 0 : IRQF_SHARED,
3510                                         intr_context->name, &qdev->rx_ring[0]);
3511                         if (status)
3512                                 goto err_irq;
3513
3514                         netif_err(qdev, ifup, qdev->ndev,
3515                                   "Hooked intr %d, queue type %s, with name %s.\n",
3516                                   i,
3517                                   qdev->rx_ring[0].type == DEFAULT_Q ?
3518                                   "DEFAULT_Q" :
3519                                   qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3520                                   qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3521                                   intr_context->name);
3522                 }
3523                 intr_context->hooked = 1;
3524         }
3525         return status;
3526 err_irq:
3527         netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3528         ql_free_irq(qdev);
3529         return status;
3530 }
3531
3532 static int ql_start_rss(struct ql_adapter *qdev)
3533 {
3534         u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3535                                 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3536                                 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3537                                 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3538                                 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3539                                 0xbe, 0xac, 0x01, 0xfa};
3540         struct ricb *ricb = &qdev->ricb;
3541         int status = 0;
3542         int i;
3543         u8 *hash_id = (u8 *) ricb->hash_cq_id;
3544
3545         memset((void *)ricb, 0, sizeof(*ricb));
3546
3547         ricb->base_cq = RSS_L4K;
3548         ricb->flags =
3549                 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3550         ricb->mask = cpu_to_le16((u16)(0x3ff));
3551
3552         /*
3553          * Fill out the Indirection Table.
3554          */
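        /* The mask-based fill below assumes rss_ring_count is a power
         * of two.
         */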
3555         for (i = 0; i < 1024; i++)
3556                 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3557
3558         memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3559         memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3560
3561         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
3562
3563         status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3564         if (status) {
3565                 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3566                 return status;
3567         }
3568         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3569                      "Successfully loaded RICB.\n");
3570         return status;
3571 }
3572
3573 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3574 {
3575         int i, status = 0;
3576
3577         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3578         if (status)
3579                 return status;
3580         /* Clear all the entries in the routing table. */
3581         for (i = 0; i < 16; i++) {
3582                 status = ql_set_routing_reg(qdev, i, 0, 0);
3583                 if (status) {
3584                         netif_err(qdev, ifup, qdev->ndev,
3585                                   "Failed to init routing register for CAM packets.\n");
3586                         break;
3587                 }
3588         }
3589         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3590         return status;
3591 }
3592
3593 /* Initialize the frame-to-queue routing. */
3594 static int ql_route_initialize(struct ql_adapter *qdev)
3595 {
3596         int status = 0;
3597
3598         /* Clear all the entries in the routing table. */
3599         status = ql_clear_routing_entries(qdev);
3600         if (status)
3601                 return status;
3602
3603         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3604         if (status)
3605                 return status;
3606
3607         status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3608                                                 RT_IDX_IP_CSUM_ERR, 1);
3609         if (status) {
3610                 netif_err(qdev, ifup, qdev->ndev,
3611                         "Failed to init routing register "
3612                         "for IP CSUM error packets.\n");
3613                 goto exit;
3614         }
3615         status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3616                                                 RT_IDX_TU_CSUM_ERR, 1);
3617         if (status) {
3618                 netif_err(qdev, ifup, qdev->ndev,
3619                         "Failed to init routing register "
3620                         "for TCP/UDP CSUM error packets.\n");
3621                 goto exit;
3622         }
3623         status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3624         if (status) {
3625                 netif_err(qdev, ifup, qdev->ndev,
3626                           "Failed to init routing register for broadcast packets.\n");
3627                 goto exit;
3628         }
3629         /* If we have more than one inbound queue, then turn on RSS in the
3630          * routing block.
3631          */
3632         if (qdev->rss_ring_count > 1) {
3633                 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3634                                         RT_IDX_RSS_MATCH, 1);
3635                 if (status) {
3636                         netif_err(qdev, ifup, qdev->ndev,
3637                                   "Failed to init routing register for MATCH RSS packets.\n");
3638                         goto exit;
3639                 }
3640         }
3641
3642         status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3643                                     RT_IDX_CAM_HIT, 1);
3644         if (status)
3645                 netif_err(qdev, ifup, qdev->ndev,
3646                           "Failed to init routing register for CAM packets.\n");
3647 exit:
3648         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3649         return status;
3650 }
3651
3652 int ql_cam_route_initialize(struct ql_adapter *qdev)
3653 {
3654         int status, set;
3655
3656         /* Check if the link is up and use that to
3657          * determine whether we are setting or clearing
3658          * the MAC address in the CAM.
3659          */
3660         set = ql_read32(qdev, STS);
3661         set &= qdev->port_link_up;
3662         status = ql_set_mac_addr(qdev, set);
3663         if (status) {
3664                 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3665                 return status;
3666         }
3667
3668         status = ql_route_initialize(qdev);
3669         if (status)
3670                 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3671
3672         return status;
3673 }
3674
3675 static int ql_adapter_initialize(struct ql_adapter *qdev)
3676 {
3677         u32 value, mask;
3678         int i;
3679         int status = 0;
3680
3681         /*
3682          * Set up the System register to halt on errors.
3683          */
3684         value = SYS_EFE | SYS_FAE;
3685         mask = value << 16;
3686         ql_write32(qdev, SYS, mask | value);
3687
3688         /* Set the default queue, and VLAN behavior. */
3689         value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3690         mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3691         ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3692
3693         /* Set the MPI interrupt to enabled. */
3694         ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3695
3696         /* Enable the function, set pagesize, enable error checking. */
3697         value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3698             FSC_EC | FSC_VM_PAGE_4K;
3699         value |= SPLT_SETTING;
3700
3701         /* Set/clear header splitting. */
3702         mask = FSC_VM_PAGESIZE_MASK |
3703             FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3704         ql_write32(qdev, FSC, mask | value);
3705
3706         ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3707
3708         /* Set RX packet routing to use port/pci function on which the
3709          * packet arrived, in addition to the usual frame routing.
3710          * This is helpful on bonding where both interfaces can have
3711          * the same MAC address.
3712          */
3713         ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3714         /* Reroute all packets to our Interface.
3715          * They may have been routed to MPI firmware
3716          * due to WOL.
3717          */
3718         value = ql_read32(qdev, MGMT_RCV_CFG);
3719         value &= ~MGMT_RCV_CFG_RM;
3720         mask = 0xffff0000;
3721
3722         /* Sticky reg needs clearing due to WOL. */
3723         ql_write32(qdev, MGMT_RCV_CFG, mask);
3724         ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3725
3726         /* Default WOL is enabled on Mezz cards */
3727         if (qdev->pdev->subsystem_device == 0x0068 ||
3728                         qdev->pdev->subsystem_device == 0x0180)
3729                 qdev->wol = WAKE_MAGIC;
3730
3731         /* Start up the rx queues. */
3732         for (i = 0; i < qdev->rx_ring_count; i++) {
3733                 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3734                 if (status) {
3735                         netif_err(qdev, ifup, qdev->ndev,
3736                                   "Failed to start rx ring[%d].\n", i);
3737                         return status;
3738                 }
3739         }
3740
3741         /* If there is more than one inbound completion queue
3742          * then download a RICB to configure RSS.
3743          */
3744         if (qdev->rss_ring_count > 1) {
3745                 status = ql_start_rss(qdev);
3746                 if (status) {
3747                         netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3748                         return status;
3749                 }
3750         }
3751
3752         /* Start up the tx queues. */
3753         for (i = 0; i < qdev->tx_ring_count; i++) {
3754                 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3755                 if (status) {
3756                         netif_err(qdev, ifup, qdev->ndev,
3757                                   "Failed to start tx ring[%d].\n", i);
3758                         return status;
3759                 }
3760         }
3761
3762         /* Initialize the port and set the max framesize. */
3763         status = qdev->nic_ops->port_initialize(qdev);
3764         if (status)
3765                 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3766
3767         /* Set up the MAC address and frame routing filter. */
3768         status = ql_cam_route_initialize(qdev);
3769         if (status) {
3770                 netif_err(qdev, ifup, qdev->ndev,
3771                           "Failed to init CAM/Routing tables.\n");
3772                 return status;
3773         }
3774
3775         /* Start NAPI for the RSS queues. */
3776         for (i = 0; i < qdev->rss_ring_count; i++) {
3777                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3778                              "Enabling NAPI for rx_ring[%d].\n", i);
3779                 napi_enable(&qdev->rx_ring[i].napi);
3780         }
3781
3782         return status;
3783 }
3784
3785 /* Issue soft reset to chip. */
3786 static int ql_adapter_reset(struct ql_adapter *qdev)
3787 {
3788         u32 value;
3789         int status = 0;
3790         unsigned long end_jiffies;
3791
3792         /* Clear all the entries in the routing table. */
3793         status = ql_clear_routing_entries(qdev);
3794         if (status) {
3795                 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3796                 return status;
3797         }
3798
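        /* Set the deadline for the function reset bit to clear; allow at
         * least one jiffy.
         */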
3799         end_jiffies = jiffies +
3800                 max((unsigned long)1, usecs_to_jiffies(30));
3801
3802         /* Stop management traffic. */
3803         ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3804
3805         /* Wait for the NIC and MGMNT FIFOs to empty. */
3806         ql_wait_fifo_empty(qdev);
3807
3808         ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3809
3810         do {
3811                 value = ql_read32(qdev, RST_FO);
3812                 if ((value & RST_FO_FR) == 0)
3813                         break;
3814                 cpu_relax();
3815         } while (time_before(jiffies, end_jiffies));
3816
3817         if (value & RST_FO_FR) {
3818                 netif_err(qdev, ifdown, qdev->ndev,
3819                           "Timed out resetting the chip.\n");
3820                 status = -ETIMEDOUT;
3821         }
3822
3823         /* Resume management traffic. */
3824         ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3825         return status;
3826 }
3827
3828 static void ql_display_dev_info(struct net_device *ndev)
3829 {
3830         struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3831
3832         netif_info(qdev, probe, qdev->ndev,
3833                    "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3834                    "XG Roll = %d, XG Rev = %d.\n",
3835                    qdev->func,
3836                    qdev->port,
3837                    qdev->chip_rev_id & 0x0000000f,
3838                    qdev->chip_rev_id >> 4 & 0x0000000f,
3839                    qdev->chip_rev_id >> 8 & 0x0000000f,
3840                    qdev->chip_rev_id >> 12 & 0x0000000f);
3841         netif_info(qdev, probe, qdev->ndev,
3842                    "MAC address %pM\n", ndev->dev_addr);
3843 }
3844
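/* Program the wake-on-LAN configuration into the MPI firmware.  Only
 * magic-packet wake (WAKE_MAGIC) is supported; any other WAKE_* bit in
 * qdev->wol is rejected with -EINVAL.  qdev->wol is normally set from
 * ethtool, for example (interface name assumed for illustration):
 *      ethtool -s eth0 wol g   # enable magic-packet wake
 *      ethtool -s eth0 wol d   # disable wake-on-LAN
 */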
3845 int ql_wol(struct ql_adapter *qdev)
3846 {
3847         int status = 0;
3848         u32 wol = MB_WOL_DISABLE;
3849
3850         /* The CAM is still intact after a reset, but if we
3851          * are doing WOL, then we may need to program the
3852          * routing regs. We would also need to issue the mailbox
3853          * commands to instruct the MPI what to do per the ethtool
3854          * settings.
3855          */
3856
3857         if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3858                         WAKE_MCAST | WAKE_BCAST)) {
3859                 netif_err(qdev, ifdown, qdev->ndev,
3860                           "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3861                           qdev->wol);
3862                 return -EINVAL;
3863         }
3864
3865         if (qdev->wol & WAKE_MAGIC) {
3866                 status = ql_mb_wol_set_magic(qdev, 1);
3867                 if (status) {
3868                         netif_err(qdev, ifdown, qdev->ndev,
3869                                   "Failed to set magic packet on %s.\n",
3870                                   qdev->ndev->name);
3871                         return status;
3872                 } else
3873                         netif_info(qdev, drv, qdev->ndev,
3874                                    "Enabled magic packet successfully on %s.\n",
3875                                    qdev->ndev->name);
3876
3877                 wol |= MB_WOL_MAGIC_PKT;
3878         }
3879
3880         if (qdev->wol) {
3881                 wol |= MB_WOL_MODE_ON;
3882                 status = ql_mb_wol_mode(qdev, wol);
3883                 netif_err(qdev, drv, qdev->ndev,
3884                           "WOL %s (wol code 0x%x) on %s\n",
3885                           (status == 0) ? "Successfully set" : "Failed",
3886                           wol, qdev->ndev->name);
3887         }
3888
3889         return status;
3890 }
3891
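/* Flush all delayed work so no handler runs while the device is being
 * torn down.  The ASIC reset worker is only cancelled when the adapter
 * is marked up, so an in-progress recovery does not cancel itself.
 */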
3892 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3893 {
3894
3895         /* Don't kill the reset worker thread if we
3896          * are in the process of recovery.
3897          */
3898         if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3899                 cancel_delayed_work_sync(&qdev->asic_reset_work);
3900         cancel_delayed_work_sync(&qdev->mpi_reset_work);
3901         cancel_delayed_work_sync(&qdev->mpi_work);
3902         cancel_delayed_work_sync(&qdev->mpi_idc_work);
3903         cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3904         cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3905 }
3906
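/* Bring the adapter down: turn the link off, cancel outstanding delayed
 * work, quiesce NAPI and interrupts, drain the tx rings, soft-reset the
 * chip and free the posted rx buffers.
 */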
3907 static int ql_adapter_down(struct ql_adapter *qdev)
3908 {
3909         int i, status = 0;
3910
3911         ql_link_off(qdev);
3912
3913         ql_cancel_all_work_sync(qdev);
3914
3915         for (i = 0; i < qdev->rss_ring_count; i++)
3916                 napi_disable(&qdev->rx_ring[i].napi);
3917
3918         clear_bit(QL_ADAPTER_UP, &qdev->flags);
3919
3920         ql_disable_interrupts(qdev);
3921
3922         ql_tx_ring_clean(qdev);
3923
3924         /* Call netif_napi_del() from a common point.
3925          */
3926         for (i = 0; i < qdev->rss_ring_count; i++)
3927                 netif_napi_del(&qdev->rx_ring[i].napi);
3928
3929         status = ql_adapter_reset(qdev);
3930         if (status)
3931                 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3932                           qdev->func);
3933         ql_free_rx_buffers(qdev);
3934
3935         return status;
3936 }
3937
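/* Bring the adapter up: program the hardware (rings, RSS, CAM/routing),
 * post rx buffers, restore the carrier state and rx mode, then enable
 * interrupts and the tx queues.  On failure the chip is reset and the
 * error is returned to the caller.
 */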
3938 static int ql_adapter_up(struct ql_adapter *qdev)
3939 {
3940         int err = 0;
3941
3942         err = ql_adapter_initialize(qdev);
3943         if (err) {
3944                 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3945                 goto err_init;
3946         }
3947         set_bit(QL_ADAPTER_UP, &qdev->flags);
3948         ql_alloc_rx_buffers(qdev);
3949         /* If the port is initialized and the
3950          * link is up, then turn on the carrier.
3951          */
3952         if ((ql_read32(qdev, STS) & qdev->port_init) &&
3953                         (ql_read32(qdev, STS) & qdev->port_link_up))
3954                 ql_link_on(qdev);
3955         /* Restore rx mode. */
3956         clear_bit(QL_ALLMULTI, &qdev->flags);
3957         clear_bit(QL_PROMISCUOUS, &qdev->flags);
3958         qlge_set_multicast_list(qdev->ndev);
3959
3960         ql_enable_interrupts(qdev);
3961         ql_enable_all_completion_interrupts(qdev);
3962         netif_tx_start_all_queues(qdev->ndev);
3963
3964         return 0;
3965 err_init:
3966         ql_adapter_reset(qdev);
3967         return err;
3968 }
3969
3970 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3971 {
3972         ql_free_mem_resources(qdev);
3973         ql_free_irq(qdev);
3974 }
3975
3976 static int ql_get_adapter_resources(struct ql_adapter *qdev)
3977 {
3978         int status = 0;
3979
3980         if (ql_alloc_mem_resources(qdev)) {
3981                 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
3982                 return -ENOMEM;
3983         }
3984         status = ql_request_irq(qdev);
3985         return status;
3986 }
3987
3988 static int qlge_close(struct net_device *ndev)
3989 {
3990         struct ql_adapter *qdev = netdev_priv(ndev);
3991
3992         /* If we hit pci_channel_io_perm_failure
3993          * condition, then we already
3994          * brought the adapter down.
3995          */
3996         if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
3997                 netif_err(qdev, drv, qdev->ndev, "EEH fatal error; adapter already brought down.\n");
3998                 clear_bit(QL_EEH_FATAL, &qdev->flags);
3999                 return 0;
4000         }
4001
4002         /*
4003          * Wait for device to recover from a reset.
4004          * (Rarely happens, but possible.)
4005          */
4006         while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4007                 msleep(1);
4008         ql_adapter_down(qdev);
4009         ql_release_adapter_resources(qdev);
4010         return 0;
4011 }
4012
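/* Size the ring layout before allocating resources: one inbound (RSS)
 * completion ring per interrupt vector actually granted by
 * ql_enable_msix() (at most one per online CPU), one tx ring per CPU,
 * and one outbound completion ring per tx ring, so
 * rx_ring_count = rss_ring_count + tx_ring_count.  The large rx buffer
 * size and page order are chosen from the current MTU.
 * Illustrative example: with cpu_cnt = 4 and all 4 vectors granted,
 * rss_ring_count = 4, tx_ring_count = 4 and rx_ring_count = 8, with tx
 * ring i completing on cq_id = 4 + i.
 */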
4013 static int ql_configure_rings(struct ql_adapter *qdev)
4014 {
4015         int i;
4016         struct rx_ring *rx_ring;
4017         struct tx_ring *tx_ring;
4018         int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4019         unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4020                 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4021
4022         qdev->lbq_buf_order = get_order(lbq_buf_len);
4023
4024         /* In a perfect world we have one RSS ring for each CPU
4025          * and each has it's own vector.  To do that we ask for
4026          * and each has its own vector.  To do that we ask for
4027          * vector count to what we actually get.  We then
4028          * allocate an RSS ring for each.
4029          * Essentially, we are doing min(cpu_count, msix_vector_count).
4030          */
4031         qdev->intr_count = cpu_cnt;
4032         ql_enable_msix(qdev);
4033         /* Adjust the RSS ring count to the actual vector count. */
4034         qdev->rss_ring_count = qdev->intr_count;
4035         qdev->tx_ring_count = cpu_cnt;
4036         qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
4037
4038         for (i = 0; i < qdev->tx_ring_count; i++) {
4039                 tx_ring = &qdev->tx_ring[i];
4040                 memset((void *)tx_ring, 0, sizeof(*tx_ring));
4041                 tx_ring->qdev = qdev;
4042                 tx_ring->wq_id = i;
4043                 tx_ring->wq_len = qdev->tx_ring_size;
4044                 tx_ring->wq_size =
4045                     tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4046
4047                 /*
4048                  * The completion queue IDs for the tx rings start
4049                  * immediately after the rss rings.
4050                  */
4051                 tx_ring->cq_id = qdev->rss_ring_count + i;
4052         }
4053
4054         for (i = 0; i < qdev->rx_ring_count; i++) {
4055                 rx_ring = &qdev->rx_ring[i];
4056                 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4057                 rx_ring->qdev = qdev;
4058                 rx_ring->cq_id = i;
4059                 rx_ring->cpu = i % cpu_cnt;     /* CPU to run handler on. */
4060                 if (i < qdev->rss_ring_count) {
4061                         /*
4062                          * Inbound (RSS) queues.
4063                          */
4064                         rx_ring->cq_len = qdev->rx_ring_size;
4065                         rx_ring->cq_size =
4066                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4067                         rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4068                         rx_ring->lbq_size =
4069                             rx_ring->lbq_len * sizeof(__le64);
4070                         rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4071                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4072                                      "lbq_buf_size %d, order = %d\n",
4073                                      rx_ring->lbq_buf_size,
4074                                      qdev->lbq_buf_order);
4075                         rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4076                         rx_ring->sbq_size =
4077                             rx_ring->sbq_len * sizeof(__le64);
4078                         rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4079                         rx_ring->type = RX_Q;
4080                 } else {
4081                         /*
4082                          * Outbound queue handles outbound completions only.
4083                          */
4084                         /* outbound cq is same size as tx_ring it services. */
4085                         rx_ring->cq_len = qdev->tx_ring_size;
4086                         rx_ring->cq_size =
4087                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4088                         rx_ring->lbq_len = 0;
4089                         rx_ring->lbq_size = 0;
4090                         rx_ring->lbq_buf_size = 0;
4091                         rx_ring->sbq_len = 0;
4092                         rx_ring->sbq_size = 0;
4093                         rx_ring->sbq_buf_size = 0;
4094                         rx_ring->type = TX_Q;
4095                 }
4096         }
4097         return 0;
4098 }
4099
4100 static int qlge_open(struct net_device *ndev)
4101 {
4102         int err = 0;
4103         struct ql_adapter *qdev = netdev_priv(ndev);
4104
4105         err = ql_adapter_reset(qdev);
4106         if (err)
4107                 return err;
4108
4109         err = ql_configure_rings(qdev);
4110         if (err)
4111                 return err;
4112
4113         err = ql_get_adapter_resources(qdev);
4114         if (err)
4115                 goto error_up;
4116
4117         err = ql_adapter_up(qdev);
4118         if (err)
4119                 goto error_up;
4120
4121         return err;
4122
4123 error_up:
4124         ql_release_adapter_resources(qdev);
4125         return err;
4126 }
4127
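/* Resize the large rx buffers after an MTU change.  Waits briefly for
 * any in-flight reset to finish, brings the adapter down, recomputes
 * the large-buffer length and page order for the new MTU, then brings
 * the adapter back up.  If the down/up cycle fails the device is closed.
 */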
4128 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4129 {
4130         struct rx_ring *rx_ring;
4131         int i, status;
4132         u32 lbq_buf_len;
4133
4134         /* Wait for an outstanding reset to complete. */
4135         if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4136                 int i = 3;
4137                 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4138                         netif_err(qdev, ifup, qdev->ndev,
4139                                   "Waiting for adapter UP...\n");
4140                         ssleep(1);
4141                 }
4142
4143                 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4144                         netif_err(qdev, ifup, qdev->ndev,
4145                                   "Timed out waiting for adapter UP\n");
4146                         return -ETIMEDOUT;
4147                 }
4148         }
4149
4150         status = ql_adapter_down(qdev);
4151         if (status)
4152                 goto error;
4153
4154         /* Get the new rx buffer size. */
4155         lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4156                 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4157         qdev->lbq_buf_order = get_order(lbq_buf_len);
4158
4159         for (i = 0; i < qdev->rss_ring_count; i++) {
4160                 rx_ring = &qdev->rx_ring[i];
4161                 /* Set the new size. */
4162                 rx_ring->lbq_buf_size = lbq_buf_len;
4163         }
4164
4165         status = ql_adapter_up(qdev);
4166         if (status)
4167                 goto error;
4168
4169         return status;
4170 error:
4171         netif_alert(qdev, ifup, qdev->ndev,
4172                     "Driver up/down cycle failed, closing device.\n");
4173         set_bit(QL_ADAPTER_UP, &qdev->flags);
4174         dev_close(qdev->ndev);
4175         return status;
4176 }
4177
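/* Only the 1500 <-> 9000 (jumbo) MTU transition is accepted; any other
 * value returns -EINVAL.  If the interface is running, the rx buffers
 * are resized via ql_change_rx_buffers().  For example (interface name
 * assumed for illustration):
 *      ip link set dev eth0 mtu 9000
 */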
4178 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4179 {
4180         struct ql_adapter *qdev = netdev_priv(ndev);
4181         int status;
4182
4183         if (ndev->mtu == 1500 && new_mtu == 9000) {
4184                 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4185         } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4186                 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4187         } else
4188                 return -EINVAL;
4189
4190         queue_delayed_work(qdev->workqueue,
4191                         &qdev->mpi_port_cfg_work, 3*HZ);
4192
4193         ndev->mtu = new_mtu;
4194
4195         if (!netif_running(qdev->ndev)) {
4196                 return 0;
4197         }
4198
4199         status = ql_change_rx_buffers(qdev);
4200         if (status) {
4201                 netif_err(qdev, ifup, qdev->ndev,
4202                           "Changing MTU failed.\n");
4203         }
4204
4205         return status;
4206 }
4207
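/* Fold the per-ring software counters into ndev->stats: rx packets,
 * bytes, drops, errors and multicasts summed over the RSS rings; tx
 * packets, bytes and errors summed over the tx rings.
 */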
4208 static struct net_device_stats *qlge_get_stats(struct net_device
4209                                                *ndev)
4210 {
4211         struct ql_adapter *qdev = netdev_priv(ndev);
4212         struct rx_ring *rx_ring = &qdev->rx_ring[0];
4213         struct tx_ring *tx_ring = &qdev->tx_ring[0];
4214         unsigned long pkts, mcast, dropped, errors, bytes;
4215         int i;
4216
4217         /* Get RX stats. */
4218         pkts = mcast = dropped = errors = bytes = 0;
4219         for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4220                 pkts += rx_ring->rx_packets;
4221                 bytes += rx_ring->rx_bytes;
4222                 dropped += rx_ring->rx_dropped;
4223                 errors += rx_ring->rx_errors;
4224                 mcast += rx_ring->rx_multicast;
4225         }
4226         ndev->stats.rx_packets = pkts;
4227         ndev->stats.rx_bytes = bytes;
4228         ndev->stats.rx_dropped = dropped;
4229         ndev->stats.rx_errors = errors;
4230         ndev->stats.multicast = mcast;
4231
4232         /* Get TX stats. */
4233         pkts = errors = bytes = 0;
4234         for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4235                 pkts += tx_ring->tx_packets;
4236                 bytes += tx_ring->tx_bytes;
4237                 errors += tx_ring->tx_errors;
4238         }
4239         ndev->stats.tx_packets = pkts;
4240         ndev->stats.tx_bytes = bytes;
4241         ndev->stats.tx_errors = errors;
4242         return &ndev->stats;
4243 }
4244
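/* Apply the net_device rx filter flags to the hardware under the
 * routing-index semaphore: toggle the promiscuous and all-multi routing
 * slots on transitions, and load the multicast list into the MAC CAM,
 * falling back to all-multi when the list exceeds MAX_MULTICAST_ENTRIES.
 */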
4245 void qlge_set_multicast_list(struct net_device *ndev)
4246 {
4247         struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4248         struct netdev_hw_addr *ha;
4249         int i, status;
4250
4251         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4252         if (status)
4253                 return;
4254         /*
4255          * Set or clear promiscuous mode if a
4256          * transition is taking place.
4257          */
4258         if (ndev->flags & IFF_PROMISC) {
4259                 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4260                         if (ql_set_routing_reg
4261                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4262                                 netif_err(qdev, hw, qdev->ndev,
4263                                           "Failed to set promiscuous mode.\n");
4264                         } else {
4265                                 set_bit(QL_PROMISCUOUS, &qdev->flags);
4266                         }
4267                 }
4268         } else {
4269                 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4270                         if (ql_set_routing_reg
4271                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4272                                 netif_err(qdev, hw, qdev->ndev,
4273                                           "Failed to clear promiscuous mode.\n");
4274                         } else {
4275                                 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4276                         }
4277                 }
4278         }
4279
4280         /*
4281          * Set or clear all multicast mode if a
4282          * transition is taking place.
4283          */
4284         if ((ndev->flags & IFF_ALLMULTI) ||
4285             (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4286                 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4287                         if (ql_set_routing_reg
4288                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4289                                 netif_err(qdev, hw, qdev->ndev,
4290                                           "Failed to set all-multi mode.\n");
4291                         } else {
4292                                 set_bit(QL_ALLMULTI, &qdev->flags);
4293                         }
4294                 }
4295         } else {
4296                 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4297                         if (ql_set_routing_reg
4298                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4299                                 netif_err(qdev, hw, qdev->ndev,
4300                                           "Failed to clear all-multi mode.\n");
4301                         } else {
4302                                 clear_bit(QL_ALLMULTI, &qdev->flags);
4303                         }
4304                 }
4305         }
4306
4307         if (!netdev_mc_empty(ndev)) {
4308                 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4309                 if (status)
4310                         goto exit;
4311                 i = 0;
4312                 netdev_for_each_mc_addr(ha, ndev) {
4313                         if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4314                                                 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4315                                 netif_err(qdev, hw, qdev->ndev,
4316                                           "Failed to load multicast address.\n");
4317                                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4318                                 goto exit;
4319                         }
4320                         i++;
4321                 }
4322                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4323                 if (ql_set_routing_reg
4324                     (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4325                         netif_err(qdev, hw, qdev->ndev,
4326                                   "Failed to set multicast match mode.\n");
4327                 } else {
4328                         set_bit(QL_ALLMULTI, &qdev->flags);
4329                 }
4330         }
4331 exit:
4332         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4333 }
4334
4335 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4336 {
4337         struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4338         struct sockaddr *addr = p;
4339         int status;
4340
4341         if (!is_valid_ether_addr(addr->sa_data))
4342                 return -EADDRNOTAVAIL;
4343         memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4344         /* Update local copy of current mac address. */
4345         memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4346
4347         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4348         if (status)
4349                 return status;
4350         status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4351                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4352         if (status)
4353                 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4354         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4355         return status;
4356 }
4357
4358 static void qlge_tx_timeout(struct net_device *ndev)
4359 {
4360         struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4361         ql_queue_asic_error(qdev);
4362 }
4363
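/* Deferred ASIC error recovery: under rtnl, cycle the adapter down and
 * back up and restore the rx mode.  If either step fails the device is
 * closed.
 */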
4364 static void ql_asic_reset_work(struct work_struct *work)
4365 {
4366         struct ql_adapter *qdev =
4367             container_of(work, struct ql_adapter, asic_reset_work.work);
4368         int status;
4369         rtnl_lock();
4370         status = ql_adapter_down(qdev);
4371         if (status)
4372                 goto error;
4373
4374         status = ql_adapter_up(qdev);
4375         if (status)
4376                 goto error;
4377
4378         /* Restore rx mode. */
4379         clear_bit(QL_ALLMULTI, &qdev->flags);
4380         clear_bit(QL_PROMISCUOUS, &qdev->flags);
4381         qlge_set_multicast_list(qdev->ndev);
4382
4383         rtnl_unlock();
4384         return;
4385 error:
4386         netif_alert(qdev, ifup, qdev->ndev,
4387                     "Driver up/down cycle failed, closing device\n");
4388
4389         set_bit(QL_ADAPTER_UP, &qdev->flags);
4390         dev_close(qdev->ndev);
4391         rtnl_unlock();
4392 }
4393
4394 static struct nic_operations qla8012_nic_ops = {
4395         .get_flash              = ql_get_8012_flash_params,
4396         .port_initialize        = ql_8012_port_initialize,
4397 };
4398
4399 static struct nic_operations qla8000_nic_ops = {
4400         .get_flash              = ql_get_8000_flash_params,
4401         .port_initialize        = ql_8000_port_initialize,
4402 };
4403
4404 /* Find the pcie function number for the other NIC
4405  * on this chip.  Since both NIC functions share a
4406  * common firmware we have the lowest enabled function
4407  * do any common work.  Examples would be resetting
4408  * after a fatal firmware error, or doing a firmware
4409  * coredump.
4410  */
4411 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4412 {
4413         int status = 0;
4414         u32 temp;
4415         u32 nic_func1, nic_func2;
4416
4417         status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4418                         &temp);
4419         if (status)
4420                 return status;
4421
4422         nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4423                         MPI_TEST_NIC_FUNC_MASK);
4424         nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4425                         MPI_TEST_NIC_FUNC_MASK);
4426
4427         if (qdev->func == nic_func1)
4428                 qdev->alt_func = nic_func2;
4429         else if (qdev->func == nic_func2)
4430                 qdev->alt_func = nic_func1;
4431         else
4432                 status = -EIO;
4433
4434         return status;
4435 }
4436
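/* Read the per-function board parameters: the PCI function number and
 * its sibling NIC function, the port number, the XGMAC semaphore mask,
 * the link-up/port-init status bits, the mailbox register addresses,
 * the chip revision and the nic_ops matching the device ID.
 */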
4437 static int ql_get_board_info(struct ql_adapter *qdev)
4438 {
4439         int status;
4440         qdev->func =
4441             (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4442         if (qdev->func > 3)
4443                 return -EIO;
4444
4445         status = ql_get_alt_pcie_func(qdev);
4446         if (status)
4447                 return status;
4448
4449         qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4450         if (qdev->port) {
4451                 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4452                 qdev->port_link_up = STS_PL1;
4453                 qdev->port_init = STS_PI1;
4454                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4455                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4456         } else {
4457                 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4458                 qdev->port_link_up = STS_PL0;
4459                 qdev->port_init = STS_PI0;
4460                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4461                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4462         }
4463         qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4464         qdev->device_id = qdev->pdev->device;
4465         if (qdev->device_id == QLGE_DEVICE_ID_8012)
4466                 qdev->nic_ops = &qla8012_nic_ops;
4467         else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4468                 qdev->nic_ops = &qla8000_nic_ops;
4469         return status;
4470 }
4471
4472 static void ql_release_all(struct pci_dev *pdev)
4473 {
4474         struct net_device *ndev = pci_get_drvdata(pdev);
4475         struct ql_adapter *qdev = netdev_priv(ndev);
4476
4477         if (qdev->workqueue) {
4478                 destroy_workqueue(qdev->workqueue);
4479                 qdev->workqueue = NULL;
4480         }
4481
4482         if (qdev->reg_base)
4483                 iounmap(qdev->reg_base);
4484         if (qdev->doorbell_area)
4485                 iounmap(qdev->doorbell_area);
4486         vfree(qdev->mpi_coredump);
4487         pci_release_regions(pdev);
4488         pci_set_drvdata(pdev, NULL);
4489 }
4490
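/* One-time PCI and board setup for a newly probed function: enable the
 * device, map the register and doorbell BARs, select a 64- or 32-bit
 * DMA mask, optionally allocate the MPI coredump buffer, read the board
 * info and flash (which supplies the MAC address), set default ring
 * sizes and interrupt coalescing, and create the single-threaded
 * workqueue with its delayed-work handlers.
 */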
4491 static int __devinit ql_init_device(struct pci_dev *pdev,
4492                                     struct net_device *ndev, int cards_found)
4493 {
4494         struct ql_adapter *qdev = netdev_priv(ndev);
4495         int err = 0;
4496
4497         memset((void *)qdev, 0, sizeof(*qdev));
4498         err = pci_enable_device(pdev);
4499         if (err) {
4500                 dev_err(&pdev->dev, "PCI device enable failed.\n");
4501                 return err;
4502         }
4503
4504         qdev->ndev = ndev;
4505         qdev->pdev = pdev;
4506         pci_set_drvdata(pdev, ndev);
4507
4508         /* Set PCIe read request size */
4509         err = pcie_set_readrq(pdev, 4096);
4510         if (err) {
4511                 dev_err(&pdev->dev, "Set readrq failed.\n");
4512                 goto err_out1;
4513         }
4514
4515         err = pci_request_regions(pdev, DRV_NAME);
4516         if (err) {
4517                 dev_err(&pdev->dev, "PCI region request failed.\n");
4518                 goto err_out1;
4519         }
4520
4521         pci_set_master(pdev);
4522         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4523                 set_bit(QL_DMA64, &qdev->flags);
4524                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4525         } else {
4526                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4527                 if (!err)
4528                        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4529         }
4530
4531         if (err) {
4532                 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4533                 goto err_out2;
4534         }
4535
4536         /* Set PCIe reset type for EEH to fundamental. */
4537         pdev->needs_freset = 1;
4538         pci_save_state(pdev);
4539         qdev->reg_base =
4540             ioremap_nocache(pci_resource_start(pdev, 1),
4541                             pci_resource_len(pdev, 1));
4542         if (!qdev->reg_base) {
4543                 dev_err(&pdev->dev, "Register mapping failed.\n");
4544                 err = -ENOMEM;
4545                 goto err_out2;
4546         }
4547
4548         qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4549         qdev->doorbell_area =
4550             ioremap_nocache(pci_resource_start(pdev, 3),
4551                             pci_resource_len(pdev, 3));
4552         if (!qdev->doorbell_area) {
4553                 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4554                 err = -ENOMEM;
4555                 goto err_out2;
4556         }
4557
4558         err = ql_get_board_info(qdev);
4559         if (err) {
4560                 dev_err(&pdev->dev, "Register access failed.\n");
4561                 err = -EIO;
4562                 goto err_out2;
4563         }
4564         qdev->msg_enable = netif_msg_init(debug, default_msg);
4565         spin_lock_init(&qdev->hw_lock);
4566         spin_lock_init(&qdev->stats_lock);
4567
4568         if (qlge_mpi_coredump) {
4569                 qdev->mpi_coredump =
4570                         vmalloc(sizeof(struct ql_mpi_coredump));
4571                 if (qdev->mpi_coredump == NULL) {
4572                         dev_err(&pdev->dev, "Coredump alloc failed.\n");
4573                         err = -ENOMEM;
4574                         goto err_out2;
4575                 }
4576                 if (qlge_force_coredump)
4577                         set_bit(QL_FRC_COREDUMP, &qdev->flags);
4578         }
4579         /* make sure the EEPROM is good */
4580         err = qdev->nic_ops->get_flash(qdev);
4581         if (err) {
4582                 dev_err(&pdev->dev, "Invalid FLASH.\n");
4583                 goto err_out2;
4584         }
4585
4586         memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4587         /* Keep local copy of current mac address. */
4588         memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4589
4590         /* Set up the default ring sizes. */
4591         qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4592         qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4593
4594         /* Set up the coalescing parameters. */
4595         qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4596         qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4597         qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4598         qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4599
4600         /*
4601          * Set up the operating parameters.
4602          */
4603         qdev->rx_csum = 1;
4604         qdev->workqueue = create_singlethread_workqueue(ndev->name);
4605         INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4606         INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4607         INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4608         INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4609         INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4610         INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4611         init_completion(&qdev->ide_completion);
4612
4613         if (!cards_found) {
4614                 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4615                 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4616                          DRV_NAME, DRV_VERSION);
4617         }
4618         return 0;
4619 err_out2:
4620         ql_release_all(pdev);
4621 err_out1:
4622         pci_disable_device(pdev);
4623         return err;
4624 }
4625
4626 static const struct net_device_ops qlge_netdev_ops = {
4627         .ndo_open               = qlge_open,
4628         .ndo_stop               = qlge_close,
4629         .ndo_start_xmit         = qlge_send,
4630         .ndo_change_mtu         = qlge_change_mtu,
4631         .ndo_get_stats          = qlge_get_stats,
4632         .ndo_set_multicast_list = qlge_set_multicast_list,
4633         .ndo_set_mac_address    = qlge_set_mac_address,
4634         .ndo_validate_addr      = eth_validate_addr,
4635         .ndo_tx_timeout         = qlge_tx_timeout,
4636         .ndo_vlan_rx_register   = qlge_vlan_rx_register,
4637         .ndo_vlan_rx_add_vid    = qlge_vlan_rx_add_vid,
4638         .ndo_vlan_rx_kill_vid   = qlge_vlan_rx_kill_vid,
4639 };
4640
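/* Deferrable 5-second timer armed at probe time: reading the status
 * register gives EEH a chance to detect a dead PCI bus; the timer stops
 * rearming once the channel is reported offline.
 */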
4641 static void ql_timer(unsigned long data)
4642 {
4643         struct ql_adapter *qdev = (struct ql_adapter *)data;
4644         u32 var = 0;
4645
4646         var = ql_read32(qdev, STS);
4647         if (pci_channel_offline(qdev->pdev)) {
4648                 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4649                 return;
4650         }
4651
4652         mod_timer(&qdev->timer, jiffies + (5*HZ));
4653 }
4654
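/* PCI probe: allocate a multiqueue net_device sized to
 * min(MAX_CPUS, num_online_cpus()), initialize the PCI/board state, set
 * the offload features (checksum, SG, TSO, VLAN acceleration, GRO),
 * register the netdev and start the deferrable timer used for EEH
 * detection.
 */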
4655 static int __devinit qlge_probe(struct pci_dev *pdev,
4656                                 const struct pci_device_id *pci_entry)
4657 {
4658         struct net_device *ndev = NULL;
4659         struct ql_adapter *qdev = NULL;
4660         static int cards_found = 0;
4661         int err = 0;
4662
4663         ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4664                         min(MAX_CPUS, (int)num_online_cpus()));
4665         if (!ndev)
4666                 return -ENOMEM;
4667
4668         err = ql_init_device(pdev, ndev, cards_found);
4669         if (err < 0) {
4670                 free_netdev(ndev);
4671                 return err;
4672         }
4673
4674         qdev = netdev_priv(ndev);
4675         SET_NETDEV_DEV(ndev, &pdev->dev);
4676         ndev->features = (0
4677                           | NETIF_F_IP_CSUM
4678                           | NETIF_F_SG
4679                           | NETIF_F_TSO
4680                           | NETIF_F_TSO6
4681                           | NETIF_F_TSO_ECN
4682                           | NETIF_F_HW_VLAN_TX
4683                           | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
4684         ndev->features |= NETIF_F_GRO;
4685
4686         if (test_bit(QL_DMA64, &qdev->flags))
4687                 ndev->features |= NETIF_F_HIGHDMA;
4688
4689         /*
4690          * Set up net_device structure.
4691          */
4692         ndev->tx_queue_len = qdev->tx_ring_size;
4693         ndev->irq = pdev->irq;
4694
4695         ndev->netdev_ops = &qlge_netdev_ops;
4696         SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
4697         ndev->watchdog_timeo = 10 * HZ;
4698
4699         err = register_netdev(ndev);
4700         if (err) {
4701                 dev_err(&pdev->dev, "net device registration failed.\n");
4702                 ql_release_all(pdev);
4703                 pci_disable_device(pdev);
4704                 return err;
4705         }
4706         /* Start up the timer to trigger EEH if
4707          * the bus goes dead
4708          */
4709         init_timer_deferrable(&qdev->timer);
4710         qdev->timer.data = (unsigned long)qdev;
4711         qdev->timer.function = ql_timer;
4712         qdev->timer.expires = jiffies + (5*HZ);
4713         add_timer(&qdev->timer);
4714         ql_link_off(qdev);
4715         ql_display_dev_info(ndev);
4716         atomic_set(&qdev->lb_count, 0);
4717         cards_found++;
4718         return 0;
4719 }
4720
4721 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4722 {
4723         return qlge_send(skb, ndev);
4724 }
4725
4726 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4727 {
4728         return ql_clean_inbound_rx_ring(rx_ring, budget);
4729 }
4730
4731 static void __devexit qlge_remove(struct pci_dev *pdev)
4732 {
4733         struct net_device *ndev = pci_get_drvdata(pdev);
4734         struct ql_adapter *qdev = netdev_priv(ndev);
4735         del_timer_sync(&qdev->timer);
4736         ql_cancel_all_work_sync(qdev);
4737         unregister_netdev(ndev);
4738         ql_release_all(pdev);
4739         pci_disable_device(pdev);
4740         free_netdev(ndev);
4741 }
4742
4743 /* Clean up resources without touching hardware. */
4744 static void ql_eeh_close(struct net_device *ndev)
4745 {
4746         int i;
4747         struct ql_adapter *qdev = netdev_priv(ndev);
4748
4749         if (netif_carrier_ok(ndev)) {
4750                 netif_carrier_off(ndev);
4751                 netif_stop_queue(ndev);
4752         }
4753
4754         /* Disable the timer. */
4755         del_timer_sync(&qdev->timer);
4756         ql_cancel_all_work_sync(qdev);
4757
4758         for (i = 0; i < qdev->rss_ring_count; i++)
4759                 netif_napi_del(&qdev->rx_ring[i].napi);
4760
4761         clear_bit(QL_ADAPTER_UP, &qdev->flags);
4762         ql_tx_ring_clean(qdev);
4763         ql_free_rx_buffers(qdev);
4764         ql_release_adapter_resources(qdev);
4765 }
4766
4767 /*
4768  * This callback is called by the PCI subsystem whenever
4769  * a PCI bus error is detected.
4770  */
4771 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4772                                                enum pci_channel_state state)
4773 {
4774         struct net_device *ndev = pci_get_drvdata(pdev);
4775         struct ql_adapter *qdev = netdev_priv(ndev);
4776
4777         switch (state) {
4778         case pci_channel_io_normal:
4779                 return PCI_ERS_RESULT_CAN_RECOVER;
4780         case pci_channel_io_frozen:
4781                 netif_device_detach(ndev);
4782                 if (netif_running(ndev))
4783                         ql_eeh_close(ndev);
4784                 pci_disable_device(pdev);
4785                 return PCI_ERS_RESULT_NEED_RESET;
4786         case pci_channel_io_perm_failure:
4787                 dev_err(&pdev->dev,
4788                         "%s: pci_channel_io_perm_failure.\n", __func__);
4789                 ql_eeh_close(ndev);
4790                 set_bit(QL_EEH_FATAL, &qdev->flags);
4791                 return PCI_ERS_RESULT_DISCONNECT;
4792         }
4793
4794         /* Request a slot reset. */
4795         return PCI_ERS_RESULT_NEED_RESET;
4796 }
4797
4798 /*
4799  * This callback is called after the PCI bus has been reset.
4800  * Basically, this tries to restart the card from scratch.
4801  * This is a shortened version of the device probe/discovery code;
4802  * it resembles the first half of the probe routine.
4803  */
4804 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4805 {
4806         struct net_device *ndev = pci_get_drvdata(pdev);
4807         struct ql_adapter *qdev = netdev_priv(ndev);
4808
4809         pdev->error_state = pci_channel_io_normal;
4810
4811         pci_restore_state(pdev);
4812         if (pci_enable_device(pdev)) {
4813                 netif_err(qdev, ifup, qdev->ndev,
4814                           "Cannot re-enable PCI device after reset.\n");
4815                 return PCI_ERS_RESULT_DISCONNECT;
4816         }
4817         pci_set_master(pdev);
4818
4819         if (ql_adapter_reset(qdev)) {
4820                 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4821                 set_bit(QL_EEH_FATAL, &qdev->flags);
4822                 return PCI_ERS_RESULT_DISCONNECT;
4823         }
4824
4825         return PCI_ERS_RESULT_RECOVERED;
4826 }
4827
4828 static void qlge_io_resume(struct pci_dev *pdev)
4829 {
4830         struct net_device *ndev = pci_get_drvdata(pdev);
4831         struct ql_adapter *qdev = netdev_priv(ndev);
4832         int err = 0;
4833
4834         if (netif_running(ndev)) {
4835                 err = qlge_open(ndev);
4836                 if (err) {
4837                         netif_err(qdev, ifup, qdev->ndev,
4838                                   "Device initialization failed after reset.\n");
4839                         return;
4840                 }
4841         } else {
4842                 netif_err(qdev, ifup, qdev->ndev,
4843                           "Device was not running prior to EEH.\n");
4844         }
4845         mod_timer(&qdev->timer, jiffies + (5*HZ));
4846         netif_device_attach(ndev);
4847 }
4848
4849 static struct pci_error_handlers qlge_err_handler = {
4850         .error_detected = qlge_io_error_detected,
4851         .slot_reset = qlge_io_slot_reset,
4852         .resume = qlge_io_resume,
4853 };
4854
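/* PCI suspend: detach the net device, stop the EEH timer, bring the
 * adapter down if it was running, arm wake-on-LAN per qdev->wol, then
 * save PCI state and drop into the requested low-power state.  Also
 * used by qlge_shutdown().
 */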
4855 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4856 {
4857         struct net_device *ndev = pci_get_drvdata(pdev);
4858         struct ql_adapter *qdev = netdev_priv(ndev);
4859         int err;
4860
4861         netif_device_detach(ndev);
4862         del_timer_sync(&qdev->timer);
4863
4864         if (netif_running(ndev)) {
4865                 err = ql_adapter_down(qdev);
4866                 if (err)
4867                         return err;
4868         }
4869
4870         ql_wol(qdev);
4871         err = pci_save_state(pdev);
4872         if (err)
4873                 return err;
4874
4875         pci_disable_device(pdev);
4876
4877         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4878
4879         return 0;
4880 }
4881
4882 #ifdef CONFIG_PM
4883 static int qlge_resume(struct pci_dev *pdev)
4884 {
4885         struct net_device *ndev = pci_get_drvdata(pdev);
4886         struct ql_adapter *qdev = netdev_priv(ndev);
4887         int err;
4888
4889         pci_set_power_state(pdev, PCI_D0);
4890         pci_restore_state(pdev);
4891         err = pci_enable_device(pdev);
4892         if (err) {
4893                 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4894                 return err;
4895         }
4896         pci_set_master(pdev);
4897
4898         pci_enable_wake(pdev, PCI_D3hot, 0);
4899         pci_enable_wake(pdev, PCI_D3cold, 0);
4900
4901         if (netif_running(ndev)) {
4902                 err = ql_adapter_up(qdev);
4903                 if (err)
4904                         return err;
4905         }
4906
4907         mod_timer(&qdev->timer, jiffies + (5*HZ));
4908         netif_device_attach(ndev);
4909
4910         return 0;
4911 }
4912 #endif /* CONFIG_PM */
4913
4914 static void qlge_shutdown(struct pci_dev *pdev)
4915 {
4916         qlge_suspend(pdev, PMSG_SUSPEND);
4917 }
4918
4919 static struct pci_driver qlge_driver = {
4920         .name = DRV_NAME,
4921         .id_table = qlge_pci_tbl,
4922         .probe = qlge_probe,
4923         .remove = __devexit_p(qlge_remove),
4924 #ifdef CONFIG_PM
4925         .suspend = qlge_suspend,
4926         .resume = qlge_resume,
4927 #endif
4928         .shutdown = qlge_shutdown,
4929         .err_handler = &qlge_err_handler
4930 };
4931
4932 static int __init qlge_init_module(void)
4933 {
4934         return pci_register_driver(&qlge_driver);
4935 }
4936
4937 static void __exit qlge_exit(void)
4938 {
4939         pci_unregister_driver(&qlge_driver);
4940 }
4941
4942 module_init(qlge_init_module);
4943 module_exit(qlge_exit);