1/*
2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
7 */
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/types.h>
11#include <linux/module.h>
12#include <linux/list.h>
13#include <linux/pci.h>
14#include <linux/dma-mapping.h>
15#include <linux/pagemap.h>
16#include <linux/sched.h>
17#include <linux/slab.h>
18#include <linux/dmapool.h>
19#include <linux/mempool.h>
20#include <linux/spinlock.h>
21#include <linux/kthread.h>
22#include <linux/interrupt.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/in.h>
26#include <linux/ip.h>
27#include <linux/ipv6.h>
28#include <net/ipv6.h>
29#include <linux/tcp.h>
30#include <linux/udp.h>
31#include <linux/if_arp.h>
32#include <linux/if_ether.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/ethtool.h>
36#include <linux/skbuff.h>
c4e84bde 37#include <linux/if_vlan.h>
38#include <linux/delay.h>
39#include <linux/mm.h>
40#include <linux/vmalloc.h>
b7c6bfb7 41#include <net/ip6_checksum.h>
42
43#include "qlge.h"
44
45char qlge_driver_name[] = DRV_NAME;
46const char qlge_driver_version[] = DRV_VERSION;
47
48MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
49MODULE_DESCRIPTION(DRV_STRING " ");
50MODULE_LICENSE("GPL");
51MODULE_VERSION(DRV_VERSION);
52
53static const u32 default_msg =
54 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
55/* NETIF_MSG_TIMER | */
56 NETIF_MSG_IFDOWN |
57 NETIF_MSG_IFUP |
58 NETIF_MSG_RX_ERR |
59 NETIF_MSG_TX_ERR |
60/* NETIF_MSG_TX_QUEUED | */
61/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
62/* NETIF_MSG_PKTDATA | */
63 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
64
65static int debug = 0x00007fff; /* defaults above */
66module_param(debug, int, 0);
67MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
68
69#define MSIX_IRQ 0
70#define MSI_IRQ 1
71#define LEG_IRQ 2
72static int irq_type = MSIX_IRQ;
73module_param(irq_type, int, MSIX_IRQ);
74MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
75
76static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
b0c2aadf 77 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
cdca8d02 78 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
79 /* required last entry */
80 {0,}
81};
82
83MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
84
85/* This hardware semaphore causes exclusive access to
86 * resources shared between the NIC driver, MPI firmware,
87 * FCOE firmware and the FC driver.
88 */
89static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
90{
91 u32 sem_bits = 0;
92
93 switch (sem_mask) {
94 case SEM_XGMAC0_MASK:
95 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
96 break;
97 case SEM_XGMAC1_MASK:
98 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
99 break;
100 case SEM_ICB_MASK:
101 sem_bits = SEM_SET << SEM_ICB_SHIFT;
102 break;
103 case SEM_MAC_ADDR_MASK:
104 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
105 break;
106 case SEM_FLASH_MASK:
107 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
108 break;
109 case SEM_PROBE_MASK:
110 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
111 break;
112 case SEM_RT_IDX_MASK:
113 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
114 break;
115 case SEM_PROC_REG_MASK:
116 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
117 break;
118 default:
119 QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!.\n");
120 return -EINVAL;
121 }
122
123 ql_write32(qdev, SEM, sem_bits | sem_mask);
124 return !(ql_read32(qdev, SEM) & sem_bits);
125}
126
127int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
128{
0857e9d7 129 unsigned int wait_count = 30;
130 do {
131 if (!ql_sem_trylock(qdev, sem_mask))
132 return 0;
133 udelay(100);
134 } while (--wait_count);
135 return -ETIMEDOUT;
136}
137
138void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
139{
140 ql_write32(qdev, SEM, sem_mask);
141 ql_read32(qdev, SEM); /* flush */
142}
143
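/* A minimal usage sketch (not called anywhere in the driver): a caller
 * grabs the hardware semaphore for the shared resource it needs (the
 * flash, in this hypothetical example), does its accesses, then releases
 * it so the MPI/FCoE firmware and the FC driver can get in.  The real
 * callers are the flash and MAC address routines below.
 */
static int __maybe_unused ql_sem_usage_sketch(struct ql_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;	/* another function held it too long */

	/* ... access the flash registers here ... */

	ql_sem_unlock(qdev, SEM_FLASH_MASK);	/* the write is flushed inside */
	return 0;
}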
144/* This function waits for a specific bit to come ready
145 * in a given register. It is used mostly by the initialize
146 * process, but is also used in kernel thread API such as
147 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
148 */
149int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
150{
151 u32 temp;
152 int count = UDELAY_COUNT;
153
154 while (count) {
155 temp = ql_read32(qdev, reg);
156
157 /* check for errors */
158 if (temp & err_bit) {
159 QPRINTK(qdev, PROBE, ALERT,
160 "register 0x%.08x access error, value = 0x%.08x!.\n",
161 reg, temp);
162 return -EIO;
163 } else if (temp & bit)
164 return 0;
165 udelay(UDELAY_DELAY);
166 count--;
167 }
168 QPRINTK(qdev, PROBE, ALERT,
169 "Timed out waiting for reg %x to come ready.\n", reg);
170 return -ETIMEDOUT;
171}
172
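/* A minimal usage sketch (not called anywhere in the driver):
 * ql_wait_reg_rdy() is the building block behind every indexed register
 * access in this file.  Here it polls the MAC address index register for
 * its "write done" bit, with no error bit to watch for (hence the 0).
 */
static int __maybe_unused ql_wait_mac_idx_sketch(struct ql_adapter *qdev)
{
	return ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
}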
173/* The CFG register is used to download TX and RX control blocks
174 * to the chip. This function waits for an operation to complete.
175 */
176static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
177{
178 int count = UDELAY_COUNT;
179 u32 temp;
180
181 while (count) {
182 temp = ql_read32(qdev, CFG);
183 if (temp & CFG_LE)
184 return -EIO;
185 if (!(temp & bit))
186 return 0;
187 udelay(UDELAY_DELAY);
188 count--;
189 }
190 return -ETIMEDOUT;
191}
192
193
194/* Used to issue init control blocks to hw. Maps control block,
195 * sets address, triggers download, waits for completion.
196 */
197int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
198 u16 q_id)
199{
200 u64 map;
201 int status = 0;
202 int direction;
203 u32 mask;
204 u32 value;
205
206 direction =
207 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
208 PCI_DMA_FROMDEVICE;
209
210 map = pci_map_single(qdev->pdev, ptr, size, direction);
211 if (pci_dma_mapping_error(qdev->pdev, map)) {
212 QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
213 return -ENOMEM;
214 }
215
216 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
217 if (status)
218 return status;
219
220 status = ql_wait_cfg(qdev, bit);
221 if (status) {
222 QPRINTK(qdev, IFUP, ERR,
223 "Timed out waiting for CFG to come ready.\n");
224 goto exit;
225 }
226
227 ql_write32(qdev, ICB_L, (u32) map);
228 ql_write32(qdev, ICB_H, (u32) (map >> 32));
229
230 mask = CFG_Q_MASK | (bit << 16);
231 value = bit | (q_id << CFG_Q_SHIFT);
232 ql_write32(qdev, CFG, (mask | value));
233
234 /*
235 * Wait for the bit to clear after signaling hw.
236 */
237 status = ql_wait_cfg(qdev, bit);
238exit:
4322c5be 239 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
240 pci_unmap_single(qdev->pdev, map, size, direction);
241 return status;
242}
243
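/* A minimal usage sketch (not called anywhere in the driver; "icb" and
 * "q_id" are placeholders): the ring start-up code later in the file
 * hands its init control blocks to ql_write_cfg() in roughly this way.
 * CFG_LCQ asks the chip to load a completion queue control block;
 * ql_write_cfg() maps the block, programs ICB_L/ICB_H, kicks CFG and
 * waits for the bit to clear again.
 */
static int __maybe_unused ql_write_cfg_sketch(struct ql_adapter *qdev,
					      void *icb, int size, u16 q_id)
{
	return ql_write_cfg(qdev, icb, size, CFG_LCQ, q_id);
}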
244/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
245int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
246 u32 *value)
247{
248 u32 offset = 0;
249 int status;
250
251 switch (type) {
252 case MAC_ADDR_TYPE_MULTI_MAC:
253 case MAC_ADDR_TYPE_CAM_MAC:
254 {
255 status =
256 ql_wait_reg_rdy(qdev,
939678f8 257 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
258 if (status)
259 goto exit;
260 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
261 (index << MAC_ADDR_IDX_SHIFT) | /* index */
262 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
263 status =
264 ql_wait_reg_rdy(qdev,
939678f8 265 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
266 if (status)
267 goto exit;
268 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
269 status =
270 ql_wait_reg_rdy(qdev,
939678f8 271 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
272 if (status)
273 goto exit;
274 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
275 (index << MAC_ADDR_IDX_SHIFT) | /* index */
276 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
277 status =
278 ql_wait_reg_rdy(qdev,
939678f8 279 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
280 if (status)
281 goto exit;
282 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
283 if (type == MAC_ADDR_TYPE_CAM_MAC) {
284 status =
285 ql_wait_reg_rdy(qdev,
939678f8 286 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
287 if (status)
288 goto exit;
289 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
290 (index << MAC_ADDR_IDX_SHIFT) | /* index */
291 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
292 status =
293 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
939678f8 294 MAC_ADDR_MR, 0);
295 if (status)
296 goto exit;
297 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
298 }
299 break;
300 }
301 case MAC_ADDR_TYPE_VLAN:
302 case MAC_ADDR_TYPE_MULTI_FLTR:
303 default:
304 QPRINTK(qdev, IFUP, CRIT,
305 "Address type %d not yet supported.\n", type);
306 status = -EPERM;
307 }
308exit:
309 return status;
310}
311
312/* Set up a MAC, multicast or VLAN address for the
313 * inbound frame matching.
314 */
315static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
316 u16 index)
317{
318 u32 offset = 0;
319 int status = 0;
320
321 switch (type) {
322 case MAC_ADDR_TYPE_MULTI_MAC:
323 {
324 u32 upper = (addr[0] << 8) | addr[1];
325 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
326 (addr[4] << 8) | (addr[5]);
327
328 status =
329 ql_wait_reg_rdy(qdev,
330 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
331 if (status)
332 goto exit;
333 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
334 (index << MAC_ADDR_IDX_SHIFT) |
335 type | MAC_ADDR_E);
336 ql_write32(qdev, MAC_ADDR_DATA, lower);
337 status =
338 ql_wait_reg_rdy(qdev,
339 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
340 if (status)
341 goto exit;
342 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
343 (index << MAC_ADDR_IDX_SHIFT) |
344 type | MAC_ADDR_E);
345
346 ql_write32(qdev, MAC_ADDR_DATA, upper);
347 status =
348 ql_wait_reg_rdy(qdev,
349 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
350 if (status)
351 goto exit;
352 break;
353 }
354 case MAC_ADDR_TYPE_CAM_MAC:
355 {
356 u32 cam_output;
357 u32 upper = (addr[0] << 8) | addr[1];
358 u32 lower =
359 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
360 (addr[5]);
361
4974097a 362 QPRINTK(qdev, IFUP, DEBUG,
7c510e4b 363 "Adding %s address %pM"
364 " at index %d in the CAM.\n",
365 ((type ==
366 MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
7c510e4b 367 "UNICAST"), addr, index);
368
369 status =
370 ql_wait_reg_rdy(qdev,
939678f8 371 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
372 if (status)
373 goto exit;
374 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
375 (index << MAC_ADDR_IDX_SHIFT) | /* index */
376 type); /* type */
377 ql_write32(qdev, MAC_ADDR_DATA, lower);
378 status =
379 ql_wait_reg_rdy(qdev,
939678f8 380 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
381 if (status)
382 goto exit;
383 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
384 (index << MAC_ADDR_IDX_SHIFT) | /* index */
385 type); /* type */
386 ql_write32(qdev, MAC_ADDR_DATA, upper);
387 status =
388 ql_wait_reg_rdy(qdev,
939678f8 389 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
390 if (status)
391 goto exit;
392 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
393 (index << MAC_ADDR_IDX_SHIFT) | /* index */
394 type); /* type */
395 /* This field should also include the queue id
396 and possibly the function id. Right now we hardcode
397 the route field to NIC core.
398 */
399 cam_output = (CAM_OUT_ROUTE_NIC |
400 (qdev->
401 func << CAM_OUT_FUNC_SHIFT) |
402 (0 << CAM_OUT_CQ_ID_SHIFT));
403 if (qdev->vlgrp)
404 cam_output |= CAM_OUT_RV;
405 /* route to NIC core */
406 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
407 break;
408 }
409 case MAC_ADDR_TYPE_VLAN:
410 {
411 u32 enable_bit = *((u32 *) &addr[0]);
412 /* For VLAN, the addr actually holds a bit that
413 * either enables or disables the vlan id we are
414 * addressing. It's either MAC_ADDR_E on or off.
415 * That's bit-27 we're talking about.
416 */
417 QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
418 (enable_bit ? "Adding" : "Removing"),
419 index, (enable_bit ? "to" : "from"));
420
421 status =
422 ql_wait_reg_rdy(qdev,
939678f8 423 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
424 if (status)
425 goto exit;
426 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
427 (index << MAC_ADDR_IDX_SHIFT) | /* index */
428 type | /* type */
429 enable_bit); /* enable/disable */
430 break;
431 }
432 case MAC_ADDR_TYPE_MULTI_FLTR:
433 default:
434 QPRINTK(qdev, IFUP, CRIT,
435 "Address type %d not yet supported.\n", type);
436 status = -EPERM;
437 }
438exit:
439 return status;
440}
441
442/* Set or clear MAC address in hardware. We sometimes
443 * have to clear it to prevent wrong frame routing
444 * especially in a bonding environment.
445 */
446static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
447{
448 int status;
449 char zero_mac_addr[ETH_ALEN];
450 char *addr;
451
452 if (set) {
453 addr = &qdev->ndev->dev_addr[0];
454 QPRINTK(qdev, IFUP, DEBUG,
455 "Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n",
456 addr[0], addr[1], addr[2], addr[3],
457 addr[4], addr[5]);
458 } else {
459 memset(zero_mac_addr, 0, ETH_ALEN);
460 addr = &zero_mac_addr[0];
461 QPRINTK(qdev, IFUP, DEBUG,
462 "Clearing MAC address on %s\n",
463 qdev->ndev->name);
464 }
465 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
466 if (status)
467 return status;
468 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
469 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
470 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
471 if (status)
472 QPRINTK(qdev, IFUP, ERR, "Failed to init mac "
473 "address.\n");
474 return status;
475}
476
477void ql_link_on(struct ql_adapter *qdev)
478{
479 QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
480 qdev->ndev->name);
481 netif_carrier_on(qdev->ndev);
482 ql_set_mac_addr(qdev, 1);
483}
484
485void ql_link_off(struct ql_adapter *qdev)
486{
487 QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
488 qdev->ndev->name);
489 netif_carrier_off(qdev->ndev);
490 ql_set_mac_addr(qdev, 0);
491}
492
493/* Get a specific frame routing value from the CAM.
494 * Used for debug and reg dump.
495 */
496int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
497{
498 int status = 0;
499
939678f8 500 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
501 if (status)
502 goto exit;
503
504 ql_write32(qdev, RT_IDX,
505 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
939678f8 506 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
507 if (status)
508 goto exit;
509 *value = ql_read32(qdev, RT_DATA);
510exit:
511 return status;
512}
513
514/* The NIC function for this chip has 16 routing indexes. Each one can be used
515 * to route different frame types to various inbound queues. We send broadcast/
516 * multicast/error frames to the default queue for slow handling,
517 * and CAM hit/RSS frames to the fast handling queues.
518 */
519static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
520 int enable)
521{
8587ea35 522 int status = -EINVAL; /* Return error if no mask match. */
523 u32 value = 0;
524
525 QPRINTK(qdev, IFUP, DEBUG,
526 "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
527 (enable ? "Adding" : "Removing"),
528 ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
529 ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
530 ((index ==
531 RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
532 ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
533 ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
534 ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
535 ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
536 ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
537 ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
538 ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
539 ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
540 ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
541 ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
542 ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
543 ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
544 ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
545 (enable ? "to" : "from"));
546
547 switch (mask) {
548 case RT_IDX_CAM_HIT:
549 {
550 value = RT_IDX_DST_CAM_Q | /* dest */
551 RT_IDX_TYPE_NICQ | /* type */
552 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
553 break;
554 }
555 case RT_IDX_VALID: /* Promiscuous Mode frames. */
556 {
557 value = RT_IDX_DST_DFLT_Q | /* dest */
558 RT_IDX_TYPE_NICQ | /* type */
559 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
560 break;
561 }
562 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
563 {
564 value = RT_IDX_DST_DFLT_Q | /* dest */
565 RT_IDX_TYPE_NICQ | /* type */
566 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
567 break;
568 }
569 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
570 {
571 value = RT_IDX_DST_DFLT_Q | /* dest */
572 RT_IDX_TYPE_NICQ | /* type */
573 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
574 break;
575 }
576 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
577 {
e163d7f2 578 value = RT_IDX_DST_DFLT_Q | /* dest */
579 RT_IDX_TYPE_NICQ | /* type */
580 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
581 break;
582 }
583 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
584 {
e163d7f2 585 value = RT_IDX_DST_DFLT_Q | /* dest */
586 RT_IDX_TYPE_NICQ | /* type */
587 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
588 break;
589 }
590 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
591 {
592 value = RT_IDX_DST_RSS | /* dest */
593 RT_IDX_TYPE_NICQ | /* type */
594 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
595 break;
596 }
597 case 0: /* Clear the E-bit on an entry. */
598 {
599 value = RT_IDX_DST_DFLT_Q | /* dest */
600 RT_IDX_TYPE_NICQ | /* type */
601 (index << RT_IDX_IDX_SHIFT);/* index */
602 break;
603 }
604 default:
605 QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
606 mask);
607 status = -EPERM;
608 goto exit;
609 }
610
611 if (value) {
612 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
613 if (status)
614 goto exit;
615 value |= (enable ? RT_IDX_E : 0);
616 ql_write32(qdev, RT_IDX, value);
617 ql_write32(qdev, RT_DATA, enable ? mask : 0);
618 }
619exit:
620 return status;
621}
622
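/* A minimal usage sketch (not called anywhere in the driver): typical
 * slot/mask pairs handed to ql_set_routing_reg().  Broadcast frames go to
 * the default (slow path) queue and unicast CAM hits go to the fast
 * handling queues; the routing setup done at ifup time is essentially a
 * longer version of this.
 */
static int __maybe_unused ql_routing_sketch(struct ql_adapter *qdev)
{
	int status;

	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
	if (status)
		return status;
	return ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT, RT_IDX_CAM_HIT, 1);
}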
623static void ql_enable_interrupts(struct ql_adapter *qdev)
624{
625 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
626}
627
628static void ql_disable_interrupts(struct ql_adapter *qdev)
629{
630 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
631}
632
633/* If we're running with multiple MSI-X vectors then we enable on the fly.
634 * Otherwise, we may have multiple outstanding workers and don't want to
635 * enable until the last one finishes. In this case, the irq_cnt gets
 636 * incremented every time we queue a worker and decremented every time
637 * a worker finishes. Once it hits zero we enable the interrupt.
638 */
bb0d215c 639u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
c4e84bde 640{
641 u32 var = 0;
642 unsigned long hw_flags = 0;
643 struct intr_context *ctx = qdev->intr_context + intr;
644
645 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
646 /* Always enable if we're MSIX multi interrupts and
647 * it's not the default (zeroeth) interrupt.
648 */
c4e84bde 649 ql_write32(qdev, INTR_EN,
650 ctx->intr_en_mask);
651 var = ql_read32(qdev, STS);
652 return var;
c4e84bde 653 }
654
655 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
656 if (atomic_dec_and_test(&ctx->irq_cnt)) {
657 ql_write32(qdev, INTR_EN,
658 ctx->intr_en_mask);
659 var = ql_read32(qdev, STS);
660 }
661 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
662 return var;
663}
664
665static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
666{
667 u32 var = 0;
bb0d215c 668 struct intr_context *ctx;
c4e84bde 669
670 /* HW disables for us if we're MSIX multi interrupts and
671 * it's not the default (zeroeth) interrupt.
672 */
673 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
674 return 0;
675
676 ctx = qdev->intr_context + intr;
08b1bc8f 677 spin_lock(&qdev->hw_lock);
bb0d215c 678 if (!atomic_read(&ctx->irq_cnt)) {
c4e84bde 679 ql_write32(qdev, INTR_EN,
bb0d215c 680 ctx->intr_dis_mask);
681 var = ql_read32(qdev, STS);
682 }
bb0d215c 683 atomic_inc(&ctx->irq_cnt);
08b1bc8f 684 spin_unlock(&qdev->hw_lock);
685 return var;
686}
687
688static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
689{
690 int i;
691 for (i = 0; i < qdev->intr_count; i++) {
692 /* The enable call does a atomic_dec_and_test
693 * and enables only if the result is zero.
694 * So we precharge it here.
695 */
696 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
697 i == 0))
698 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
699 ql_enable_completion_interrupt(qdev, i);
700 }
701
702}
703
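/* A minimal sketch (not called anywhere in the driver) of the reference
 * counting described above: without MSI-X multi vectors every disable
 * bumps irq_cnt, every enable decrements it, and the interrupt is only
 * re-armed once the count is back down to zero.
 */
static void __maybe_unused ql_intr_refcount_sketch(struct ql_adapter *qdev)
{
	ql_disable_completion_interrupt(qdev, 0);	/* irq_cnt++ */
	/* ... service the completion queue or run the worker ... */
	ql_enable_completion_interrupt(qdev, 0);	/* irq_cnt--, re-arm at 0 */
}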
704static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
705{
706 int status, i;
707 u16 csum = 0;
708 __le16 *flash = (__le16 *)&qdev->flash;
709
710 status = strncmp((char *)&qdev->flash, str, 4);
711 if (status) {
712 QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
713 return status;
714 }
715
716 for (i = 0; i < size; i++)
717 csum += le16_to_cpu(*flash++);
718
719 if (csum)
720 QPRINTK(qdev, IFUP, ERR,
721 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
722
723 return csum;
724}
725
26351479 726static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
727{
728 int status = 0;
729 /* wait for reg to come ready */
730 status = ql_wait_reg_rdy(qdev,
731 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
732 if (status)
733 goto exit;
734 /* set up for reg read */
735 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
736 /* wait for reg to come ready */
737 status = ql_wait_reg_rdy(qdev,
738 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
739 if (status)
740 goto exit;
741 /* This data is stored on flash as an array of
742 * __le32. Since ql_read32() returns cpu endian
743 * we need to swap it back.
744 */
745 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
746exit:
747 return status;
748}
749
750static int ql_get_8000_flash_params(struct ql_adapter *qdev)
751{
752 u32 i, size;
753 int status;
754 __le32 *p = (__le32 *)&qdev->flash;
755 u32 offset;
542512e4 756 u8 mac_addr[6];
757
758 /* Get flash offset for function and adjust
759 * for dword access.
760 */
e4552f51 761 if (!qdev->port)
762 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
763 else
764 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
765
766 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
767 return -ETIMEDOUT;
768
769 size = sizeof(struct flash_params_8000) / sizeof(u32);
770 for (i = 0; i < size; i++, p++) {
771 status = ql_read_flash_word(qdev, i+offset, p);
772 if (status) {
773 QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
774 goto exit;
775 }
776 }
777
778 status = ql_validate_flash(qdev,
779 sizeof(struct flash_params_8000) / sizeof(u16),
780 "8000");
781 if (status) {
782 QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
783 status = -EINVAL;
784 goto exit;
785 }
786
787 /* Extract either manufacturer or BOFM modified
788 * MAC address.
789 */
790 if (qdev->flash.flash_params_8000.data_type1 == 2)
791 memcpy(mac_addr,
792 qdev->flash.flash_params_8000.mac_addr1,
793 qdev->ndev->addr_len);
794 else
795 memcpy(mac_addr,
796 qdev->flash.flash_params_8000.mac_addr,
797 qdev->ndev->addr_len);
798
799 if (!is_valid_ether_addr(mac_addr)) {
800 QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
801 status = -EINVAL;
802 goto exit;
803 }
804
805 memcpy(qdev->ndev->dev_addr,
542512e4 806 mac_addr,
807 qdev->ndev->addr_len);
808
809exit:
810 ql_sem_unlock(qdev, SEM_FLASH_MASK);
811 return status;
812}
813
b0c2aadf 814static int ql_get_8012_flash_params(struct ql_adapter *qdev)
815{
816 int i;
817 int status;
26351479 818 __le32 *p = (__le32 *)&qdev->flash;
e78f5fa7 819 u32 offset = 0;
b0c2aadf 820 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
821
822 /* Second function's parameters follow the first
823 * function's.
824 */
e4552f51 825 if (qdev->port)
b0c2aadf 826 offset = size;
827
828 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
829 return -ETIMEDOUT;
830
b0c2aadf 831 for (i = 0; i < size; i++, p++) {
e78f5fa7 832 status = ql_read_flash_word(qdev, i+offset, p);
833 if (status) {
834 QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
835 goto exit;
836 }
837
838 }
839
840 status = ql_validate_flash(qdev,
841 sizeof(struct flash_params_8012) / sizeof(u16),
842 "8012");
843 if (status) {
844 QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
845 status = -EINVAL;
846 goto exit;
847 }
848
849 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
850 status = -EINVAL;
851 goto exit;
852 }
853
854 memcpy(qdev->ndev->dev_addr,
855 qdev->flash.flash_params_8012.mac_addr,
856 qdev->ndev->addr_len);
857
858exit:
859 ql_sem_unlock(qdev, SEM_FLASH_MASK);
860 return status;
861}
862
863/* xgmac register are located behind the xgmac_addr and xgmac_data
864 * register pair. Each read/write requires us to wait for the ready
865 * bit before reading/writing the data.
866 */
867static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
868{
869 int status;
870 /* wait for reg to come ready */
871 status = ql_wait_reg_rdy(qdev,
872 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
873 if (status)
874 return status;
875 /* write the data to the data reg */
876 ql_write32(qdev, XGMAC_DATA, data);
877 /* trigger the write */
878 ql_write32(qdev, XGMAC_ADDR, reg);
879 return status;
880}
881
882/* xgmac register are located behind the xgmac_addr and xgmac_data
883 * register pair. Each read/write requires us to wait for the ready
884 * bit before reading/writing the data.
885 */
886int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
887{
888 int status = 0;
889 /* wait for reg to come ready */
890 status = ql_wait_reg_rdy(qdev,
891 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
892 if (status)
893 goto exit;
894 /* set up for reg read */
895 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
896 /* wait for reg to come ready */
897 status = ql_wait_reg_rdy(qdev,
898 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
899 if (status)
900 goto exit;
901 /* get the data */
902 *data = ql_read32(qdev, XGMAC_DATA);
903exit:
904 return status;
905}
906
907/* This is used for reading the 64-bit statistics regs. */
908int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
909{
910 int status = 0;
911 u32 hi = 0;
912 u32 lo = 0;
913
914 status = ql_read_xgmac_reg(qdev, reg, &lo);
915 if (status)
916 goto exit;
917
918 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
919 if (status)
920 goto exit;
921
922 *data = (u64) lo | ((u64) hi << 32);
923
924exit:
925 return status;
926}
927
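/* A minimal sketch (not called anywhere in the driver): pulling one
 * 64-bit XGMAC statistics counter through the register pair above.  The
 * offset below is a placeholder only; the real offsets live in qlge.h and
 * are consumed by the ethtool statistics code elsewhere in the driver.
 */
static int __maybe_unused ql_read_xgmac_stat_sketch(struct ql_adapter *qdev,
						    u64 *counter)
{
	u32 hypothetical_stat_reg = 0x200;	/* placeholder offset */

	return ql_read_xgmac_reg64(qdev, hypothetical_stat_reg, counter);
}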
928static int ql_8000_port_initialize(struct ql_adapter *qdev)
929{
bcc2cb3b 930 int status;
931 /*
932 * Get MPI firmware version for driver banner
 933 * and ethtool info.
934 */
935 status = ql_mb_about_fw(qdev);
936 if (status)
937 goto exit;
938 status = ql_mb_get_fw_state(qdev);
939 if (status)
940 goto exit;
941 /* Wake up a worker to get/set the TX/RX frame sizes. */
942 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
943exit:
944 return status;
945}
946
947/* Take the MAC Core out of reset.
948 * Enable statistics counting.
949 * Take the transmitter/receiver out of reset.
950 * This functionality may be done in the MPI firmware at a
951 * later date.
952 */
b0c2aadf 953static int ql_8012_port_initialize(struct ql_adapter *qdev)
954{
955 int status = 0;
956 u32 data;
957
958 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
959 /* Another function has the semaphore, so
960 * wait for the port init bit to come ready.
961 */
962 QPRINTK(qdev, LINK, INFO,
963 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
964 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
965 if (status) {
966 QPRINTK(qdev, LINK, CRIT,
967 "Port initialize timed out.\n");
968 }
969 return status;
970 }
971
972 QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!.\n");
973 /* Set the core reset. */
974 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
975 if (status)
976 goto end;
977 data |= GLOBAL_CFG_RESET;
978 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
979 if (status)
980 goto end;
981
982 /* Clear the core reset and turn on jumbo for receiver. */
983 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
984 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
985 data |= GLOBAL_CFG_TX_STAT_EN;
986 data |= GLOBAL_CFG_RX_STAT_EN;
987 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
988 if (status)
989 goto end;
990
 991 /* Enable transmitter, and clear its reset. */
992 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
993 if (status)
994 goto end;
995 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
996 data |= TX_CFG_EN; /* Enable the transmitter. */
997 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
998 if (status)
999 goto end;
1000
 1001 /* Enable receiver and clear its reset. */
1002 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1003 if (status)
1004 goto end;
1005 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
1006 data |= RX_CFG_EN; /* Enable the receiver. */
1007 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1008 if (status)
1009 goto end;
1010
1011 /* Turn on jumbo. */
1012 status =
1013 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1014 if (status)
1015 goto end;
1016 status =
1017 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1018 if (status)
1019 goto end;
1020
1021 /* Signal to the world that the port is enabled. */
1022 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1023end:
1024 ql_sem_unlock(qdev, qdev->xg_sem_mask);
1025 return status;
1026}
1027
1028/* Get the next large buffer. */
8668ae92 1029static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1030{
1031 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1032 rx_ring->lbq_curr_idx++;
1033 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1034 rx_ring->lbq_curr_idx = 0;
1035 rx_ring->lbq_free_cnt++;
1036 return lbq_desc;
1037}
1038
1039/* Get the next small buffer. */
8668ae92 1040static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1041{
1042 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1043 rx_ring->sbq_curr_idx++;
1044 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1045 rx_ring->sbq_curr_idx = 0;
1046 rx_ring->sbq_free_cnt++;
1047 return sbq_desc;
1048}
1049
1050/* Update an rx ring index. */
1051static void ql_update_cq(struct rx_ring *rx_ring)
1052{
1053 rx_ring->cnsmr_idx++;
1054 rx_ring->curr_entry++;
1055 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1056 rx_ring->cnsmr_idx = 0;
1057 rx_ring->curr_entry = rx_ring->cq_base;
1058 }
1059}
1060
1061static void ql_write_cq_idx(struct rx_ring *rx_ring)
1062{
1063 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1064}
1065
1066/* Process (refill) a large buffer queue. */
1067static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1068{
1069 u32 clean_idx = rx_ring->lbq_clean_idx;
1070 u32 start_idx = clean_idx;
c4e84bde 1071 struct bq_desc *lbq_desc;
1072 u64 map;
1073 int i;
1074
1075 while (rx_ring->lbq_free_cnt > 16) {
1076 for (i = 0; i < 16; i++) {
1077 QPRINTK(qdev, RX_STATUS, DEBUG,
1078 "lbq: try cleaning clean_idx = %d.\n",
1079 clean_idx);
1080 lbq_desc = &rx_ring->lbq[clean_idx];
1081 if (lbq_desc->p.lbq_page == NULL) {
1082 QPRINTK(qdev, RX_STATUS, DEBUG,
1083 "lbq: getting new page for index %d.\n",
1084 lbq_desc->index);
1085 lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
1086 if (lbq_desc->p.lbq_page == NULL) {
79d2b29e 1087 rx_ring->lbq_clean_idx = clean_idx;
1088 QPRINTK(qdev, RX_STATUS, ERR,
1089 "Couldn't get a page.\n");
1090 return;
1091 }
1092 map = pci_map_page(qdev->pdev,
1093 lbq_desc->p.lbq_page,
1094 0, PAGE_SIZE,
1095 PCI_DMA_FROMDEVICE);
1096 if (pci_dma_mapping_error(qdev->pdev, map)) {
79d2b29e 1097 rx_ring->lbq_clean_idx = clean_idx;
1098 put_page(lbq_desc->p.lbq_page);
1099 lbq_desc->p.lbq_page = NULL;
1100 QPRINTK(qdev, RX_STATUS, ERR,
1101 "PCI mapping failed.\n");
1102 return;
1103 }
1104 pci_unmap_addr_set(lbq_desc, mapaddr, map);
1105 pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
2c9a0d41 1106 *lbq_desc->addr = cpu_to_le64(map);
1107 }
1108 clean_idx++;
1109 if (clean_idx == rx_ring->lbq_len)
1110 clean_idx = 0;
1111 }
1112
1113 rx_ring->lbq_clean_idx = clean_idx;
1114 rx_ring->lbq_prod_idx += 16;
1115 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1116 rx_ring->lbq_prod_idx = 0;
1117 rx_ring->lbq_free_cnt -= 16;
1118 }
1119
1120 if (start_idx != clean_idx) {
1121 QPRINTK(qdev, RX_STATUS, DEBUG,
1122 "lbq: updating prod idx = %d.\n",
1123 rx_ring->lbq_prod_idx);
1124 ql_write_db_reg(rx_ring->lbq_prod_idx,
1125 rx_ring->lbq_prod_idx_db_reg);
1126 }
1127}
1128
1129/* Process (refill) a small buffer queue. */
1130static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1131{
1132 u32 clean_idx = rx_ring->sbq_clean_idx;
1133 u32 start_idx = clean_idx;
c4e84bde 1134 struct bq_desc *sbq_desc;
1135 u64 map;
1136 int i;
1137
1138 while (rx_ring->sbq_free_cnt > 16) {
1139 for (i = 0; i < 16; i++) {
1140 sbq_desc = &rx_ring->sbq[clean_idx];
1141 QPRINTK(qdev, RX_STATUS, DEBUG,
1142 "sbq: try cleaning clean_idx = %d.\n",
1143 clean_idx);
1144 if (sbq_desc->p.skb == NULL) {
1145 QPRINTK(qdev, RX_STATUS, DEBUG,
1146 "sbq: getting new skb for index %d.\n",
1147 sbq_desc->index);
1148 sbq_desc->p.skb =
1149 netdev_alloc_skb(qdev->ndev,
1150 rx_ring->sbq_buf_size);
1151 if (sbq_desc->p.skb == NULL) {
1152 QPRINTK(qdev, PROBE, ERR,
1153 "Couldn't get an skb.\n");
1154 rx_ring->sbq_clean_idx = clean_idx;
1155 return;
1156 }
1157 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1158 map = pci_map_single(qdev->pdev,
1159 sbq_desc->p.skb->data,
1160 rx_ring->sbq_buf_size /
1161 2, PCI_DMA_FROMDEVICE);
1162 if (pci_dma_mapping_error(qdev->pdev, map)) {
1163 QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
1164 rx_ring->sbq_clean_idx = clean_idx;
1165 dev_kfree_skb_any(sbq_desc->p.skb);
1166 sbq_desc->p.skb = NULL;
1167 return;
1168 }
1169 pci_unmap_addr_set(sbq_desc, mapaddr, map);
1170 pci_unmap_len_set(sbq_desc, maplen,
1171 rx_ring->sbq_buf_size / 2);
2c9a0d41 1172 *sbq_desc->addr = cpu_to_le64(map);
1173 }
1174
1175 clean_idx++;
1176 if (clean_idx == rx_ring->sbq_len)
1177 clean_idx = 0;
1178 }
1179 rx_ring->sbq_clean_idx = clean_idx;
1180 rx_ring->sbq_prod_idx += 16;
1181 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1182 rx_ring->sbq_prod_idx = 0;
1183 rx_ring->sbq_free_cnt -= 16;
1184 }
1185
1186 if (start_idx != clean_idx) {
1187 QPRINTK(qdev, RX_STATUS, DEBUG,
1188 "sbq: updating prod idx = %d.\n",
1189 rx_ring->sbq_prod_idx);
1190 ql_write_db_reg(rx_ring->sbq_prod_idx,
1191 rx_ring->sbq_prod_idx_db_reg);
1192 }
1193}
1194
1195static void ql_update_buffer_queues(struct ql_adapter *qdev,
1196 struct rx_ring *rx_ring)
1197{
1198 ql_update_sbq(qdev, rx_ring);
1199 ql_update_lbq(qdev, rx_ring);
1200}
1201
1202/* Unmaps tx buffers. Can be called from send() if a pci mapping
1203 * fails at some stage, or from the interrupt when a tx completes.
1204 */
1205static void ql_unmap_send(struct ql_adapter *qdev,
1206 struct tx_ring_desc *tx_ring_desc, int mapped)
1207{
1208 int i;
1209 for (i = 0; i < mapped; i++) {
1210 if (i == 0 || (i == 7 && mapped > 7)) {
1211 /*
1212 * Unmap the skb->data area, or the
1213 * external sglist (AKA the Outbound
1214 * Address List (OAL)).
1215 * If its the zeroeth element, then it's
1216 * the skb->data area. If it's the 7th
1217 * element and there is more than 6 frags,
1218 * then its an OAL.
1219 */
1220 if (i == 7) {
1221 QPRINTK(qdev, TX_DONE, DEBUG,
1222 "unmapping OAL area.\n");
1223 }
1224 pci_unmap_single(qdev->pdev,
1225 pci_unmap_addr(&tx_ring_desc->map[i],
1226 mapaddr),
1227 pci_unmap_len(&tx_ring_desc->map[i],
1228 maplen),
1229 PCI_DMA_TODEVICE);
1230 } else {
1231 QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
1232 i);
1233 pci_unmap_page(qdev->pdev,
1234 pci_unmap_addr(&tx_ring_desc->map[i],
1235 mapaddr),
1236 pci_unmap_len(&tx_ring_desc->map[i],
1237 maplen), PCI_DMA_TODEVICE);
1238 }
1239 }
1240
1241}
1242
1243/* Map the buffers for this transmit. This will return
1244 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1245 */
1246static int ql_map_send(struct ql_adapter *qdev,
1247 struct ob_mac_iocb_req *mac_iocb_ptr,
1248 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1249{
1250 int len = skb_headlen(skb);
1251 dma_addr_t map;
1252 int frag_idx, err, map_idx = 0;
1253 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1254 int frag_cnt = skb_shinfo(skb)->nr_frags;
1255
1256 if (frag_cnt) {
1257 QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
1258 }
1259 /*
1260 * Map the skb buffer first.
1261 */
1262 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1263
1264 err = pci_dma_mapping_error(qdev->pdev, map);
1265 if (err) {
1266 QPRINTK(qdev, TX_QUEUED, ERR,
1267 "PCI mapping failed with error: %d\n", err);
1268
1269 return NETDEV_TX_BUSY;
1270 }
1271
1272 tbd->len = cpu_to_le32(len);
1273 tbd->addr = cpu_to_le64(map);
1274 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1275 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1276 map_idx++;
1277
1278 /*
1279 * This loop fills the remainder of the 8 address descriptors
1280 * in the IOCB. If there are more than 7 fragments, then the
1281 * eighth address desc will point to an external list (OAL).
1282 * When this happens, the remainder of the frags will be stored
1283 * in this list.
1284 */
1285 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1286 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1287 tbd++;
1288 if (frag_idx == 6 && frag_cnt > 7) {
1289 /* Let's tack on an sglist.
1290 * Our control block will now
1291 * look like this:
1292 * iocb->seg[0] = skb->data
1293 * iocb->seg[1] = frag[0]
1294 * iocb->seg[2] = frag[1]
1295 * iocb->seg[3] = frag[2]
1296 * iocb->seg[4] = frag[3]
1297 * iocb->seg[5] = frag[4]
1298 * iocb->seg[6] = frag[5]
1299 * iocb->seg[7] = ptr to OAL (external sglist)
1300 * oal->seg[0] = frag[6]
1301 * oal->seg[1] = frag[7]
1302 * oal->seg[2] = frag[8]
1303 * oal->seg[3] = frag[9]
1304 * oal->seg[4] = frag[10]
1305 * etc...
1306 */
1307 /* Tack on the OAL in the eighth segment of IOCB. */
1308 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1309 sizeof(struct oal),
1310 PCI_DMA_TODEVICE);
1311 err = pci_dma_mapping_error(qdev->pdev, map);
1312 if (err) {
1313 QPRINTK(qdev, TX_QUEUED, ERR,
1314 "PCI mapping outbound address list with error: %d\n",
1315 err);
1316 goto map_error;
1317 }
1318
1319 tbd->addr = cpu_to_le64(map);
1320 /*
1321 * The length is the number of fragments
1322 * that remain to be mapped times the length
1323 * of our sglist (OAL).
1324 */
1325 tbd->len =
1326 cpu_to_le32((sizeof(struct tx_buf_desc) *
1327 (frag_cnt - frag_idx)) | TX_DESC_C);
1328 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1329 map);
1330 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1331 sizeof(struct oal));
1332 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1333 map_idx++;
1334 }
1335
1336 map =
1337 pci_map_page(qdev->pdev, frag->page,
1338 frag->page_offset, frag->size,
1339 PCI_DMA_TODEVICE);
1340
1341 err = pci_dma_mapping_error(qdev->pdev, map);
1342 if (err) {
1343 QPRINTK(qdev, TX_QUEUED, ERR,
1344 "PCI mapping frags failed with error: %d.\n",
1345 err);
1346 goto map_error;
1347 }
1348
1349 tbd->addr = cpu_to_le64(map);
1350 tbd->len = cpu_to_le32(frag->size);
1351 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1352 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1353 frag->size);
1354
1355 }
1356 /* Save the number of segments we've mapped. */
1357 tx_ring_desc->map_cnt = map_idx;
1358 /* Terminate the last segment. */
1359 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1360 return NETDEV_TX_OK;
1361
1362map_error:
1363 /*
1364 * If the first frag mapping failed, then i will be zero.
1365 * This causes the unmap of the skb->data area. Otherwise
1366 * we pass in the number of frags that mapped successfully
1367 * so they can be umapped.
1368 */
1369 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1370 return NETDEV_TX_BUSY;
1371}
1372
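/* A minimal sketch (not called anywhere in the driver) of when the OAL
 * described above is needed: the IOCB carries eight tx_buf_desc slots,
 * skb->data takes the first, so up to seven page fragments fit inline and
 * anything beyond that spills through the eighth slot into the OAL.
 */
static bool __maybe_unused ql_tx_needs_oal_sketch(struct sk_buff *skb)
{
	return skb_shinfo(skb)->nr_frags > 7;
}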
8668ae92 1373static void ql_realign_skb(struct sk_buff *skb, int len)
1374{
1375 void *temp_addr = skb->data;
1376
1377 /* Undo the skb_reserve(skb,32) we did before
1378 * giving to hardware, and realign data on
1379 * a 2-byte boundary.
1380 */
1381 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1382 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1383 skb_copy_to_linear_data(skb, temp_addr,
1384 (unsigned int)len);
1385}
1386
1387/*
1388 * This function builds an skb for the given inbound
1389 * completion. It will be rewritten for readability in the near
 1390 * future, but for now it works well.
1391 */
1392static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1393 struct rx_ring *rx_ring,
1394 struct ib_mac_iocb_rsp *ib_mac_rsp)
1395{
1396 struct bq_desc *lbq_desc;
1397 struct bq_desc *sbq_desc;
1398 struct sk_buff *skb = NULL;
1399 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1400 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1401
1402 /*
1403 * Handle the header buffer if present.
1404 */
1405 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1406 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1407 QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
1408 /*
1409 * Headers fit nicely into a small buffer.
1410 */
1411 sbq_desc = ql_get_curr_sbuf(rx_ring);
1412 pci_unmap_single(qdev->pdev,
1413 pci_unmap_addr(sbq_desc, mapaddr),
1414 pci_unmap_len(sbq_desc, maplen),
1415 PCI_DMA_FROMDEVICE);
1416 skb = sbq_desc->p.skb;
1417 ql_realign_skb(skb, hdr_len);
1418 skb_put(skb, hdr_len);
1419 sbq_desc->p.skb = NULL;
1420 }
1421
1422 /*
1423 * Handle the data buffer(s).
1424 */
1425 if (unlikely(!length)) { /* Is there data too? */
1426 QPRINTK(qdev, RX_STATUS, DEBUG,
1427 "No Data buffer in this packet.\n");
1428 return skb;
1429 }
1430
1431 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1432 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1433 QPRINTK(qdev, RX_STATUS, DEBUG,
1434 "Headers in small, data of %d bytes in small, combine them.\n", length);
1435 /*
1436 * Data is less than small buffer size so it's
1437 * stuffed in a small buffer.
1438 * For this case we append the data
1439 * from the "data" small buffer to the "header" small
1440 * buffer.
1441 */
1442 sbq_desc = ql_get_curr_sbuf(rx_ring);
1443 pci_dma_sync_single_for_cpu(qdev->pdev,
1444 pci_unmap_addr
1445 (sbq_desc, mapaddr),
1446 pci_unmap_len
1447 (sbq_desc, maplen),
1448 PCI_DMA_FROMDEVICE);
1449 memcpy(skb_put(skb, length),
1450 sbq_desc->p.skb->data, length);
1451 pci_dma_sync_single_for_device(qdev->pdev,
1452 pci_unmap_addr
1453 (sbq_desc,
1454 mapaddr),
1455 pci_unmap_len
1456 (sbq_desc,
1457 maplen),
1458 PCI_DMA_FROMDEVICE);
1459 } else {
1460 QPRINTK(qdev, RX_STATUS, DEBUG,
1461 "%d bytes in a single small buffer.\n", length);
1462 sbq_desc = ql_get_curr_sbuf(rx_ring);
1463 skb = sbq_desc->p.skb;
1464 ql_realign_skb(skb, length);
1465 skb_put(skb, length);
1466 pci_unmap_single(qdev->pdev,
1467 pci_unmap_addr(sbq_desc,
1468 mapaddr),
1469 pci_unmap_len(sbq_desc,
1470 maplen),
1471 PCI_DMA_FROMDEVICE);
1472 sbq_desc->p.skb = NULL;
1473 }
1474 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1475 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1476 QPRINTK(qdev, RX_STATUS, DEBUG,
1477 "Header in small, %d bytes in large. Chain large to small!\n", length);
1478 /*
1479 * The data is in a single large buffer. We
1480 * chain it to the header buffer's skb and let
1481 * it rip.
1482 */
1483 lbq_desc = ql_get_curr_lbuf(rx_ring);
1484 pci_unmap_page(qdev->pdev,
1485 pci_unmap_addr(lbq_desc,
1486 mapaddr),
1487 pci_unmap_len(lbq_desc, maplen),
1488 PCI_DMA_FROMDEVICE);
1489 QPRINTK(qdev, RX_STATUS, DEBUG,
1490 "Chaining page to skb.\n");
1491 skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
1492 0, length);
1493 skb->len += length;
1494 skb->data_len += length;
1495 skb->truesize += length;
1496 lbq_desc->p.lbq_page = NULL;
1497 } else {
1498 /*
1499 * The headers and data are in a single large buffer. We
1500 * copy it to a new skb and let it go. This can happen with
1501 * jumbo mtu on a non-TCP/UDP frame.
1502 */
1503 lbq_desc = ql_get_curr_lbuf(rx_ring);
1504 skb = netdev_alloc_skb(qdev->ndev, length);
1505 if (skb == NULL) {
1506 QPRINTK(qdev, PROBE, DEBUG,
1507 "No skb available, drop the packet.\n");
1508 return NULL;
1509 }
1510 pci_unmap_page(qdev->pdev,
1511 pci_unmap_addr(lbq_desc,
1512 mapaddr),
1513 pci_unmap_len(lbq_desc, maplen),
1514 PCI_DMA_FROMDEVICE);
1515 skb_reserve(skb, NET_IP_ALIGN);
1516 QPRINTK(qdev, RX_STATUS, DEBUG,
1517 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
1518 skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
1519 0, length);
1520 skb->len += length;
1521 skb->data_len += length;
1522 skb->truesize += length;
1523 length -= length;
1524 lbq_desc->p.lbq_page = NULL;
1525 __pskb_pull_tail(skb,
1526 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1527 VLAN_ETH_HLEN : ETH_HLEN);
1528 }
1529 } else {
1530 /*
1531 * The data is in a chain of large buffers
1532 * pointed to by a small buffer. We loop
 1533 * thru and chain them to our small header
1534 * buffer's skb.
1535 * frags: There are 18 max frags and our small
1536 * buffer will hold 32 of them. The thing is,
1537 * we'll use 3 max for our 9000 byte jumbo
1538 * frames. If the MTU goes up we could
1539 * eventually be in trouble.
1540 */
1541 int size, offset, i = 0;
2c9a0d41 1542 __le64 *bq, bq_array[8];
1543 sbq_desc = ql_get_curr_sbuf(rx_ring);
1544 pci_unmap_single(qdev->pdev,
1545 pci_unmap_addr(sbq_desc, mapaddr),
1546 pci_unmap_len(sbq_desc, maplen),
1547 PCI_DMA_FROMDEVICE);
1548 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1549 /*
 1550 * This is a non TCP/UDP IP frame, so
1551 * the headers aren't split into a small
1552 * buffer. We have to use the small buffer
1553 * that contains our sg list as our skb to
1554 * send upstairs. Copy the sg list here to
1555 * a local buffer and use it to find the
1556 * pages to chain.
1557 */
1558 QPRINTK(qdev, RX_STATUS, DEBUG,
1559 "%d bytes of headers & data in chain of large.\n", length);
1560 skb = sbq_desc->p.skb;
1561 bq = &bq_array[0];
1562 memcpy(bq, skb->data, sizeof(bq_array));
1563 sbq_desc->p.skb = NULL;
1564 skb_reserve(skb, NET_IP_ALIGN);
1565 } else {
1566 QPRINTK(qdev, RX_STATUS, DEBUG,
1567 "Headers in small, %d bytes of data in chain of large.\n", length);
2c9a0d41 1568 bq = (__le64 *)sbq_desc->p.skb->data;
1569 }
1570 while (length > 0) {
1571 lbq_desc = ql_get_curr_lbuf(rx_ring);
1572 pci_unmap_page(qdev->pdev,
1573 pci_unmap_addr(lbq_desc,
1574 mapaddr),
1575 pci_unmap_len(lbq_desc,
1576 maplen),
1577 PCI_DMA_FROMDEVICE);
1578 size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
1579 offset = 0;
1580
1581 QPRINTK(qdev, RX_STATUS, DEBUG,
1582 "Adding page %d to skb for %d bytes.\n",
1583 i, size);
1584 skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
1585 offset, size);
1586 skb->len += size;
1587 skb->data_len += size;
1588 skb->truesize += size;
1589 length -= size;
1590 lbq_desc->p.lbq_page = NULL;
1591 bq++;
1592 i++;
1593 }
1594 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1595 VLAN_ETH_HLEN : ETH_HLEN);
1596 }
1597 return skb;
1598}
1599
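/* Summary of the buffer layouts ql_build_rx_skb() handles above:
 *   HV+HS set           - the headers landed in a small buffer.
 *   DS set              - the data is in a small buffer, either appended
 *                         to the header skb or used as the skb itself.
 *   DL set              - the data is in one large buffer, chained to the
 *                         header skb or copied into a fresh skb.
 *   neither DS nor DL   - the data spans a chain of large buffers whose
 *                         addresses come from a small-buffer sglist.
 */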
1600/* Process an inbound completion from an rx ring. */
1601static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1602 struct rx_ring *rx_ring,
1603 struct ib_mac_iocb_rsp *ib_mac_rsp)
1604{
1605 struct net_device *ndev = qdev->ndev;
1606 struct sk_buff *skb = NULL;
 1607 u16 vlan_id = (le16_to_cpu(ib_mac_rsp->vlan_id) &
 1608 IB_MAC_IOCB_RSP_VLAN_MASK);
 1609
1610 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1611
1612 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1613 if (unlikely(!skb)) {
1614 QPRINTK(qdev, RX_STATUS, DEBUG,
1615 "No skb available, drop packet.\n");
1616 return;
1617 }
1618
1619 /* Frame error, so drop the packet. */
1620 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1621 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1622 ib_mac_rsp->flags2);
1623 dev_kfree_skb_any(skb);
1624 return;
1625 }
1626
1627 /* The max framesize filter on this chip is set higher than
1628 * MTU since FCoE uses 2k frames.
1629 */
1630 if (skb->len > ndev->mtu + ETH_HLEN) {
1631 dev_kfree_skb_any(skb);
1632 return;
1633 }
1634
1635 prefetch(skb->data);
1636 skb->dev = ndev;
1637 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1638 QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
1639 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1640 IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
1641 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1642 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1643 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1644 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1645 }
1646 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1647 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
1648 }
d555f592 1649
1650 skb->protocol = eth_type_trans(skb, ndev);
1651 skb->ip_summed = CHECKSUM_NONE;
1652
1653 /* If rx checksum is on, and there are no
1654 * csum or frame errors.
1655 */
1656 if (qdev->rx_csum &&
1657 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1658 /* TCP frame. */
1659 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1660 QPRINTK(qdev, RX_STATUS, DEBUG,
1661 "TCP checksum done!\n");
1662 skb->ip_summed = CHECKSUM_UNNECESSARY;
1663 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1664 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1665 /* Unfragmented ipv4 UDP frame. */
1666 struct iphdr *iph = (struct iphdr *) skb->data;
1667 if (!(iph->frag_off &
1668 cpu_to_be16(IP_MF|IP_OFFSET))) {
1669 skb->ip_summed = CHECKSUM_UNNECESSARY;
1670 QPRINTK(qdev, RX_STATUS, DEBUG,
1671 "TCP checksum done!\n");
1672 }
1673 }
c4e84bde 1674 }
d555f592 1675
1676 ndev->stats.rx_packets++;
1677 ndev->stats.rx_bytes += skb->len;
b2014ff8 1678 skb_record_rx_queue(skb, rx_ring->cq_id);
1679 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1680 if (qdev->vlgrp &&
1681 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1682 (vlan_id != 0))
1683 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1684 vlan_id, skb);
1685 else
1686 napi_gro_receive(&rx_ring->napi, skb);
c4e84bde 1687 } else {
1688 if (qdev->vlgrp &&
1689 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1690 (vlan_id != 0))
1691 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1692 else
1693 netif_receive_skb(skb);
c4e84bde 1694 }
1695}
1696
1697/* Process an outbound completion from an rx ring. */
1698static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1699 struct ob_mac_iocb_rsp *mac_rsp)
1700{
bcc90f55 1701 struct net_device *ndev = qdev->ndev;
1702 struct tx_ring *tx_ring;
1703 struct tx_ring_desc *tx_ring_desc;
1704
1705 QL_DUMP_OB_MAC_RSP(mac_rsp);
1706 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
1707 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
1708 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
1709 ndev->stats.tx_bytes += (tx_ring_desc->skb)->len;
1710 ndev->stats.tx_packets++;
1711 dev_kfree_skb(tx_ring_desc->skb);
1712 tx_ring_desc->skb = NULL;
1713
1714 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
1715 OB_MAC_IOCB_RSP_S |
1716 OB_MAC_IOCB_RSP_L |
1717 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
1718 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
1719 QPRINTK(qdev, TX_DONE, WARNING,
1720 "Total descriptor length did not match transfer length.\n");
1721 }
1722 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
1723 QPRINTK(qdev, TX_DONE, WARNING,
1724 "Frame too short to be legal, not sent.\n");
1725 }
1726 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
1727 QPRINTK(qdev, TX_DONE, WARNING,
1728 "Frame too long, but sent anyway.\n");
1729 }
1730 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
1731 QPRINTK(qdev, TX_DONE, WARNING,
1732 "PCI backplane error. Frame not sent.\n");
1733 }
1734 }
1735 atomic_inc(&tx_ring->tx_count);
1736}
1737
1738/* Fire up a handler to reset the MPI processor. */
1739void ql_queue_fw_error(struct ql_adapter *qdev)
1740{
6a473308 1741 ql_link_off(qdev);
1742 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
1743}
1744
1745void ql_queue_asic_error(struct ql_adapter *qdev)
1746{
6a473308 1747 ql_link_off(qdev);
c4e84bde 1748 ql_disable_interrupts(qdev);
1749 /* Clear adapter up bit to signal the recovery
1750 * process that it shouldn't kill the reset worker
1751 * thread
1752 */
1753 clear_bit(QL_ADAPTER_UP, &qdev->flags);
1754 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
1755}
1756
1757static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
1758 struct ib_ae_iocb_rsp *ib_ae_rsp)
1759{
1760 switch (ib_ae_rsp->event) {
1761 case MGMT_ERR_EVENT:
1762 QPRINTK(qdev, RX_ERR, ERR,
1763 "Management Processor Fatal Error.\n");
1764 ql_queue_fw_error(qdev);
1765 return;
1766
1767 case CAM_LOOKUP_ERR_EVENT:
1768 QPRINTK(qdev, LINK, ERR,
1769 "Multiple CAM hits lookup occurred.\n");
1770 QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
1771 ql_queue_asic_error(qdev);
1772 return;
1773
1774 case SOFT_ECC_ERROR_EVENT:
1775 QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
1776 ql_queue_asic_error(qdev);
1777 break;
1778
1779 case PCI_ERR_ANON_BUF_RD:
1780 QPRINTK(qdev, RX_ERR, ERR,
1781 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
1782 ib_ae_rsp->q_id);
1783 ql_queue_asic_error(qdev);
1784 break;
1785
1786 default:
1787 QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
1788 ib_ae_rsp->event);
1789 ql_queue_asic_error(qdev);
1790 break;
1791 }
1792}
1793
1794static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
1795{
1796 struct ql_adapter *qdev = rx_ring->qdev;
ba7cd3ba 1797 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1798 struct ob_mac_iocb_rsp *net_rsp = NULL;
1799 int count = 0;
1800
1e213303 1801 struct tx_ring *tx_ring;
1802 /* While there are entries in the completion queue. */
1803 while (prod != rx_ring->cnsmr_idx) {
1804
1805 QPRINTK(qdev, RX_STATUS, DEBUG,
1806 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
1807 prod, rx_ring->cnsmr_idx);
1808
1809 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
1810 rmb();
1811 switch (net_rsp->opcode) {
1812
1813 case OPCODE_OB_MAC_TSO_IOCB:
1814 case OPCODE_OB_MAC_IOCB:
1815 ql_process_mac_tx_intr(qdev, net_rsp);
1816 break;
1817 default:
1818 QPRINTK(qdev, RX_STATUS, DEBUG,
1819 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1820 net_rsp->opcode);
1821 }
1822 count++;
1823 ql_update_cq(rx_ring);
ba7cd3ba 1824 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1825 }
1826 ql_write_cq_idx(rx_ring);
1827 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
1828 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id) &&
1829 net_rsp != NULL) {
1830 if (atomic_read(&tx_ring->queue_stopped) &&
1831 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
1832 /*
1833 * The queue got stopped because the tx_ring was full.
1834 * Wake it up, because it's now at least 25% empty.
1835 */
1e213303 1836 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
1837 }
1838
1839 return count;
1840}
1841
1842static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
1843{
1844 struct ql_adapter *qdev = rx_ring->qdev;
ba7cd3ba 1845 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
c4e84bde
RM
1846 struct ql_net_rsp_iocb *net_rsp;
1847 int count = 0;
1848
1849 /* While there are entries in the completion queue. */
1850 while (prod != rx_ring->cnsmr_idx) {
1851
1852 QPRINTK(qdev, RX_STATUS, DEBUG,
1853 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
1854 prod, rx_ring->cnsmr_idx);
1855
1856 net_rsp = rx_ring->curr_entry;
1857 rmb();
1858 switch (net_rsp->opcode) {
1859 case OPCODE_IB_MAC_IOCB:
1860 ql_process_mac_rx_intr(qdev, rx_ring,
1861 (struct ib_mac_iocb_rsp *)
1862 net_rsp);
1863 break;
1864
1865 case OPCODE_IB_AE_IOCB:
1866 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
1867 net_rsp);
1868 break;
1869 default:
1870 {
1871 QPRINTK(qdev, RX_STATUS, DEBUG,
1872 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1873 net_rsp->opcode);
1874 }
1875 }
1876 count++;
1877 ql_update_cq(rx_ring);
ba7cd3ba 1878 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
c4e84bde
RM
1879 if (count == budget)
1880 break;
1881 }
1882 ql_update_buffer_queues(qdev, rx_ring);
1883 ql_write_cq_idx(rx_ring);
1884 return count;
1885}
1886
1887static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
1888{
1889 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
1890 struct ql_adapter *qdev = rx_ring->qdev;
39aa8165
RM
1891 struct rx_ring *trx_ring;
1892 int i, work_done = 0;
1893 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
c4e84bde
RM
1894
1895 QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
1896 rx_ring->cq_id);
1897
39aa8165
RM
1898 /* Service the TX rings first. They start
1899 * right after the RSS rings. */
1900 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
1901 trx_ring = &qdev->rx_ring[i];
1902 /* If this TX completion ring belongs to this vector and
1903 * it's not empty then service it.
1904 */
1905 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
1906 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
1907 trx_ring->cnsmr_idx)) {
1908 QPRINTK(qdev, INTR, DEBUG,
1909 "%s: Servicing TX completion ring %d.\n",
1910 __func__, trx_ring->cq_id);
1911 ql_clean_outbound_rx_ring(trx_ring);
1912 }
1913 }
1914
1915 /*
1916 * Now service the RSS ring if it's active.
1917 */
1918 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
1919 rx_ring->cnsmr_idx) {
1920 QPRINTK(qdev, INTR, DEBUG,
1921 "%s: Servicing RX completion ring %d.\n",
1922 __func__, rx_ring->cq_id);
1923 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
1924 }
1925
c4e84bde 1926 if (work_done < budget) {
22bdd4f5 1927 napi_complete(napi);
c4e84bde
RM
1928 ql_enable_completion_interrupt(qdev, rx_ring->irq);
1929 }
1930 return work_done;
1931}
1932
1933static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
1934{
1935 struct ql_adapter *qdev = netdev_priv(ndev);
1936
1937 qdev->vlgrp = grp;
1938 if (grp) {
1939 QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
1940 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
1941 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
1942 } else {
1943 QPRINTK(qdev, IFUP, DEBUG,
1944 "Turning off VLAN in NIC_RCV_CFG.\n");
1945 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
1946 }
1947}
1948
1949static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
1950{
1951 struct ql_adapter *qdev = netdev_priv(ndev);
1952 u32 enable_bit = MAC_ADDR_E;
cc288f54 1953 int status;
c4e84bde 1954
cc288f54
RM
1955 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
1956 if (status)
1957 return;
c4e84bde
RM
1958 if (ql_set_mac_addr_reg
1959 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1960 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
1961 }
cc288f54 1962 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
c4e84bde
RM
1963}
1964
1965static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
1966{
1967 struct ql_adapter *qdev = netdev_priv(ndev);
1968 u32 enable_bit = 0;
cc288f54
RM
1969 int status;
1970
1971 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
1972 if (status)
1973 return;
c4e84bde 1974
c4e84bde
RM
1975 if (ql_set_mac_addr_reg
1976 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1977 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
1978 }
cc288f54 1979 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
c4e84bde
RM
1980
1981}
1982
c4e84bde
RM
1983/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
1984static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
1985{
1986 struct rx_ring *rx_ring = dev_id;
288379f0 1987 napi_schedule(&rx_ring->napi);
c4e84bde
RM
1988 return IRQ_HANDLED;
1989}
1990
c4e84bde
RM
1991/* This handles a fatal error, MPI activity, and the default
1992 * rx_ring in an MSI-X multiple vector environment.
 1993 * In an MSI/Legacy environment it also processes the rest of
1994 * the rx_rings.
1995 */
1996static irqreturn_t qlge_isr(int irq, void *dev_id)
1997{
1998 struct rx_ring *rx_ring = dev_id;
1999 struct ql_adapter *qdev = rx_ring->qdev;
2000 struct intr_context *intr_context = &qdev->intr_context[0];
2001 u32 var;
c4e84bde
RM
2002 int work_done = 0;
2003
bb0d215c
RM
2004 spin_lock(&qdev->hw_lock);
2005 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2006 QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
2007 spin_unlock(&qdev->hw_lock);
2008 return IRQ_NONE;
c4e84bde 2009 }
bb0d215c 2010 spin_unlock(&qdev->hw_lock);
c4e84bde 2011
bb0d215c 2012 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
c4e84bde
RM
2013
2014 /*
2015 * Check for fatal error.
2016 */
2017 if (var & STS_FE) {
2018 ql_queue_asic_error(qdev);
2019 QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
2020 var = ql_read32(qdev, ERR_STS);
2021 QPRINTK(qdev, INTR, ERR,
2022 "Resetting chip. Error Status Register = 0x%x\n", var);
2023 return IRQ_HANDLED;
2024 }
2025
2026 /*
2027 * Check MPI processor activity.
2028 */
5ee22a5a
RM
2029 if ((var & STS_PI) &&
2030 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
c4e84bde
RM
2031 /*
2032 * We've got an async event or mailbox completion.
2033 * Handle it and clear the source of the interrupt.
2034 */
2035 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
2036 ql_disable_completion_interrupt(qdev, intr_context->intr);
5ee22a5a
RM
2037 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2038 queue_delayed_work_on(smp_processor_id(),
2039 qdev->workqueue, &qdev->mpi_work, 0);
c4e84bde
RM
2040 work_done++;
2041 }
2042
2043 /*
39aa8165
RM
2044 * Get the bit-mask that shows the active queues for this
2045 * pass. Compare it to the queues that this irq services
2046 * and call napi if there's a match.
c4e84bde 2047 */
39aa8165
RM
2048 var = ql_read32(qdev, ISR1);
2049 if (var & intr_context->irq_mask) {
c4e84bde 2050 QPRINTK(qdev, INTR, INFO,
39aa8165
RM
2051 "Waking handler for rx_ring[0].\n");
2052 ql_disable_completion_interrupt(qdev, intr_context->intr);
288379f0 2053 napi_schedule(&rx_ring->napi);
c4e84bde
RM
2054 work_done++;
2055 }
bb0d215c 2056 ql_enable_completion_interrupt(qdev, intr_context->intr);
c4e84bde
RM
2057 return work_done ? IRQ_HANDLED : IRQ_NONE;
2058}
2059
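/* If the skb is a GSO frame, build a TSO IOCB: the hardware segments the
 * frame using gso_size as the MSS, so the TCP checksum field is seeded
 * with the pseudo-header checksum here. Returns 1 if TSO was set up,
 * 0 for a non-GSO frame, or a negative errno.
 */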
2060static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2061{
2062
2063 if (skb_is_gso(skb)) {
2064 int err;
2065 if (skb_header_cloned(skb)) {
2066 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2067 if (err)
2068 return err;
2069 }
2070
2071 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2072 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2073 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2074 mac_iocb_ptr->total_hdrs_len =
2075 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2076 mac_iocb_ptr->net_trans_offset =
2077 cpu_to_le16(skb_network_offset(skb) |
2078 skb_transport_offset(skb)
2079 << OB_MAC_TRANSPORT_HDR_SHIFT);
2080 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2081 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2082 if (likely(skb->protocol == htons(ETH_P_IP))) {
2083 struct iphdr *iph = ip_hdr(skb);
2084 iph->check = 0;
2085 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2086 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2087 iph->daddr, 0,
2088 IPPROTO_TCP,
2089 0);
2090 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2091 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2092 tcp_hdr(skb)->check =
2093 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2094 &ipv6_hdr(skb)->daddr,
2095 0, IPPROTO_TCP, 0);
2096 }
2097 return 1;
2098 }
2099 return 0;
2100}
2101
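/* Set up hardware TX checksum offload for a non-TSO IPv4 TCP or UDP
 * frame. The pseudo-header checksum is written into the transport
 * header so the chip only has to fold in the payload.
 */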
2102static void ql_hw_csum_setup(struct sk_buff *skb,
2103 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2104{
2105 int len;
2106 struct iphdr *iph = ip_hdr(skb);
fd2df4f7 2107 __sum16 *check;
c4e84bde
RM
2108 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2109 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2110 mac_iocb_ptr->net_trans_offset =
2111 cpu_to_le16(skb_network_offset(skb) |
2112 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2113
2114 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2115 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2116 if (likely(iph->protocol == IPPROTO_TCP)) {
2117 check = &(tcp_hdr(skb)->check);
2118 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2119 mac_iocb_ptr->total_hdrs_len =
2120 cpu_to_le16(skb_transport_offset(skb) +
2121 (tcp_hdr(skb)->doff << 2));
2122 } else {
2123 check = &(udp_hdr(skb)->check);
2124 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2125 mac_iocb_ptr->total_hdrs_len =
2126 cpu_to_le16(skb_transport_offset(skb) +
2127 sizeof(struct udphdr));
2128 }
2129 *check = ~csum_tcpudp_magic(iph->saddr,
2130 iph->daddr, len, iph->protocol, 0);
2131}
2132
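/* Main transmit entry point. Builds an outbound MAC IOCB on the tx ring
 * selected by skb->queue_mapping, applies TSO or checksum offload as
 * needed, maps the buffers for DMA, and rings the producer doorbell.
 * Returns NETDEV_TX_BUSY if descriptors or mappings run out.
 */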
61357325 2133static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
c4e84bde
RM
2134{
2135 struct tx_ring_desc *tx_ring_desc;
2136 struct ob_mac_iocb_req *mac_iocb_ptr;
2137 struct ql_adapter *qdev = netdev_priv(ndev);
2138 int tso;
2139 struct tx_ring *tx_ring;
1e213303 2140 u32 tx_ring_idx = (u32) skb->queue_mapping;
c4e84bde
RM
2141
2142 tx_ring = &qdev->tx_ring[tx_ring_idx];
2143
74c50b4b
RM
2144 if (skb_padto(skb, ETH_ZLEN))
2145 return NETDEV_TX_OK;
2146
c4e84bde
RM
2147 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2148 QPRINTK(qdev, TX_QUEUED, INFO,
2149 "%s: shutting down tx queue %d du to lack of resources.\n",
2150 __func__, tx_ring_idx);
1e213303 2151 netif_stop_subqueue(ndev, tx_ring->wq_id);
c4e84bde
RM
2152 atomic_inc(&tx_ring->queue_stopped);
2153 return NETDEV_TX_BUSY;
2154 }
2155 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2156 mac_iocb_ptr = tx_ring_desc->queue_entry;
e332471c 2157 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
c4e84bde
RM
2158
2159 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2160 mac_iocb_ptr->tid = tx_ring_desc->index;
2161 /* We use the upper 32-bits to store the tx queue for this IO.
2162 * When we get the completion we can use it to establish the context.
2163 */
2164 mac_iocb_ptr->txq_idx = tx_ring_idx;
2165 tx_ring_desc->skb = skb;
2166
2167 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2168
2169 if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
2170 QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
2171 vlan_tx_tag_get(skb));
2172 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2173 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2174 }
2175 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2176 if (tso < 0) {
2177 dev_kfree_skb_any(skb);
2178 return NETDEV_TX_OK;
2179 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2180 ql_hw_csum_setup(skb,
2181 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2182 }
0d979f74
RM
2183 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2184 NETDEV_TX_OK) {
2185 QPRINTK(qdev, TX_QUEUED, ERR,
2186 "Could not map the segments.\n");
2187 return NETDEV_TX_BUSY;
2188 }
c4e84bde
RM
2189 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2190 tx_ring->prod_idx++;
2191 if (tx_ring->prod_idx == tx_ring->wq_len)
2192 tx_ring->prod_idx = 0;
2193 wmb();
2194
2195 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
c4e84bde
RM
2196 QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
2197 tx_ring->prod_idx, skb->len);
2198
2199 atomic_dec(&tx_ring->tx_count);
2200 return NETDEV_TX_OK;
2201}
2202
2203static void ql_free_shadow_space(struct ql_adapter *qdev)
2204{
2205 if (qdev->rx_ring_shadow_reg_area) {
2206 pci_free_consistent(qdev->pdev,
2207 PAGE_SIZE,
2208 qdev->rx_ring_shadow_reg_area,
2209 qdev->rx_ring_shadow_reg_dma);
2210 qdev->rx_ring_shadow_reg_area = NULL;
2211 }
2212 if (qdev->tx_ring_shadow_reg_area) {
2213 pci_free_consistent(qdev->pdev,
2214 PAGE_SIZE,
2215 qdev->tx_ring_shadow_reg_area,
2216 qdev->tx_ring_shadow_reg_dma);
2217 qdev->tx_ring_shadow_reg_area = NULL;
2218 }
2219}
2220
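/* Allocate one DMA-coherent page each for the rx and tx shadow register
 * areas. The chip writes completion-queue producer indices (rx) and
 * work-queue consumer indices (tx) here so the driver can poll them
 * from memory instead of reading registers; the areas are carved up
 * per ring in ql_start_rx_ring()/ql_start_tx_ring().
 */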
2221static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2222{
2223 qdev->rx_ring_shadow_reg_area =
2224 pci_alloc_consistent(qdev->pdev,
2225 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2226 if (qdev->rx_ring_shadow_reg_area == NULL) {
2227 QPRINTK(qdev, IFUP, ERR,
2228 "Allocation of RX shadow space failed.\n");
2229 return -ENOMEM;
2230 }
b25215d0 2231 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
c4e84bde
RM
2232 qdev->tx_ring_shadow_reg_area =
2233 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2234 &qdev->tx_ring_shadow_reg_dma);
2235 if (qdev->tx_ring_shadow_reg_area == NULL) {
2236 QPRINTK(qdev, IFUP, ERR,
2237 "Allocation of TX shadow space failed.\n");
2238 goto err_wqp_sh_area;
2239 }
b25215d0 2240 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
c4e84bde
RM
2241 return 0;
2242
2243err_wqp_sh_area:
2244 pci_free_consistent(qdev->pdev,
2245 PAGE_SIZE,
2246 qdev->rx_ring_shadow_reg_area,
2247 qdev->rx_ring_shadow_reg_dma);
2248 return -ENOMEM;
2249}
2250
2251static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2252{
2253 struct tx_ring_desc *tx_ring_desc;
2254 int i;
2255 struct ob_mac_iocb_req *mac_iocb_ptr;
2256
2257 mac_iocb_ptr = tx_ring->wq_base;
2258 tx_ring_desc = tx_ring->q;
2259 for (i = 0; i < tx_ring->wq_len; i++) {
2260 tx_ring_desc->index = i;
2261 tx_ring_desc->skb = NULL;
2262 tx_ring_desc->queue_entry = mac_iocb_ptr;
2263 mac_iocb_ptr++;
2264 tx_ring_desc++;
2265 }
2266 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2267 atomic_set(&tx_ring->queue_stopped, 0);
2268}
2269
2270static void ql_free_tx_resources(struct ql_adapter *qdev,
2271 struct tx_ring *tx_ring)
2272{
2273 if (tx_ring->wq_base) {
2274 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2275 tx_ring->wq_base, tx_ring->wq_base_dma);
2276 tx_ring->wq_base = NULL;
2277 }
2278 kfree(tx_ring->q);
2279 tx_ring->q = NULL;
2280}
2281
2282static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2283 struct tx_ring *tx_ring)
2284{
2285 tx_ring->wq_base =
2286 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2287 &tx_ring->wq_base_dma);
2288
2289 if ((tx_ring->wq_base == NULL)
88c55e3c 2290 || tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
c4e84bde
RM
2291 QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
2292 return -ENOMEM;
2293 }
2294 tx_ring->q =
2295 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2296 if (tx_ring->q == NULL)
2297 goto err;
2298
2299 return 0;
2300err:
2301 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2302 tx_ring->wq_base, tx_ring->wq_base_dma);
2303 return -ENOMEM;
2304}
2305
8668ae92 2306static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
c4e84bde
RM
2307{
2308 int i;
2309 struct bq_desc *lbq_desc;
2310
2311 for (i = 0; i < rx_ring->lbq_len; i++) {
2312 lbq_desc = &rx_ring->lbq[i];
2313 if (lbq_desc->p.lbq_page) {
2314 pci_unmap_page(qdev->pdev,
2315 pci_unmap_addr(lbq_desc, mapaddr),
2316 pci_unmap_len(lbq_desc, maplen),
2317 PCI_DMA_FROMDEVICE);
2318
2319 put_page(lbq_desc->p.lbq_page);
2320 lbq_desc->p.lbq_page = NULL;
2321 }
c4e84bde
RM
2322 }
2323}
2324
8668ae92 2325static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
c4e84bde
RM
2326{
2327 int i;
2328 struct bq_desc *sbq_desc;
2329
2330 for (i = 0; i < rx_ring->sbq_len; i++) {
2331 sbq_desc = &rx_ring->sbq[i];
2332 if (sbq_desc == NULL) {
2333 QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
2334 return;
2335 }
2336 if (sbq_desc->p.skb) {
2337 pci_unmap_single(qdev->pdev,
2338 pci_unmap_addr(sbq_desc, mapaddr),
2339 pci_unmap_len(sbq_desc, maplen),
2340 PCI_DMA_FROMDEVICE);
2341 dev_kfree_skb(sbq_desc->p.skb);
2342 sbq_desc->p.skb = NULL;
2343 }
c4e84bde
RM
2344 }
2345}
2346
4545a3f2
RM
2347/* Free all large and small rx buffers associated
2348 * with the completion queues for this device.
2349 */
2350static void ql_free_rx_buffers(struct ql_adapter *qdev)
2351{
2352 int i;
2353 struct rx_ring *rx_ring;
2354
2355 for (i = 0; i < qdev->rx_ring_count; i++) {
2356 rx_ring = &qdev->rx_ring[i];
2357 if (rx_ring->lbq)
2358 ql_free_lbq_buffers(qdev, rx_ring);
2359 if (rx_ring->sbq)
2360 ql_free_sbq_buffers(qdev, rx_ring);
2361 }
2362}
2363
2364static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2365{
2366 struct rx_ring *rx_ring;
2367 int i;
2368
2369 for (i = 0; i < qdev->rx_ring_count; i++) {
2370 rx_ring = &qdev->rx_ring[i];
2371 if (rx_ring->type != TX_Q)
2372 ql_update_buffer_queues(qdev, rx_ring);
2373 }
2374}
2375
2376static void ql_init_lbq_ring(struct ql_adapter *qdev,
2377 struct rx_ring *rx_ring)
2378{
2379 int i;
2380 struct bq_desc *lbq_desc;
2381 __le64 *bq = rx_ring->lbq_base;
2382
2383 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2384 for (i = 0; i < rx_ring->lbq_len; i++) {
2385 lbq_desc = &rx_ring->lbq[i];
2386 memset(lbq_desc, 0, sizeof(*lbq_desc));
2387 lbq_desc->index = i;
2388 lbq_desc->addr = bq;
2389 bq++;
2390 }
2391}
2392
2393static void ql_init_sbq_ring(struct ql_adapter *qdev,
c4e84bde
RM
2394 struct rx_ring *rx_ring)
2395{
2396 int i;
2397 struct bq_desc *sbq_desc;
2c9a0d41 2398 __le64 *bq = rx_ring->sbq_base;
c4e84bde 2399
4545a3f2 2400 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
c4e84bde
RM
2401 for (i = 0; i < rx_ring->sbq_len; i++) {
2402 sbq_desc = &rx_ring->sbq[i];
4545a3f2 2403 memset(sbq_desc, 0, sizeof(*sbq_desc));
c4e84bde 2404 sbq_desc->index = i;
2c9a0d41 2405 sbq_desc->addr = bq;
c4e84bde
RM
2406 bq++;
2407 }
c4e84bde
RM
2408}
2409
2410static void ql_free_rx_resources(struct ql_adapter *qdev,
2411 struct rx_ring *rx_ring)
2412{
c4e84bde
RM
2413 /* Free the small buffer queue. */
2414 if (rx_ring->sbq_base) {
2415 pci_free_consistent(qdev->pdev,
2416 rx_ring->sbq_size,
2417 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2418 rx_ring->sbq_base = NULL;
2419 }
2420
2421 /* Free the small buffer queue control blocks. */
2422 kfree(rx_ring->sbq);
2423 rx_ring->sbq = NULL;
2424
2425 /* Free the large buffer queue. */
2426 if (rx_ring->lbq_base) {
2427 pci_free_consistent(qdev->pdev,
2428 rx_ring->lbq_size,
2429 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2430 rx_ring->lbq_base = NULL;
2431 }
2432
2433 /* Free the large buffer queue control blocks. */
2434 kfree(rx_ring->lbq);
2435 rx_ring->lbq = NULL;
2436
2437 /* Free the rx queue. */
2438 if (rx_ring->cq_base) {
2439 pci_free_consistent(qdev->pdev,
2440 rx_ring->cq_size,
2441 rx_ring->cq_base, rx_ring->cq_base_dma);
2442 rx_ring->cq_base = NULL;
2443 }
2444}
2445
 2446 /* Allocate queues and buffers for this completion queue based
2447 * on the values in the parameter structure. */
2448static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2449 struct rx_ring *rx_ring)
2450{
2451
2452 /*
2453 * Allocate the completion queue for this rx_ring.
2454 */
2455 rx_ring->cq_base =
2456 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2457 &rx_ring->cq_base_dma);
2458
2459 if (rx_ring->cq_base == NULL) {
2460 QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
2461 return -ENOMEM;
2462 }
2463
2464 if (rx_ring->sbq_len) {
2465 /*
2466 * Allocate small buffer queue.
2467 */
2468 rx_ring->sbq_base =
2469 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2470 &rx_ring->sbq_base_dma);
2471
2472 if (rx_ring->sbq_base == NULL) {
2473 QPRINTK(qdev, IFUP, ERR,
2474 "Small buffer queue allocation failed.\n");
2475 goto err_mem;
2476 }
2477
2478 /*
2479 * Allocate small buffer queue control blocks.
2480 */
2481 rx_ring->sbq =
2482 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2483 GFP_KERNEL);
2484 if (rx_ring->sbq == NULL) {
2485 QPRINTK(qdev, IFUP, ERR,
2486 "Small buffer queue control block allocation failed.\n");
2487 goto err_mem;
2488 }
2489
4545a3f2 2490 ql_init_sbq_ring(qdev, rx_ring);
c4e84bde
RM
2491 }
2492
2493 if (rx_ring->lbq_len) {
2494 /*
2495 * Allocate large buffer queue.
2496 */
2497 rx_ring->lbq_base =
2498 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2499 &rx_ring->lbq_base_dma);
2500
2501 if (rx_ring->lbq_base == NULL) {
2502 QPRINTK(qdev, IFUP, ERR,
2503 "Large buffer queue allocation failed.\n");
2504 goto err_mem;
2505 }
2506 /*
2507 * Allocate large buffer queue control blocks.
2508 */
2509 rx_ring->lbq =
2510 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2511 GFP_KERNEL);
2512 if (rx_ring->lbq == NULL) {
2513 QPRINTK(qdev, IFUP, ERR,
2514 "Large buffer queue control block allocation failed.\n");
2515 goto err_mem;
2516 }
2517
4545a3f2 2518 ql_init_lbq_ring(qdev, rx_ring);
c4e84bde
RM
2519 }
2520
2521 return 0;
2522
2523err_mem:
2524 ql_free_rx_resources(qdev, rx_ring);
2525 return -ENOMEM;
2526}
2527
2528static void ql_tx_ring_clean(struct ql_adapter *qdev)
2529{
2530 struct tx_ring *tx_ring;
2531 struct tx_ring_desc *tx_ring_desc;
2532 int i, j;
2533
2534 /*
2535 * Loop through all queues and free
2536 * any resources.
2537 */
2538 for (j = 0; j < qdev->tx_ring_count; j++) {
2539 tx_ring = &qdev->tx_ring[j];
2540 for (i = 0; i < tx_ring->wq_len; i++) {
2541 tx_ring_desc = &tx_ring->q[i];
2542 if (tx_ring_desc && tx_ring_desc->skb) {
2543 QPRINTK(qdev, IFDOWN, ERR,
2544 "Freeing lost SKB %p, from queue %d, index %d.\n",
2545 tx_ring_desc->skb, j,
2546 tx_ring_desc->index);
2547 ql_unmap_send(qdev, tx_ring_desc,
2548 tx_ring_desc->map_cnt);
2549 dev_kfree_skb(tx_ring_desc->skb);
2550 tx_ring_desc->skb = NULL;
2551 }
2552 }
2553 }
2554}
2555
c4e84bde
RM
2556static void ql_free_mem_resources(struct ql_adapter *qdev)
2557{
2558 int i;
2559
2560 for (i = 0; i < qdev->tx_ring_count; i++)
2561 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2562 for (i = 0; i < qdev->rx_ring_count; i++)
2563 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2564 ql_free_shadow_space(qdev);
2565}
2566
2567static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2568{
2569 int i;
2570
2571 /* Allocate space for our shadow registers and such. */
2572 if (ql_alloc_shadow_space(qdev))
2573 return -ENOMEM;
2574
2575 for (i = 0; i < qdev->rx_ring_count; i++) {
2576 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2577 QPRINTK(qdev, IFUP, ERR,
2578 "RX resource allocation failed.\n");
2579 goto err_mem;
2580 }
2581 }
2582 /* Allocate tx queue resources */
2583 for (i = 0; i < qdev->tx_ring_count; i++) {
2584 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2585 QPRINTK(qdev, IFUP, ERR,
2586 "TX resource allocation failed.\n");
2587 goto err_mem;
2588 }
2589 }
2590 return 0;
2591
2592err_mem:
2593 ql_free_mem_resources(qdev);
2594 return -ENOMEM;
2595}
2596
2597/* Set up the rx ring control block and pass it to the chip.
2598 * The control block is defined as
2599 * "Completion Queue Initialization Control Block", or cqicb.
2600 */
2601static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2602{
2603 struct cqicb *cqicb = &rx_ring->cqicb;
2604 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
b8facca0 2605 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
c4e84bde 2606 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
b8facca0 2607 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
c4e84bde
RM
2608 void __iomem *doorbell_area =
2609 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2610 int err = 0;
2611 u16 bq_len;
d4a4aba6 2612 u64 tmp;
b8facca0
RM
2613 __le64 *base_indirect_ptr;
2614 int page_entries;
c4e84bde
RM
2615
2616 /* Set up the shadow registers for this ring. */
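/* Each ring's slice of the shadow page is laid out as: an 8-byte
 * producer-index shadow, then the large-buffer-queue indirect page
 * list, then the small-buffer-queue indirect page list (list sizes
 * come from MAX_DB_PAGES_PER_BQ()).
 */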
2617 rx_ring->prod_idx_sh_reg = shadow_reg;
2618 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
2619 shadow_reg += sizeof(u64);
2620 shadow_reg_dma += sizeof(u64);
2621 rx_ring->lbq_base_indirect = shadow_reg;
2622 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
b8facca0
RM
2623 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
2624 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
c4e84bde
RM
2625 rx_ring->sbq_base_indirect = shadow_reg;
2626 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
2627
2628 /* PCI doorbell mem area + 0x00 for consumer index register */
8668ae92 2629 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
c4e84bde
RM
2630 rx_ring->cnsmr_idx = 0;
2631 rx_ring->curr_entry = rx_ring->cq_base;
2632
2633 /* PCI doorbell mem area + 0x04 for valid register */
2634 rx_ring->valid_db_reg = doorbell_area + 0x04;
2635
2636 /* PCI doorbell mem area + 0x18 for large buffer consumer */
8668ae92 2637 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
c4e84bde
RM
2638
2639 /* PCI doorbell mem area + 0x1c */
8668ae92 2640 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
c4e84bde
RM
2641
2642 memset((void *)cqicb, 0, sizeof(struct cqicb));
2643 cqicb->msix_vect = rx_ring->irq;
2644
459caf5a
RM
2645 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
2646 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
c4e84bde 2647
97345524 2648 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
c4e84bde 2649
97345524 2650 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
c4e84bde
RM
2651
2652 /*
2653 * Set up the control block load flags.
2654 */
2655 cqicb->flags = FLAGS_LC | /* Load queue base address */
2656 FLAGS_LV | /* Load MSI-X vector */
2657 FLAGS_LI; /* Load irq delay values */
2658 if (rx_ring->lbq_len) {
2659 cqicb->flags |= FLAGS_LL; /* Load lbq values */
a419aef8 2660 tmp = (u64)rx_ring->lbq_base_dma;
b8facca0
RM
2661 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
2662 page_entries = 0;
2663 do {
2664 *base_indirect_ptr = cpu_to_le64(tmp);
2665 tmp += DB_PAGE_SIZE;
2666 base_indirect_ptr++;
2667 page_entries++;
2668 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
97345524
RM
2669 cqicb->lbq_addr =
2670 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
459caf5a
RM
2671 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
2672 (u16) rx_ring->lbq_buf_size;
2673 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
2674 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
2675 (u16) rx_ring->lbq_len;
c4e84bde 2676 cqicb->lbq_len = cpu_to_le16(bq_len);
4545a3f2 2677 rx_ring->lbq_prod_idx = 0;
c4e84bde 2678 rx_ring->lbq_curr_idx = 0;
4545a3f2
RM
2679 rx_ring->lbq_clean_idx = 0;
2680 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
c4e84bde
RM
2681 }
2682 if (rx_ring->sbq_len) {
2683 cqicb->flags |= FLAGS_LS; /* Load sbq values */
a419aef8 2684 tmp = (u64)rx_ring->sbq_base_dma;
b8facca0
RM
2685 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
2686 page_entries = 0;
2687 do {
2688 *base_indirect_ptr = cpu_to_le64(tmp);
2689 tmp += DB_PAGE_SIZE;
2690 base_indirect_ptr++;
2691 page_entries++;
2692 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
97345524
RM
2693 cqicb->sbq_addr =
2694 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
c4e84bde 2695 cqicb->sbq_buf_size =
d4a4aba6 2696 cpu_to_le16((u16)(rx_ring->sbq_buf_size/2));
459caf5a
RM
2697 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
2698 (u16) rx_ring->sbq_len;
c4e84bde 2699 cqicb->sbq_len = cpu_to_le16(bq_len);
4545a3f2 2700 rx_ring->sbq_prod_idx = 0;
c4e84bde 2701 rx_ring->sbq_curr_idx = 0;
4545a3f2
RM
2702 rx_ring->sbq_clean_idx = 0;
2703 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
c4e84bde
RM
2704 }
2705 switch (rx_ring->type) {
2706 case TX_Q:
c4e84bde
RM
2707 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
2708 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
2709 break;
c4e84bde
RM
2710 case RX_Q:
2711 /* Inbound completion handling rx_rings run in
2712 * separate NAPI contexts.
2713 */
2714 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
2715 64);
2716 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
2717 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
2718 break;
2719 default:
2720 QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
2721 rx_ring->type);
2722 }
4974097a 2723 QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n");
c4e84bde
RM
2724 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
2725 CFG_LCQ, rx_ring->cq_id);
2726 if (err) {
2727 QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
2728 return err;
2729 }
c4e84bde
RM
2730 return err;
2731}
2732
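/* Set up the tx ring control block and pass it to the chip. The
 * control block is a wqicb ("Work Queue Initialization Control Block"),
 * mirroring what ql_start_rx_ring() does for completion queues.
 */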
2733static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2734{
2735 struct wqicb *wqicb = (struct wqicb *)tx_ring;
2736 void __iomem *doorbell_area =
2737 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
2738 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
2739 (tx_ring->wq_id * sizeof(u64));
2740 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
2741 (tx_ring->wq_id * sizeof(u64));
2742 int err = 0;
2743
2744 /*
2745 * Assign doorbell registers for this tx_ring.
2746 */
2747 /* TX PCI doorbell mem area for tx producer index */
8668ae92 2748 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
c4e84bde
RM
2749 tx_ring->prod_idx = 0;
2750 /* TX PCI doorbell mem area + 0x04 */
2751 tx_ring->valid_db_reg = doorbell_area + 0x04;
2752
2753 /*
2754 * Assign shadow registers for this tx_ring.
2755 */
2756 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
2757 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
2758
2759 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
2760 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
2761 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
2762 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
2763 wqicb->rid = 0;
97345524 2764 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
c4e84bde 2765
97345524 2766 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
c4e84bde
RM
2767
2768 ql_init_tx_ring(qdev, tx_ring);
2769
e332471c 2770 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
c4e84bde
RM
2771 (u16) tx_ring->wq_id);
2772 if (err) {
2773 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
2774 return err;
2775 }
4974097a 2776 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
c4e84bde
RM
2777 return err;
2778}
2779
2780static void ql_disable_msix(struct ql_adapter *qdev)
2781{
2782 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2783 pci_disable_msix(qdev->pdev);
2784 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
2785 kfree(qdev->msi_x_entry);
2786 qdev->msi_x_entry = NULL;
2787 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
2788 pci_disable_msi(qdev->pdev);
2789 clear_bit(QL_MSI_ENABLED, &qdev->flags);
2790 }
2791}
2792
a4ab6137
RM
2793/* We start by trying to get the number of vectors
2794 * stored in qdev->intr_count. If we don't get that
2795 * many then we reduce the count and try again.
2796 */
c4e84bde
RM
2797static void ql_enable_msix(struct ql_adapter *qdev)
2798{
a4ab6137 2799 int i, err;
c4e84bde 2800
c4e84bde
RM
2801 /* Get the MSIX vectors. */
2802 if (irq_type == MSIX_IRQ) {
2803 /* Try to alloc space for the msix struct,
2804 * if it fails then go to MSI/legacy.
2805 */
a4ab6137 2806 qdev->msi_x_entry = kcalloc(qdev->intr_count,
c4e84bde
RM
2807 sizeof(struct msix_entry),
2808 GFP_KERNEL);
2809 if (!qdev->msi_x_entry) {
2810 irq_type = MSI_IRQ;
2811 goto msi;
2812 }
2813
a4ab6137 2814 for (i = 0; i < qdev->intr_count; i++)
c4e84bde
RM
2815 qdev->msi_x_entry[i].entry = i;
2816
a4ab6137
RM
2817 /* Loop to get our vectors. We start with
2818 * what we want and settle for what we get.
2819 */
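/* In this kernel's API, pci_enable_msix() returns 0 on success, a
 * negative errno on failure, or a positive count of the vectors that
 * could have been allocated; in the last case we retry with that
 * smaller count.
 */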
2820 do {
2821 err = pci_enable_msix(qdev->pdev,
2822 qdev->msi_x_entry, qdev->intr_count);
2823 if (err > 0)
2824 qdev->intr_count = err;
2825 } while (err > 0);
2826
2827 if (err < 0) {
c4e84bde
RM
2828 kfree(qdev->msi_x_entry);
2829 qdev->msi_x_entry = NULL;
2830 QPRINTK(qdev, IFUP, WARNING,
2831 "MSI-X Enable failed, trying MSI.\n");
a4ab6137 2832 qdev->intr_count = 1;
c4e84bde 2833 irq_type = MSI_IRQ;
a4ab6137
RM
2834 } else if (err == 0) {
2835 set_bit(QL_MSIX_ENABLED, &qdev->flags);
2836 QPRINTK(qdev, IFUP, INFO,
2837 "MSI-X Enabled, got %d vectors.\n",
2838 qdev->intr_count);
2839 return;
c4e84bde
RM
2840 }
2841 }
2842msi:
a4ab6137 2843 qdev->intr_count = 1;
c4e84bde
RM
2844 if (irq_type == MSI_IRQ) {
2845 if (!pci_enable_msi(qdev->pdev)) {
2846 set_bit(QL_MSI_ENABLED, &qdev->flags);
2847 QPRINTK(qdev, IFUP, INFO,
2848 "Running with MSI interrupts.\n");
2849 return;
2850 }
2851 }
2852 irq_type = LEG_IRQ;
c4e84bde
RM
2853 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
2854}
2855
39aa8165
RM
 2856 /* Each vector services 1 RSS ring and 1 or more
2857 * TX completion rings. This function loops through
2858 * the TX completion rings and assigns the vector that
2859 * will service it. An example would be if there are
2860 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
2861 * This would mean that vector 0 would service RSS ring 0
 2862 * and TX completion rings 0,1,2 and 3. Vector 1 would
2863 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
2864 */
2865static void ql_set_tx_vect(struct ql_adapter *qdev)
2866{
2867 int i, j, vect;
2868 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
2869
2870 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2871 /* Assign irq vectors to TX rx_rings.*/
2872 for (vect = 0, j = 0, i = qdev->rss_ring_count;
2873 i < qdev->rx_ring_count; i++) {
2874 if (j == tx_rings_per_vector) {
2875 vect++;
2876 j = 0;
2877 }
2878 qdev->rx_ring[i].irq = vect;
2879 j++;
2880 }
2881 } else {
2882 /* For single vector all rings have an irq
2883 * of zero.
2884 */
2885 for (i = 0; i < qdev->rx_ring_count; i++)
2886 qdev->rx_ring[i].irq = 0;
2887 }
2888}
2889
2890/* Set the interrupt mask for this vector. Each vector
2891 * will service 1 RSS ring and 1 or more TX completion
2892 * rings. This function sets up a bit mask per vector
2893 * that indicates which rings it services.
2894 */
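/* Using the example from ql_set_tx_vect() above (2 vectors, 8 TX
 * completion rings, so 4 TX rings per vector): vector 0's mask would
 * cover RSS cq 0 plus TX completion cqs 2-5, and vector 1's mask
 * RSS cq 1 plus cqs 6-9.
 */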
2895static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
2896{
2897 int j, vect = ctx->intr;
2898 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
2899
2900 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2901 /* Add the RSS ring serviced by this vector
2902 * to the mask.
2903 */
2904 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
2905 /* Add the TX ring(s) serviced by this vector
2906 * to the mask. */
2907 for (j = 0; j < tx_rings_per_vector; j++) {
2908 ctx->irq_mask |=
2909 (1 << qdev->rx_ring[qdev->rss_ring_count +
2910 (vect * tx_rings_per_vector) + j].cq_id);
2911 }
2912 } else {
2913 /* For single vector we just shift each queue's
2914 * ID into the mask.
2915 */
2916 for (j = 0; j < qdev->rx_ring_count; j++)
2917 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
2918 }
2919}
2920
c4e84bde
RM
2921/*
2922 * Here we build the intr_context structures based on
2923 * our rx_ring count and intr vector count.
2924 * The intr_context structure is used to hook each vector
2925 * to possibly different handlers.
2926 */
2927static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
2928{
2929 int i = 0;
2930 struct intr_context *intr_context = &qdev->intr_context[0];
2931
c4e84bde
RM
2932 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
 2933 /* Each rx_ring has its
2934 * own intr_context since we have separate
2935 * vectors for each queue.
c4e84bde
RM
2936 */
2937 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2938 qdev->rx_ring[i].irq = i;
2939 intr_context->intr = i;
2940 intr_context->qdev = qdev;
39aa8165
RM
2941 /* Set up this vector's bit-mask that indicates
2942 * which queues it services.
2943 */
2944 ql_set_irq_mask(qdev, intr_context);
c4e84bde
RM
2945 /*
 2946 * We set up each vector's enable/disable/read bits so
 2947 * there are no bit/mask calculations in the critical path.
2948 */
2949 intr_context->intr_en_mask =
2950 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2951 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
2952 | i;
2953 intr_context->intr_dis_mask =
2954 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2955 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
2956 INTR_EN_IHD | i;
2957 intr_context->intr_read_mask =
2958 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2959 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
2960 i;
39aa8165
RM
2961 if (i == 0) {
2962 /* The first vector/queue handles
2963 * broadcast/multicast, fatal errors,
 2964 * and firmware events. This is in addition
2965 * to normal inbound NAPI processing.
c4e84bde 2966 */
39aa8165 2967 intr_context->handler = qlge_isr;
b2014ff8
RM
2968 sprintf(intr_context->name, "%s-rx-%d",
2969 qdev->ndev->name, i);
2970 } else {
c4e84bde 2971 /*
39aa8165 2972 * Inbound queues handle unicast frames only.
c4e84bde 2973 */
39aa8165
RM
2974 intr_context->handler = qlge_msix_rx_isr;
2975 sprintf(intr_context->name, "%s-rx-%d",
c4e84bde 2976 qdev->ndev->name, i);
c4e84bde
RM
2977 }
2978 }
2979 } else {
2980 /*
2981 * All rx_rings use the same intr_context since
2982 * there is only one vector.
2983 */
2984 intr_context->intr = 0;
2985 intr_context->qdev = qdev;
2986 /*
 2987 * We set up each vector's enable/disable/read bits so
 2988 * there are no bit/mask calculations in the critical path.
2989 */
2990 intr_context->intr_en_mask =
2991 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
2992 intr_context->intr_dis_mask =
2993 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2994 INTR_EN_TYPE_DISABLE;
2995 intr_context->intr_read_mask =
2996 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
2997 /*
2998 * Single interrupt means one handler for all rings.
2999 */
3000 intr_context->handler = qlge_isr;
3001 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
39aa8165
RM
3002 /* Set up this vector's bit-mask that indicates
3003 * which queues it services. In this case there is
3004 * a single vector so it will service all RSS and
3005 * TX completion rings.
3006 */
3007 ql_set_irq_mask(qdev, intr_context);
c4e84bde 3008 }
39aa8165
RM
3009 /* Tell the TX completion rings which MSIx vector
3010 * they will be using.
3011 */
3012 ql_set_tx_vect(qdev);
c4e84bde
RM
3013}
3014
3015static void ql_free_irq(struct ql_adapter *qdev)
3016{
3017 int i;
3018 struct intr_context *intr_context = &qdev->intr_context[0];
3019
3020 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3021 if (intr_context->hooked) {
3022 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3023 free_irq(qdev->msi_x_entry[i].vector,
3024 &qdev->rx_ring[i]);
4974097a 3025 QPRINTK(qdev, IFDOWN, DEBUG,
c4e84bde
RM
3026 "freeing msix interrupt %d.\n", i);
3027 } else {
3028 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
4974097a 3029 QPRINTK(qdev, IFDOWN, DEBUG,
c4e84bde
RM
3030 "freeing msi interrupt %d.\n", i);
3031 }
3032 }
3033 }
3034 ql_disable_msix(qdev);
3035}
3036
3037static int ql_request_irq(struct ql_adapter *qdev)
3038{
3039 int i;
3040 int status = 0;
3041 struct pci_dev *pdev = qdev->pdev;
3042 struct intr_context *intr_context = &qdev->intr_context[0];
3043
3044 ql_resolve_queues_to_irqs(qdev);
3045
3046 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3047 atomic_set(&intr_context->irq_cnt, 0);
3048 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3049 status = request_irq(qdev->msi_x_entry[i].vector,
3050 intr_context->handler,
3051 0,
3052 intr_context->name,
3053 &qdev->rx_ring[i]);
3054 if (status) {
3055 QPRINTK(qdev, IFUP, ERR,
3056 "Failed request for MSIX interrupt %d.\n",
3057 i);
3058 goto err_irq;
3059 } else {
4974097a 3060 QPRINTK(qdev, IFUP, DEBUG,
c4e84bde
RM
3061 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3062 i,
3063 qdev->rx_ring[i].type ==
3064 DEFAULT_Q ? "DEFAULT_Q" : "",
3065 qdev->rx_ring[i].type ==
3066 TX_Q ? "TX_Q" : "",
3067 qdev->rx_ring[i].type ==
3068 RX_Q ? "RX_Q" : "", intr_context->name);
3069 }
3070 } else {
3071 QPRINTK(qdev, IFUP, DEBUG,
3072 "trying msi or legacy interrupts.\n");
3073 QPRINTK(qdev, IFUP, DEBUG,
3074 "%s: irq = %d.\n", __func__, pdev->irq);
3075 QPRINTK(qdev, IFUP, DEBUG,
3076 "%s: context->name = %s.\n", __func__,
3077 intr_context->name);
3078 QPRINTK(qdev, IFUP, DEBUG,
3079 "%s: dev_id = 0x%p.\n", __func__,
3080 &qdev->rx_ring[0]);
3081 status =
3082 request_irq(pdev->irq, qlge_isr,
3083 test_bit(QL_MSI_ENABLED,
3084 &qdev->
3085 flags) ? 0 : IRQF_SHARED,
3086 intr_context->name, &qdev->rx_ring[0]);
3087 if (status)
3088 goto err_irq;
3089
3090 QPRINTK(qdev, IFUP, ERR,
3091 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3092 i,
3093 qdev->rx_ring[0].type ==
3094 DEFAULT_Q ? "DEFAULT_Q" : "",
3095 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
3096 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3097 intr_context->name);
3098 }
3099 intr_context->hooked = 1;
3100 }
3101 return status;
3102err_irq:
 3103 QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!\n");
3104 ql_free_irq(qdev);
3105 return status;
3106}
3107
3108static int ql_start_rss(struct ql_adapter *qdev)
3109{
541ae28c
RM
3110 u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3111 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3112 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3113 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3114 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3115 0xbe, 0xac, 0x01, 0xfa};
c4e84bde
RM
3116 struct ricb *ricb = &qdev->ricb;
3117 int status = 0;
3118 int i;
3119 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3120
e332471c 3121 memset((void *)ricb, 0, sizeof(*ricb));
c4e84bde 3122
b2014ff8 3123 ricb->base_cq = RSS_L4K;
c4e84bde 3124 ricb->flags =
541ae28c
RM
3125 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3126 ricb->mask = cpu_to_le16((u16)(0x3ff));
c4e84bde
RM
3127
3128 /*
3129 * Fill out the Indirection Table.
3130 */
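/* Each of the 1024 table entries maps an RSS hash bucket to a
 * completion queue id. Masking with (rss_ring_count - 1) spreads the
 * buckets evenly across the RSS rings when the ring count is a power
 * of two.
 */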
541ae28c
RM
3131 for (i = 0; i < 1024; i++)
3132 hash_id[i] = (i & (qdev->rss_ring_count - 1));
c4e84bde 3133
541ae28c
RM
3134 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3135 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
c4e84bde 3136
4974097a 3137 QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
c4e84bde 3138
e332471c 3139 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
c4e84bde
RM
3140 if (status) {
3141 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
3142 return status;
3143 }
4974097a 3144 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
c4e84bde
RM
3145 return status;
3146}
3147
a5f59dc9 3148static int ql_clear_routing_entries(struct ql_adapter *qdev)
c4e84bde 3149{
a5f59dc9 3150 int i, status = 0;
c4e84bde 3151
8587ea35
RM
3152 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3153 if (status)
3154 return status;
c4e84bde
RM
3155 /* Clear all the entries in the routing table. */
3156 for (i = 0; i < 16; i++) {
3157 status = ql_set_routing_reg(qdev, i, 0, 0);
3158 if (status) {
3159 QPRINTK(qdev, IFUP, ERR,
a5f59dc9
RM
3160 "Failed to init routing register for CAM "
3161 "packets.\n");
3162 break;
c4e84bde
RM
3163 }
3164 }
a5f59dc9
RM
3165 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3166 return status;
3167}
3168
3169/* Initialize the frame-to-queue routing. */
3170static int ql_route_initialize(struct ql_adapter *qdev)
3171{
3172 int status = 0;
3173
fd21cf52
RM
3174 /* Clear all the entries in the routing table. */
3175 status = ql_clear_routing_entries(qdev);
a5f59dc9
RM
3176 if (status)
3177 return status;
3178
fd21cf52 3179 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
a5f59dc9 3180 if (status)
fd21cf52 3181 return status;
c4e84bde
RM
3182
3183 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
3184 if (status) {
3185 QPRINTK(qdev, IFUP, ERR,
3186 "Failed to init routing register for error packets.\n");
8587ea35 3187 goto exit;
c4e84bde
RM
3188 }
3189 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3190 if (status) {
3191 QPRINTK(qdev, IFUP, ERR,
3192 "Failed to init routing register for broadcast packets.\n");
8587ea35 3193 goto exit;
c4e84bde
RM
3194 }
3195 /* If we have more than one inbound queue, then turn on RSS in the
3196 * routing block.
3197 */
3198 if (qdev->rss_ring_count > 1) {
3199 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3200 RT_IDX_RSS_MATCH, 1);
3201 if (status) {
3202 QPRINTK(qdev, IFUP, ERR,
3203 "Failed to init routing register for MATCH RSS packets.\n");
8587ea35 3204 goto exit;
c4e84bde
RM
3205 }
3206 }
3207
3208 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3209 RT_IDX_CAM_HIT, 1);
8587ea35 3210 if (status)
c4e84bde
RM
3211 QPRINTK(qdev, IFUP, ERR,
3212 "Failed to init routing register for CAM packets.\n");
8587ea35
RM
3213exit:
3214 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
c4e84bde
RM
3215 return status;
3216}
3217
2ee1e272 3218int ql_cam_route_initialize(struct ql_adapter *qdev)
bb58b5b6 3219{
7fab3bfe 3220 int status, set;
bb58b5b6 3221
7fab3bfe
RM
 3222 /* Check if the link is up and use that to
 3223 * determine whether we are setting or clearing
3224 * the MAC address in the CAM.
3225 */
3226 set = ql_read32(qdev, STS);
3227 set &= qdev->port_link_up;
3228 status = ql_set_mac_addr(qdev, set);
bb58b5b6
RM
3229 if (status) {
3230 QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
3231 return status;
3232 }
3233
3234 status = ql_route_initialize(qdev);
3235 if (status)
3236 QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
3237
3238 return status;
3239}
3240
c4e84bde
RM
3241static int ql_adapter_initialize(struct ql_adapter *qdev)
3242{
3243 u32 value, mask;
3244 int i;
3245 int status = 0;
3246
3247 /*
3248 * Set up the System register to halt on errors.
3249 */
3250 value = SYS_EFE | SYS_FAE;
3251 mask = value << 16;
3252 ql_write32(qdev, SYS, mask | value);
3253
c9cf0a04
RM
3254 /* Set the default queue, and VLAN behavior. */
3255 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3256 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
c4e84bde
RM
3257 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3258
3259 /* Set the MPI interrupt to enabled. */
3260 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3261
3262 /* Enable the function, set pagesize, enable error checking. */
3263 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3264 FSC_EC | FSC_VM_PAGE_4K | FSC_SH;
3265
3266 /* Set/clear header splitting. */
3267 mask = FSC_VM_PAGESIZE_MASK |
3268 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3269 ql_write32(qdev, FSC, mask | value);
3270
3271 ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
3272 min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));
3273
a3b71939
RM
 3274 /* Set RX packet routing to use the port/PCI function on which the
 3275 * packet arrived, in addition to the usual frame routing.
3276 * This is helpful on bonding where both interfaces can have
3277 * the same MAC address.
3278 */
3279 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3280
c4e84bde
RM
3281 /* Start up the rx queues. */
3282 for (i = 0; i < qdev->rx_ring_count; i++) {
3283 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3284 if (status) {
3285 QPRINTK(qdev, IFUP, ERR,
3286 "Failed to start rx ring[%d].\n", i);
3287 return status;
3288 }
3289 }
3290
3291 /* If there is more than one inbound completion queue
3292 * then download a RICB to configure RSS.
3293 */
3294 if (qdev->rss_ring_count > 1) {
3295 status = ql_start_rss(qdev);
3296 if (status) {
3297 QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
3298 return status;
3299 }
3300 }
3301
3302 /* Start up the tx queues. */
3303 for (i = 0; i < qdev->tx_ring_count; i++) {
3304 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3305 if (status) {
3306 QPRINTK(qdev, IFUP, ERR,
3307 "Failed to start tx ring[%d].\n", i);
3308 return status;
3309 }
3310 }
3311
b0c2aadf
RM
3312 /* Initialize the port and set the max framesize. */
3313 status = qdev->nic_ops->port_initialize(qdev);
3314 if (status) {
3315 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
3316 return status;
3317 }
c4e84bde 3318
bb58b5b6
RM
3319 /* Set up the MAC address and frame routing filter. */
3320 status = ql_cam_route_initialize(qdev);
c4e84bde 3321 if (status) {
bb58b5b6
RM
3322 QPRINTK(qdev, IFUP, ERR,
3323 "Failed to init CAM/Routing tables.\n");
c4e84bde
RM
3324 return status;
3325 }
3326
3327 /* Start NAPI for the RSS queues. */
b2014ff8 3328 for (i = 0; i < qdev->rss_ring_count; i++) {
4974097a 3329 QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
c4e84bde
RM
3330 i);
3331 napi_enable(&qdev->rx_ring[i].napi);
3332 }
3333
3334 return status;
3335}
3336
3337/* Issue soft reset to chip. */
3338static int ql_adapter_reset(struct ql_adapter *qdev)
3339{
3340 u32 value;
c4e84bde 3341 int status = 0;
a5f59dc9 3342 unsigned long end_jiffies;
c4e84bde 3343
a5f59dc9
RM
3344 /* Clear all the entries in the routing table. */
3345 status = ql_clear_routing_entries(qdev);
3346 if (status) {
3347 QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
3348 return status;
3349 }
3350
3351 end_jiffies = jiffies +
3352 max((unsigned long)1, usecs_to_jiffies(30));
84087f4d
RM
3353
3354 /* Stop management traffic. */
3355 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3356
3357 /* Wait for the NIC and MGMNT FIFOs to empty. */
3358 ql_wait_fifo_empty(qdev);
3359
c4e84bde 3360 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
a75ee7f1 3361
c4e84bde
RM
3362 do {
3363 value = ql_read32(qdev, RST_FO);
3364 if ((value & RST_FO_FR) == 0)
3365 break;
a75ee7f1
RM
3366 cpu_relax();
3367 } while (time_before(jiffies, end_jiffies));
c4e84bde 3368
c4e84bde 3369 if (value & RST_FO_FR) {
c4e84bde 3370 QPRINTK(qdev, IFDOWN, ERR,
3ac49a1c 3371 "ETIMEDOUT!!! errored out of resetting the chip!\n");
a75ee7f1 3372 status = -ETIMEDOUT;
c4e84bde
RM
3373 }
3374
84087f4d
RM
3375 /* Resume management traffic. */
3376 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
c4e84bde
RM
3377 return status;
3378}
3379
3380static void ql_display_dev_info(struct net_device *ndev)
3381{
3382 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3383
3384 QPRINTK(qdev, PROBE, INFO,
e4552f51 3385 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
c4e84bde
RM
3386 "XG Roll = %d, XG Rev = %d.\n",
3387 qdev->func,
e4552f51 3388 qdev->port,
c4e84bde
RM
3389 qdev->chip_rev_id & 0x0000000f,
3390 qdev->chip_rev_id >> 4 & 0x0000000f,
3391 qdev->chip_rev_id >> 8 & 0x0000000f,
3392 qdev->chip_rev_id >> 12 & 0x0000000f);
7c510e4b 3393 QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
c4e84bde
RM
3394}
3395
3396static int ql_adapter_down(struct ql_adapter *qdev)
3397{
c4e84bde 3398 int i, status = 0;
c4e84bde 3399
6a473308 3400 ql_link_off(qdev);
c4e84bde 3401
6497b607
RM
3402 /* Don't kill the reset worker thread if we
3403 * are in the process of recovery.
3404 */
3405 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3406 cancel_delayed_work_sync(&qdev->asic_reset_work);
c4e84bde
RM
3407 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3408 cancel_delayed_work_sync(&qdev->mpi_work);
2ee1e272 3409 cancel_delayed_work_sync(&qdev->mpi_idc_work);
bcc2cb3b 3410 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
c4e84bde 3411
39aa8165
RM
3412 for (i = 0; i < qdev->rss_ring_count; i++)
3413 napi_disable(&qdev->rx_ring[i].napi);
c4e84bde
RM
3414
3415 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3416
3417 ql_disable_interrupts(qdev);
3418
3419 ql_tx_ring_clean(qdev);
3420
6b318cb3
RM
3421 /* Call netif_napi_del() from common point.
3422 */
b2014ff8 3423 for (i = 0; i < qdev->rss_ring_count; i++)
6b318cb3
RM
3424 netif_napi_del(&qdev->rx_ring[i].napi);
3425
4545a3f2 3426 ql_free_rx_buffers(qdev);
2d6a5e95 3427
c4e84bde
RM
3428 status = ql_adapter_reset(qdev);
3429 if (status)
3430 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3431 qdev->func);
c4e84bde
RM
3432 return status;
3433}
3434
3435static int ql_adapter_up(struct ql_adapter *qdev)
3436{
3437 int err = 0;
3438
c4e84bde
RM
3439 err = ql_adapter_initialize(qdev);
3440 if (err) {
3441 QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
c4e84bde
RM
3442 goto err_init;
3443 }
c4e84bde 3444 set_bit(QL_ADAPTER_UP, &qdev->flags);
4545a3f2 3445 ql_alloc_rx_buffers(qdev);
8b007de1
RM
3446 /* If the port is initialized and the
 3447 * link is up then turn on the carrier.
3448 */
3449 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3450 (ql_read32(qdev, STS) & qdev->port_link_up))
6a473308 3451 ql_link_on(qdev);
c4e84bde
RM
3452 ql_enable_interrupts(qdev);
3453 ql_enable_all_completion_interrupts(qdev);
1e213303 3454 netif_tx_start_all_queues(qdev->ndev);
c4e84bde
RM
3455
3456 return 0;
3457err_init:
3458 ql_adapter_reset(qdev);
3459 return err;
3460}
3461
c4e84bde
RM
3462static void ql_release_adapter_resources(struct ql_adapter *qdev)
3463{
3464 ql_free_mem_resources(qdev);
3465 ql_free_irq(qdev);
3466}
3467
3468static int ql_get_adapter_resources(struct ql_adapter *qdev)
3469{
3470 int status = 0;
3471
3472 if (ql_alloc_mem_resources(qdev)) {
3473 QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
3474 return -ENOMEM;
3475 }
3476 status = ql_request_irq(qdev);
c4e84bde
RM
3477 return status;
3478}
3479
3480static int qlge_close(struct net_device *ndev)
3481{
3482 struct ql_adapter *qdev = netdev_priv(ndev);
3483
3484 /*
3485 * Wait for device to recover from a reset.
3486 * (Rarely happens, but possible.)
3487 */
3488 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3489 msleep(1);
3490 ql_adapter_down(qdev);
3491 ql_release_adapter_resources(qdev);
c4e84bde
RM
3492 return 0;
3493}
3494
3495static int ql_configure_rings(struct ql_adapter *qdev)
3496{
3497 int i;
3498 struct rx_ring *rx_ring;
3499 struct tx_ring *tx_ring;
a4ab6137
RM
3500 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
3501
3502 /* In a perfect world we have one RSS ring for each CPU
 3503 * and each has its own vector. To do that we ask for
3504 * cpu_cnt vectors. ql_enable_msix() will adjust the
3505 * vector count to what we actually get. We then
3506 * allocate an RSS ring for each.
3507 * Essentially, we are doing min(cpu_count, msix_vector_count).
c4e84bde 3508 */
a4ab6137
RM
3509 qdev->intr_count = cpu_cnt;
3510 ql_enable_msix(qdev);
3511 /* Adjust the RSS ring count to the actual vector count. */
3512 qdev->rss_ring_count = qdev->intr_count;
c4e84bde 3513 qdev->tx_ring_count = cpu_cnt;
b2014ff8 3514 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
c4e84bde 3515
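/* Ring layout from here on: rx_ring[0..rss_ring_count-1] are inbound
 * RSS (RX_Q) rings with small/large buffer queues, and
 * rx_ring[rss_ring_count..rx_ring_count-1] are outbound-completion
 * (TX_Q) rings, one per tx_ring, with no buffer queues.
 */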
c4e84bde
RM
3516 for (i = 0; i < qdev->tx_ring_count; i++) {
3517 tx_ring = &qdev->tx_ring[i];
e332471c 3518 memset((void *)tx_ring, 0, sizeof(*tx_ring));
c4e84bde
RM
3519 tx_ring->qdev = qdev;
3520 tx_ring->wq_id = i;
3521 tx_ring->wq_len = qdev->tx_ring_size;
3522 tx_ring->wq_size =
3523 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3524
3525 /*
3526 * The completion queue ID for the tx rings start
39aa8165 3527 * immediately after the rss rings.
c4e84bde 3528 */
39aa8165 3529 tx_ring->cq_id = qdev->rss_ring_count + i;
c4e84bde
RM
3530 }
3531
3532 for (i = 0; i < qdev->rx_ring_count; i++) {
3533 rx_ring = &qdev->rx_ring[i];
e332471c 3534 memset((void *)rx_ring, 0, sizeof(*rx_ring));
c4e84bde
RM
3535 rx_ring->qdev = qdev;
3536 rx_ring->cq_id = i;
3537 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
b2014ff8 3538 if (i < qdev->rss_ring_count) {
39aa8165
RM
3539 /*
3540 * Inbound (RSS) queues.
3541 */
c4e84bde
RM
3542 rx_ring->cq_len = qdev->rx_ring_size;
3543 rx_ring->cq_size =
3544 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3545 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3546 rx_ring->lbq_size =
2c9a0d41 3547 rx_ring->lbq_len * sizeof(__le64);
c4e84bde
RM
3548 rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
3549 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3550 rx_ring->sbq_size =
2c9a0d41 3551 rx_ring->sbq_len * sizeof(__le64);
c4e84bde 3552 rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
b2014ff8
RM
3553 rx_ring->type = RX_Q;
3554 } else {
c4e84bde
RM
3555 /*
3556 * Outbound queue handles outbound completions only.
3557 */
3558 /* outbound cq is same size as tx_ring it services. */
3559 rx_ring->cq_len = qdev->tx_ring_size;
3560 rx_ring->cq_size =
3561 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3562 rx_ring->lbq_len = 0;
3563 rx_ring->lbq_size = 0;
3564 rx_ring->lbq_buf_size = 0;
3565 rx_ring->sbq_len = 0;
3566 rx_ring->sbq_size = 0;
3567 rx_ring->sbq_buf_size = 0;
3568 rx_ring->type = TX_Q;
c4e84bde
RM
3569 }
3570 }
3571 return 0;
3572}
3573
3574static int qlge_open(struct net_device *ndev)
3575{
3576 int err = 0;
3577 struct ql_adapter *qdev = netdev_priv(ndev);
3578
3579 err = ql_configure_rings(qdev);
3580 if (err)
3581 return err;
3582
3583 err = ql_get_adapter_resources(qdev);
3584 if (err)
3585 goto error_up;
3586
3587 err = ql_adapter_up(qdev);
3588 if (err)
3589 goto error_up;
3590
3591 return err;
3592
3593error_up:
3594 ql_release_adapter_resources(qdev);
c4e84bde
RM
3595 return err;
3596}
3597
3598static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
3599{
3600 struct ql_adapter *qdev = netdev_priv(ndev);
3601
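	/*
	 * Only 1500 <-> 9000 byte transitions are supported. Moving to a
	 * jumbo MTU schedules mpi_port_cfg_work so the MPI firmware can
	 * reconfigure the port (e.g. its max frame size) for jumbo frames.
	 */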
3602 if (ndev->mtu == 1500 && new_mtu == 9000) {
3603 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
bcc2cb3b
RM
3604 queue_delayed_work(qdev->workqueue,
3605 &qdev->mpi_port_cfg_work, 0);
c4e84bde
RM
3606 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
3607 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
3608 } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
3609 (ndev->mtu == 9000 && new_mtu == 9000)) {
3610 return 0;
3611 } else
3612 return -EINVAL;
3613 ndev->mtu = new_mtu;
3614 return 0;
3615}
3616
3617static struct net_device_stats *qlge_get_stats(struct net_device
3618 *ndev)
3619{
bcc90f55 3620 return &ndev->stats;
c4e84bde
RM
3621}
3622
3623static void qlge_set_multicast_list(struct net_device *ndev)
3624{
3625 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3626 struct dev_mc_list *mc_ptr;
cc288f54 3627 int i, status;
c4e84bde 3628
cc288f54
RM
3629 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3630 if (status)
3631 return;
c4e84bde
RM
3632 /*
3633 * Set or clear promiscuous mode if a
3634 * transition is taking place.
3635 */
3636 if (ndev->flags & IFF_PROMISC) {
3637 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3638 if (ql_set_routing_reg
3639 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
3640 QPRINTK(qdev, HW, ERR,
3641 "Failed to set promiscuous mode.\n");
3642 } else {
3643 set_bit(QL_PROMISCUOUS, &qdev->flags);
3644 }
3645 }
3646 } else {
3647 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3648 if (ql_set_routing_reg
3649 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
3650 QPRINTK(qdev, HW, ERR,
3651 "Failed to clear promiscuous mode.\n");
3652 } else {
3653 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3654 }
3655 }
3656 }
3657
3658 /*
3659 * Set or clear all multicast mode if a
3660 * transition is taking place.
3661 */
3662 if ((ndev->flags & IFF_ALLMULTI) ||
3663 (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
3664 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
3665 if (ql_set_routing_reg
3666 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
3667 QPRINTK(qdev, HW, ERR,
3668 "Failed to set all-multi mode.\n");
3669 } else {
3670 set_bit(QL_ALLMULTI, &qdev->flags);
3671 }
3672 }
3673 } else {
3674 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
3675 if (ql_set_routing_reg
3676 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
3677 QPRINTK(qdev, HW, ERR,
3678 "Failed to clear all-multi mode.\n");
3679 } else {
3680 clear_bit(QL_ALLMULTI, &qdev->flags);
3681 }
3682 }
3683 }
3684
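	/*
	 * For a non-empty multicast list, take the MAC-address semaphore,
	 * load each address into the hardware's multicast MAC region, and
	 * enable the multicast-match routing slot so frames for the listed
	 * groups are delivered.
	 */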
3685 if (ndev->mc_count) {
cc288f54
RM
3686 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3687 if (status)
3688 goto exit;
c4e84bde
RM
3689 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
3690 i++, mc_ptr = mc_ptr->next)
3691 if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
3692 MAC_ADDR_TYPE_MULTI_MAC, i)) {
3693 QPRINTK(qdev, HW, ERR,
3694 "Failed to load multicast address.\n");
cc288f54 3695 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
c4e84bde
RM
3696 goto exit;
3697 }
cc288f54 3698 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
c4e84bde
RM
3699 if (ql_set_routing_reg
3700 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
3701 QPRINTK(qdev, HW, ERR,
3702 "Failed to set multicast match mode.\n");
3703 } else {
3704 set_bit(QL_ALLMULTI, &qdev->flags);
3705 }
3706 }
3707exit:
8587ea35 3708 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
c4e84bde
RM
3709}
3710
3711static int qlge_set_mac_address(struct net_device *ndev, void *p)
3712{
3713 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3714 struct sockaddr *addr = p;
cc288f54 3715 int status;
c4e84bde
RM
3716
3717 if (netif_running(ndev))
3718 return -EBUSY;
3719
3720 if (!is_valid_ether_addr(addr->sa_data))
3721 return -EADDRNOTAVAIL;
3722 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3723
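	/* Write the new unicast address into the MAC CAM (index
	 * qdev->func * MAX_CQ, the entry this function uses for its
	 * primary address), under the MAC-address hardware semaphore.
	 */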
cc288f54
RM
3724 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3725 if (status)
3726 return status;
cc288f54
RM
3727 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
3728 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
cc288f54
RM
3729 if (status)
3730 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
3731 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
3732 return status;
c4e84bde
RM
3733}
3734
3735static void qlge_tx_timeout(struct net_device *ndev)
3736{
3737 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
6497b607 3738 ql_queue_asic_error(qdev);
c4e84bde
RM
3739}
3740
3741static void ql_asic_reset_work(struct work_struct *work)
3742{
3743 struct ql_adapter *qdev =
3744 container_of(work, struct ql_adapter, asic_reset_work.work);
db98812f 3745 int status;
f2c0d8df 3746 rtnl_lock();
db98812f
RM
3747 status = ql_adapter_down(qdev);
3748 if (status)
3749 goto error;
3750
3751 status = ql_adapter_up(qdev);
3752 if (status)
3753 goto error;
2cd6dbaa
RM
3754
3755 /* Restore rx mode: clear the cached flags so qlge_set_multicast_list() reprograms the freshly reset hardware. */
3756 clear_bit(QL_ALLMULTI, &qdev->flags);
3757 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3758 qlge_set_multicast_list(qdev->ndev);
3759
f2c0d8df 3760 rtnl_unlock();
db98812f
RM
3761 return;
3762error:
3763 QPRINTK(qdev, IFUP, ALERT,
3764 "Driver up/down cycle failed, closing device\n");
f2c0d8df 3765
db98812f
RM
3766 set_bit(QL_ADAPTER_UP, &qdev->flags);
3767 dev_close(qdev->ndev);
3768 rtnl_unlock();
c4e84bde
RM
3769}
3770
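/*
 * Per-ASIC operations: ql_get_board_info() points qdev->nic_ops at one of
 * these based on the PCI device ID, so the 8012 and 8000 parts share the
 * rest of the driver while using their own flash-read and
 * port-initialization routines.
 */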
b0c2aadf
RM
3771static struct nic_operations qla8012_nic_ops = {
3772 .get_flash = ql_get_8012_flash_params,
3773 .port_initialize = ql_8012_port_initialize,
3774};
3775
cdca8d02
RM
3776static struct nic_operations qla8000_nic_ops = {
3777 .get_flash = ql_get_8000_flash_params,
3778 .port_initialize = ql_8000_port_initialize,
3779};
3780
e4552f51
RM
3781/* Find the pcie function number for the other NIC
3782 * on this chip. Since both NIC functions share a
3783 * common firmware we have the lowest enabled function
3784 * do any common work. Examples would be resetting
3785 * after a fatal firmware error, or doing a firmware
3786 * coredump.
3787 */
3788static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
3789{
3790 int status = 0;
3791 u32 temp;
3792 u32 nic_func1, nic_func2;
3793
3794 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
3795 &temp);
3796 if (status)
3797 return status;
3798
3799 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
3800 MPI_TEST_NIC_FUNC_MASK);
3801 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
3802 MPI_TEST_NIC_FUNC_MASK);
3803
3804 if (qdev->func == nic_func1)
3805 qdev->alt_func = nic_func2;
3806 else if (qdev->func == nic_func2)
3807 qdev->alt_func = nic_func1;
3808 else
3809 status = -EIO;
3810
3811 return status;
3812}
b0c2aadf 3813
e4552f51 3814static int ql_get_board_info(struct ql_adapter *qdev)
c4e84bde 3815{
e4552f51 3816 int status;
c4e84bde
RM
3817 qdev->func =
3818 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
e4552f51
RM
3819 if (qdev->func > 3)
3820 return -EIO;
3821
3822 status = ql_get_alt_pcie_func(qdev);
3823 if (status)
3824 return status;
3825
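	/* The lower-numbered of the two NIC functions is treated as port 0.
	 * For example, if this function is 2 and the alternate NIC function
	 * is 1, this instance becomes port 1 and uses the XGMAC1 semaphore
	 * and the FUNC2 mailbox registers selected below.
	 */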
3826 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
3827 if (qdev->port) {
c4e84bde
RM
3828 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
3829 qdev->port_link_up = STS_PL1;
3830 qdev->port_init = STS_PI1;
3831 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
3832 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
3833 } else {
3834 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
3835 qdev->port_link_up = STS_PL0;
3836 qdev->port_init = STS_PI0;
3837 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
3838 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
3839 }
3840 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
b0c2aadf
RM
3841 qdev->device_id = qdev->pdev->device;
3842 if (qdev->device_id == QLGE_DEVICE_ID_8012)
3843 qdev->nic_ops = &qla8012_nic_ops;
cdca8d02
RM
3844 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
3845 qdev->nic_ops = &qla8000_nic_ops;
e4552f51 3846 return status;
c4e84bde
RM
3847}
3848
3849static void ql_release_all(struct pci_dev *pdev)
3850{
3851 struct net_device *ndev = pci_get_drvdata(pdev);
3852 struct ql_adapter *qdev = netdev_priv(ndev);
3853
3854 if (qdev->workqueue) {
3855 destroy_workqueue(qdev->workqueue);
3856 qdev->workqueue = NULL;
3857 }
39aa8165 3858
c4e84bde 3859 if (qdev->reg_base)
8668ae92 3860 iounmap(qdev->reg_base);
c4e84bde
RM
3861 if (qdev->doorbell_area)
3862 iounmap(qdev->doorbell_area);
3863 pci_release_regions(pdev);
3864 pci_set_drvdata(pdev, NULL);
3865}
3866
3867static int __devinit ql_init_device(struct pci_dev *pdev,
3868 struct net_device *ndev, int cards_found)
3869{
3870 struct ql_adapter *qdev = netdev_priv(ndev);
1d1023d0 3871 int err = 0;
c4e84bde 3872
e332471c 3873 memset((void *)qdev, 0, sizeof(*qdev));
c4e84bde
RM
3874 err = pci_enable_device(pdev);
3875 if (err) {
3876 dev_err(&pdev->dev, "PCI device enable failed.\n");
3877 return err;
3878 }
3879
ebd6e774
RM
3880 qdev->ndev = ndev;
3881 qdev->pdev = pdev;
3882 pci_set_drvdata(pdev, ndev);
c4e84bde 3883
bc9167f3
RM
3884 /* Set PCIe read request size */
3885 err = pcie_set_readrq(pdev, 4096);
3886 if (err) {
3887 dev_err(&pdev->dev, "Set readrq failed.\n");
3888 goto err_out;
3889 }
3890
c4e84bde
RM
3891 err = pci_request_regions(pdev, DRV_NAME);
3892 if (err) {
3893 dev_err(&pdev->dev, "PCI region request failed.\n");
ebd6e774 3894 return err;
c4e84bde
RM
3895 }
3896
3897 pci_set_master(pdev);
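	/* Prefer 64-bit DMA and record it in QL_DMA64 (used later to set
	 * NETIF_F_HIGHDMA); otherwise fall back to a 32-bit mask.
	 */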
6a35528a 3898 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
c4e84bde 3899 set_bit(QL_DMA64, &qdev->flags);
6a35528a 3900 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
c4e84bde 3901 } else {
284901a9 3902 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
c4e84bde 3903 if (!err)
284901a9 3904 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
c4e84bde
RM
3905 }
3906
3907 if (err) {
3908 dev_err(&pdev->dev, "No usable DMA configuration.\n");
3909 goto err_out;
3910 }
3911
c4e84bde
RM
3912 qdev->reg_base =
3913 ioremap_nocache(pci_resource_start(pdev, 1),
3914 pci_resource_len(pdev, 1));
3915 if (!qdev->reg_base) {
3916 dev_err(&pdev->dev, "Register mapping failed.\n");
3917 err = -ENOMEM;
3918 goto err_out;
3919 }
3920
3921 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
3922 qdev->doorbell_area =
3923 ioremap_nocache(pci_resource_start(pdev, 3),
3924 pci_resource_len(pdev, 3));
3925 if (!qdev->doorbell_area) {
3926 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
3927 err = -ENOMEM;
3928 goto err_out;
3929 }
3930
e4552f51
RM
3931 err = ql_get_board_info(qdev);
3932 if (err) {
3933 dev_err(&pdev->dev, "Register access failed.\n");
3934 err = -EIO;
3935 goto err_out;
3936 }
c4e84bde
RM
3937 qdev->msg_enable = netif_msg_init(debug, default_msg);
3938 spin_lock_init(&qdev->hw_lock);
3939 spin_lock_init(&qdev->stats_lock);
3940
3941 /* make sure the EEPROM is good */
b0c2aadf 3942 err = qdev->nic_ops->get_flash(qdev);
c4e84bde
RM
3943 if (err) {
3944 dev_err(&pdev->dev, "Invalid FLASH.\n");
3945 goto err_out;
3946 }
3947
c4e84bde
RM
3948 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
3949
3950 /* Set up the default ring sizes. */
3951 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
3952 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
3953
3954 /* Set up the coalescing parameters. */
3955 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
3956 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
3957 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
3958 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
3959
3960 /*
3961 * Set up the operating parameters.
3962 */
3963 qdev->rx_csum = 1;
c4e84bde
RM
3964 qdev->workqueue = create_singlethread_workqueue(ndev->name);
3965 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
3966 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
3967 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
bcc2cb3b 3968 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
2ee1e272 3969 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
bcc2cb3b 3970 init_completion(&qdev->ide_completion);
c4e84bde
RM
3971
3972 if (!cards_found) {
3973 dev_info(&pdev->dev, "%s\n", DRV_STRING);
3974 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
3975 DRV_NAME, DRV_VERSION);
3976 }
3977 return 0;
3978err_out:
3979 ql_release_all(pdev);
3980 pci_disable_device(pdev);
3981 return err;
3982}
3983
25ed7849
SH
3984
3985static const struct net_device_ops qlge_netdev_ops = {
3986 .ndo_open = qlge_open,
3987 .ndo_stop = qlge_close,
3988 .ndo_start_xmit = qlge_send,
3989 .ndo_change_mtu = qlge_change_mtu,
3990 .ndo_get_stats = qlge_get_stats,
3991 .ndo_set_multicast_list = qlge_set_multicast_list,
3992 .ndo_set_mac_address = qlge_set_mac_address,
3993 .ndo_validate_addr = eth_validate_addr,
3994 .ndo_tx_timeout = qlge_tx_timeout,
3995 .ndo_vlan_rx_register = ql_vlan_rx_register,
3996 .ndo_vlan_rx_add_vid = ql_vlan_rx_add_vid,
3997 .ndo_vlan_rx_kill_vid = ql_vlan_rx_kill_vid,
3998};
3999
c4e84bde
RM
4000static int __devinit qlge_probe(struct pci_dev *pdev,
4001 const struct pci_device_id *pci_entry)
4002{
4003 struct net_device *ndev = NULL;
4004 struct ql_adapter *qdev = NULL;
4005 static int cards_found = 0;
4006 int err = 0;
4007
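	/* Allocate a multiqueue netdev with one TX queue per CPU (capped at
	 * MAX_CPUS), matching the tx_ring_count chosen in
	 * ql_configure_rings().
	 */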
1e213303
RM
4008 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4009 min(MAX_CPUS, (int)num_online_cpus()));
c4e84bde
RM
4010 if (!ndev)
4011 return -ENOMEM;
4012
4013 err = ql_init_device(pdev, ndev, cards_found);
4014 if (err < 0) {
4015 free_netdev(ndev);
4016 return err;
4017 }
4018
4019 qdev = netdev_priv(ndev);
4020 SET_NETDEV_DEV(ndev, &pdev->dev);
4021 ndev->features = (0
4022 | NETIF_F_IP_CSUM
4023 | NETIF_F_SG
4024 | NETIF_F_TSO
4025 | NETIF_F_TSO6
4026 | NETIF_F_TSO_ECN
4027 | NETIF_F_HW_VLAN_TX
4028 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
22bdd4f5 4029 ndev->features |= NETIF_F_GRO;
c4e84bde
RM
4030
4031 if (test_bit(QL_DMA64, &qdev->flags))
4032 ndev->features |= NETIF_F_HIGHDMA;
4033
4034 /*
4035 * Set up net_device structure.
4036 */
4037 ndev->tx_queue_len = qdev->tx_ring_size;
4038 ndev->irq = pdev->irq;
25ed7849
SH
4039
4040 ndev->netdev_ops = &qlge_netdev_ops;
c4e84bde 4041 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
c4e84bde 4042 ndev->watchdog_timeo = 10 * HZ;
25ed7849 4043
c4e84bde
RM
4044 err = register_netdev(ndev);
4045 if (err) {
4046 dev_err(&pdev->dev, "net device registration failed.\n");
4047 ql_release_all(pdev);
4048 pci_disable_device(pdev);
4049 return err;
4050 }
6a473308 4051 ql_link_off(qdev);
c4e84bde
RM
4052 ql_display_dev_info(ndev);
4053 cards_found++;
4054 return 0;
4055}
4056
4057static void __devexit qlge_remove(struct pci_dev *pdev)
4058{
4059 struct net_device *ndev = pci_get_drvdata(pdev);
4060 unregister_netdev(ndev);
4061 ql_release_all(pdev);
4062 pci_disable_device(pdev);
4063 free_netdev(ndev);
4064}
4065
4066/*
4067 * This callback is called by the PCI subsystem whenever
4068 * a PCI bus error is detected.
4069 */
4070static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4071 enum pci_channel_state state)
4072{
4073 struct net_device *ndev = pci_get_drvdata(pdev);
4074 struct ql_adapter *qdev = netdev_priv(ndev);
4075
fbc663ce
DN
4076 netif_device_detach(ndev);
4077
4078 if (state == pci_channel_io_perm_failure)
4079 return PCI_ERS_RESULT_DISCONNECT;
4080
c4e84bde
RM
4081 if (netif_running(ndev))
4082 ql_adapter_down(qdev);
4083
4084 pci_disable_device(pdev);
4085
4086 /* Request a slot reset. */
4087 return PCI_ERS_RESULT_NEED_RESET;
4088}
4089
4090/*
4091 * This callback is called after the PCI bus has been reset.
4092 * Basically, this tries to restart the card from scratch.
4093 * This is a shortened version of the device probe/discovery code;
4094 * it resembles the first half of the qlge_probe() routine.
4095 */
4096static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4097{
4098 struct net_device *ndev = pci_get_drvdata(pdev);
4099 struct ql_adapter *qdev = netdev_priv(ndev);
4100
4101 if (pci_enable_device(pdev)) {
4102 QPRINTK(qdev, IFUP, ERR,
4103 "Cannot re-enable PCI device after reset.\n");
4104 return PCI_ERS_RESULT_DISCONNECT;
4105 }
4106
4107 pci_set_master(pdev);
4108
4109 netif_carrier_off(ndev);
c4e84bde
RM
4110 ql_adapter_reset(qdev);
4111
4112 /* Re-check that we still have a valid MAC address after the reset. */
4113 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4114
4115 if (!is_valid_ether_addr(ndev->perm_addr)) {
4116 QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
4117 return PCI_ERS_RESULT_DISCONNECT;
4118 }
4119
4120 return PCI_ERS_RESULT_RECOVERED;
4121}
4122
4123static void qlge_io_resume(struct pci_dev *pdev)
4124{
4125 struct net_device *ndev = pci_get_drvdata(pdev);
4126 struct ql_adapter *qdev = netdev_priv(ndev);
4127
4128 pci_set_master(pdev);
4129
4130 if (netif_running(ndev)) {
4131 if (ql_adapter_up(qdev)) {
4132 QPRINTK(qdev, IFUP, ERR,
4133 "Device initialization failed after reset.\n");
4134 return;
4135 }
4136 }
4137
4138 netif_device_attach(ndev);
4139}
4140
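/*
 * AER recovery flow: error_detected() detaches the netdev and brings the
 * adapter down, slot_reset() re-enables the PCI device and resets the chip,
 * and resume() brings the adapter back up and re-attaches the netdev.
 */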
4141static struct pci_error_handlers qlge_err_handler = {
4142 .error_detected = qlge_io_error_detected,
4143 .slot_reset = qlge_io_slot_reset,
4144 .resume = qlge_io_resume,
4145};
4146
4147static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4148{
4149 struct net_device *ndev = pci_get_drvdata(pdev);
4150 struct ql_adapter *qdev = netdev_priv(ndev);
6b318cb3 4151 int err;
c4e84bde
RM
4152
4153 netif_device_detach(ndev);
4154
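	/* Quiesce the adapter before saving PCI state and powering down;
	 * if the interface cannot be brought down cleanly, abort the
	 * suspend and report the error.
	 */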
4155 if (netif_running(ndev)) {
4156 err = ql_adapter_down(qdev);
4157 if (err)
4158 return err;
4159 }
4160
4161 err = pci_save_state(pdev);
4162 if (err)
4163 return err;
4164
4165 pci_disable_device(pdev);
4166
4167 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4168
4169 return 0;
4170}
4171
04da2cf9 4172#ifdef CONFIG_PM
c4e84bde
RM
4173static int qlge_resume(struct pci_dev *pdev)
4174{
4175 struct net_device *ndev = pci_get_drvdata(pdev);
4176 struct ql_adapter *qdev = netdev_priv(ndev);
4177 int err;
4178
4179 pci_set_power_state(pdev, PCI_D0);
4180 pci_restore_state(pdev);
4181 err = pci_enable_device(pdev);
4182 if (err) {
4183 QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
4184 return err;
4185 }
4186 pci_set_master(pdev);
4187
4188 pci_enable_wake(pdev, PCI_D3hot, 0);
4189 pci_enable_wake(pdev, PCI_D3cold, 0);
4190
4191 if (netif_running(ndev)) {
4192 err = ql_adapter_up(qdev);
4193 if (err)
4194 return err;
4195 }
4196
4197 netif_device_attach(ndev);
4198
4199 return 0;
4200}
04da2cf9 4201#endif /* CONFIG_PM */
c4e84bde
RM
4202
4203static void qlge_shutdown(struct pci_dev *pdev)
4204{
4205 qlge_suspend(pdev, PMSG_SUSPEND);
4206}
4207
4208static struct pci_driver qlge_driver = {
4209 .name = DRV_NAME,
4210 .id_table = qlge_pci_tbl,
4211 .probe = qlge_probe,
4212 .remove = __devexit_p(qlge_remove),
4213#ifdef CONFIG_PM
4214 .suspend = qlge_suspend,
4215 .resume = qlge_resume,
4216#endif
4217 .shutdown = qlge_shutdown,
4218 .err_handler = &qlge_err_handler
4219};
4220
4221static int __init qlge_init_module(void)
4222{
4223 return pci_register_driver(&qlge_driver);
4224}
4225
4226static void __exit qlge_exit(void)
4227{
4228 pci_unregister_driver(&qlge_driver);
4229}
4230
4231module_init(qlge_init_module);
4232module_exit(qlge_exit);