/*
 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/ib_smi.h>
#include "c2.h"
#include "c2_provider.h"

MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
MODULE_DESCRIPTION("Ammasso AMSO1100 Low-level iWARP Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
	| NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int c2_up(struct net_device *netdev);
static int c2_down(struct net_device *netdev);
static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
static void c2_tx_interrupt(struct net_device *netdev);
static void c2_rx_interrupt(struct net_device *netdev);
static irqreturn_t c2_interrupt(int irq, void *dev_id);
static void c2_tx_timeout(struct net_device *netdev);
static int c2_change_mtu(struct net_device *netdev, int new_mtu);
static void c2_reset(struct c2_port *c2_port);
static struct net_device_stats *c2_get_stats(struct net_device *netdev);

static struct pci_device_id c2_pci_table[] = {
	{ PCI_DEVICE(0x18b8, 0xb001) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, c2_pci_table);

static void c2_print_macaddr(struct net_device *netdev)
{
	pr_debug("%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, "
		"IRQ %u\n", netdev->name,
		netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
		netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5],
		netdev->irq);
}

static void c2_set_rxbufsize(struct c2_port *c2_port)
{
	struct net_device *netdev = c2_port->netdev;

	if (netdev->mtu > RX_BUF_SIZE)
		c2_port->rx_buf_size =
		    netdev->mtu + ETH_HLEN + sizeof(struct c2_rxp_hdr) +
		    NET_IP_ALIGN;
	else
		c2_port->rx_buf_size = sizeof(struct c2_rxp_hdr) + RX_BUF_SIZE;
}
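
/*
 * Note on the ring layout used by the two allocators below: each ring
 * element pairs a host-side descriptor (kept in DMA-coherent memory)
 * with the matching adapter descriptor in MMIO space.  Elements form a
 * singly linked circle; to_use and to_clean walk it as the producer and
 * consumer pointers respectively.
 */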

/*
 * Allocate TX ring elements and chain them together.
 * One-to-one association of adapter descriptors with ring elements.
 */
static int c2_tx_ring_alloc(struct c2_ring *tx_ring, void *vaddr,
			    dma_addr_t base, void __iomem * mmio_txp_ring)
{
	struct c2_tx_desc *tx_desc;
	struct c2_txp_desc __iomem *txp_desc;
	struct c2_element *elem;
	int i;

	tx_ring->start = kmalloc(sizeof(*elem) * tx_ring->count, GFP_KERNEL);
	if (!tx_ring->start)
		return -ENOMEM;

	elem = tx_ring->start;
	tx_desc = vaddr;
	txp_desc = mmio_txp_ring;
	for (i = 0; i < tx_ring->count; i++, elem++, tx_desc++, txp_desc++) {
		tx_desc->len = 0;
		tx_desc->status = 0;

		/* Set TXP_HTXD_UNINIT */
		__raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
			     (void __iomem *) txp_desc + C2_TXP_ADDR);
		__raw_writew(0, (void __iomem *) txp_desc + C2_TXP_LEN);
		__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
			     (void __iomem *) txp_desc + C2_TXP_FLAGS);

		elem->skb = NULL;
		elem->ht_desc = tx_desc;
		elem->hw_desc = txp_desc;

		if (i == tx_ring->count - 1) {
			elem->next = tx_ring->start;
			tx_desc->next_offset = base;
		} else {
			elem->next = elem + 1;
			tx_desc->next_offset =
			    base + (i + 1) * sizeof(*tx_desc);
		}
	}

	tx_ring->to_use = tx_ring->to_clean = tx_ring->start;

	return 0;
}

/*
 * Allocate RX ring elements and chain them together.
 * One-to-one association of adapter descriptors with ring elements.
 */
static int c2_rx_ring_alloc(struct c2_ring *rx_ring, void *vaddr,
			    dma_addr_t base, void __iomem * mmio_rxp_ring)
{
	struct c2_rx_desc *rx_desc;
	struct c2_rxp_desc __iomem *rxp_desc;
	struct c2_element *elem;
	int i;

	rx_ring->start = kmalloc(sizeof(*elem) * rx_ring->count, GFP_KERNEL);
	if (!rx_ring->start)
		return -ENOMEM;

	elem = rx_ring->start;
	rx_desc = vaddr;
	rxp_desc = mmio_rxp_ring;
	for (i = 0; i < rx_ring->count; i++, elem++, rx_desc++, rxp_desc++) {
		rx_desc->len = 0;
		rx_desc->status = 0;

		/* Set RXP_HRXD_UNINIT */
		__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_OK),
			     (void __iomem *) rxp_desc + C2_RXP_STATUS);
		__raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_COUNT);
		__raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_LEN);
		__raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
			     (void __iomem *) rxp_desc + C2_RXP_ADDR);
		__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
			     (void __iomem *) rxp_desc + C2_RXP_FLAGS);

		elem->skb = NULL;
		elem->ht_desc = rx_desc;
		elem->hw_desc = rxp_desc;

		if (i == rx_ring->count - 1) {
			elem->next = rx_ring->start;
			rx_desc->next_offset = base;
		} else {
			elem->next = elem + 1;
			rx_desc->next_offset =
			    base + (i + 1) * sizeof(*rx_desc);
		}
	}

	rx_ring->to_use = rx_ring->to_clean = rx_ring->start;

	return 0;
}

/* Setup buffer for receiving */
static inline int c2_rx_alloc(struct c2_port *c2_port, struct c2_element *elem)
{
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_rx_desc *rx_desc = elem->ht_desc;
	struct sk_buff *skb;
	dma_addr_t mapaddr;
	u32 maplen;
	struct c2_rxp_hdr *rxp_hdr;

	skb = dev_alloc_skb(c2_port->rx_buf_size);
	if (unlikely(!skb)) {
		pr_debug("%s: out of memory for receive\n",
			c2_port->netdev->name);
		return -ENOMEM;
	}

	/* Zero out the rxp hdr in the sk_buff */
	memset(skb->data, 0, sizeof(*rxp_hdr));

	skb->dev = c2_port->netdev;

	maplen = c2_port->rx_buf_size;
	mapaddr =
	    pci_map_single(c2dev->pcidev, skb->data, maplen,
			   PCI_DMA_FROMDEVICE);

	/* Set the sk_buff RXP_header to RXP_HRXD_READY */
	rxp_hdr = (struct c2_rxp_hdr *) skb->data;
	rxp_hdr->flags = RXP_HRXD_READY;

	__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
	__raw_writew((__force u16) cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)),
		     elem->hw_desc + C2_RXP_LEN);
	__raw_writeq((__force u64) cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR);
	__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
		     elem->hw_desc + C2_RXP_FLAGS);

	elem->skb = skb;
	elem->mapaddr = mapaddr;
	elem->maplen = maplen;
	rx_desc->len = maplen;

	return 0;
}
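
/*
 * Each RX buffer starts with a struct c2_rxp_hdr that the adapter fills
 * in on completion, followed by the raw Ethernet frame.  The length
 * programmed into C2_RXP_LEN above therefore excludes the header.
 */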

/*
 * Allocate buffers for the Rx ring
 * For receive:  rx_ring.to_clean is next received frame
 */
static int c2_rx_fill(struct c2_port *c2_port)
{
	struct c2_ring *rx_ring = &c2_port->rx_ring;
	struct c2_element *elem;
	int ret = 0;

	elem = rx_ring->start;
	do {
		if (c2_rx_alloc(c2_port, elem)) {
			ret = 1;
			break;
		}
	} while ((elem = elem->next) != rx_ring->start);

	rx_ring->to_clean = rx_ring->start;
	return ret;
}

/* Free all buffers in RX ring, assumes receiver stopped */
static void c2_rx_clean(struct c2_port *c2_port)
{
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_ring *rx_ring = &c2_port->rx_ring;
	struct c2_element *elem;
	struct c2_rx_desc *rx_desc;

	elem = rx_ring->start;
	do {
		rx_desc = elem->ht_desc;
		rx_desc->len = 0;

		__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
		__raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
		__raw_writew(0, elem->hw_desc + C2_RXP_LEN);
		__raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
			     elem->hw_desc + C2_RXP_ADDR);
		__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
			     elem->hw_desc + C2_RXP_FLAGS);

		if (elem->skb) {
			pci_unmap_single(c2dev->pcidev, elem->mapaddr,
					 elem->maplen, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(elem->skb);
			elem->skb = NULL;
		}
	} while ((elem = elem->next) != rx_ring->start);
}

static inline int c2_tx_free(struct c2_dev *c2dev, struct c2_element *elem)
{
	struct c2_tx_desc *tx_desc = elem->ht_desc;

	tx_desc->len = 0;

	pci_unmap_single(c2dev->pcidev, elem->mapaddr, elem->maplen,
			 PCI_DMA_TODEVICE);

	if (elem->skb) {
		dev_kfree_skb_any(elem->skb);
		elem->skb = NULL;
	}

	return 0;
}

/* Free all buffers in TX ring, assumes transmitter stopped */
static void c2_tx_clean(struct c2_port *c2_port)
{
	struct c2_ring *tx_ring = &c2_port->tx_ring;
	struct c2_element *elem;
	struct c2_txp_desc txp_htxd;
	int retry;
	unsigned long flags;

	spin_lock_irqsave(&c2_port->tx_lock, flags);

	elem = tx_ring->start;

	do {
		retry = 0;
		do {
			txp_htxd.flags =
			    readw(elem->hw_desc + C2_TXP_FLAGS);

			if (txp_htxd.flags == TXP_HTXD_READY) {
				retry = 1;
				__raw_writew(0,
					     elem->hw_desc + C2_TXP_LEN);
				__raw_writeq(0,
					     elem->hw_desc + C2_TXP_ADDR);
				__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_DONE),
					     elem->hw_desc + C2_TXP_FLAGS);
				c2_port->netstats.tx_dropped++;
				break;
			} else {
				__raw_writew(0,
					     elem->hw_desc + C2_TXP_LEN);
				__raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
					     elem->hw_desc + C2_TXP_ADDR);
				__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
					     elem->hw_desc + C2_TXP_FLAGS);
			}

			c2_tx_free(c2_port->c2dev, elem);

		} while ((elem = elem->next) != tx_ring->start);
	} while (retry);

	c2_port->tx_avail = c2_port->tx_ring.count - 1;
	c2_port->c2dev->cur_tx = tx_ring->to_use - tx_ring->start;

	if (c2_port->tx_avail > MAX_SKB_FRAGS + 1)
		netif_wake_queue(c2_port->netdev);

	spin_unlock_irqrestore(&c2_port->tx_lock, flags);
}

/*
 * Process transmit descriptors marked 'DONE' by the firmware,
 * freeing up their unneeded sk_buffs.
 */
static void c2_tx_interrupt(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_ring *tx_ring = &c2_port->tx_ring;
	struct c2_element *elem;
	struct c2_txp_desc txp_htxd;

	spin_lock(&c2_port->tx_lock);

	for (elem = tx_ring->to_clean; elem != tx_ring->to_use;
	     elem = elem->next) {
		txp_htxd.flags =
		    be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_FLAGS));

		if (txp_htxd.flags != TXP_HTXD_DONE)
			break;

		if (netif_msg_tx_done(c2_port)) {
			/* PCI reads are expensive in fast path */
			txp_htxd.len =
			    be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_LEN));
			pr_debug("%s: tx done slot %3Zu status 0x%x len "
				 "%5u bytes\n",
				 netdev->name, elem - tx_ring->start,
				 txp_htxd.flags, txp_htxd.len);
		}

		c2_tx_free(c2dev, elem);
		++(c2_port->tx_avail);
	}

	tx_ring->to_clean = elem;

	if (netif_queue_stopped(netdev)
	    && c2_port->tx_avail > MAX_SKB_FRAGS + 1)
		netif_wake_queue(netdev);

	spin_unlock(&c2_port->tx_lock);
}
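
/*
 * On a bad RXP header the frame is dropped but its skb is reused in
 * place: data/tail are reset, the header area is zeroed, and the same
 * DMA mapping is posted back to the adapter as READY.
 */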

static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem)
{
	struct c2_rx_desc *rx_desc = elem->ht_desc;
	struct c2_rxp_hdr *rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;

	if (rxp_hdr->status != RXP_HRXD_OK ||
	    rxp_hdr->len > (rx_desc->len - sizeof(*rxp_hdr))) {
		pr_debug("BAD RXP_HRXD\n");
		pr_debug("  rx_desc : %p\n", rx_desc);
		pr_debug("    index : %Zu\n",
			elem - c2_port->rx_ring.start);
		pr_debug("    len   : %u\n", rx_desc->len);
		pr_debug("  rxp_hdr : %p [PA %p]\n", rxp_hdr,
			(void *) __pa((unsigned long) rxp_hdr));
		pr_debug("    flags : 0x%x\n", rxp_hdr->flags);
		pr_debug("    status: 0x%x\n", rxp_hdr->status);
		pr_debug("    len   : %u\n", rxp_hdr->len);
		pr_debug("    rsvd  : 0x%x\n", rxp_hdr->rsvd);
	}

	/* Setup the skb for reuse since we're dropping this pkt */
	elem->skb->data = elem->skb->head;
	skb_reset_tail_pointer(elem->skb);

	/* Zero out the rxp hdr in the sk_buff */
	memset(elem->skb->data, 0, sizeof(*rxp_hdr));

	/* Write the descriptor to the adapter's rx ring */
	__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
	__raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
	__raw_writew((__force u16) cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)),
		     elem->hw_desc + C2_RXP_LEN);
	__raw_writeq((__force u64) cpu_to_be64(elem->mapaddr),
		     elem->hw_desc + C2_RXP_ADDR);
	__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
		     elem->hw_desc + C2_RXP_FLAGS);

	pr_debug("packet dropped\n");
	c2_port->netstats.rx_dropped++;
}

static void c2_rx_interrupt(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_ring *rx_ring = &c2_port->rx_ring;
	struct c2_element *elem;
	struct c2_rx_desc *rx_desc;
	struct c2_rxp_hdr *rxp_hdr;
	struct sk_buff *skb;
	dma_addr_t mapaddr;
	u32 maplen, buflen;
	unsigned long flags;

	spin_lock_irqsave(&c2dev->lock, flags);

	/* Begin where we left off */
	rx_ring->to_clean = rx_ring->start + c2dev->cur_rx;

	for (elem = rx_ring->to_clean; elem->next != rx_ring->to_clean;
	     elem = elem->next) {
		rx_desc = elem->ht_desc;
		mapaddr = elem->mapaddr;
		maplen = elem->maplen;
		skb = elem->skb;
		rxp_hdr = (struct c2_rxp_hdr *) skb->data;

		if (rxp_hdr->flags != RXP_HRXD_DONE)
			break;
		buflen = rxp_hdr->len;

		/* Sanity check the RXP header */
		if (rxp_hdr->status != RXP_HRXD_OK ||
		    buflen > (rx_desc->len - sizeof(*rxp_hdr))) {
			c2_rx_error(c2_port, elem);
			continue;
		}

		/*
		 * Allocate and map a new skb for replenishing the host
		 * RX desc
		 */
		if (c2_rx_alloc(c2_port, elem)) {
			c2_rx_error(c2_port, elem);
			continue;
		}

		/* Unmap the old skb */
		pci_unmap_single(c2dev->pcidev, mapaddr, maplen,
				 PCI_DMA_FROMDEVICE);

		prefetch(skb->data);

		/*
		 * Skip past the leading 8 bytes comprising the
		 * "struct c2_rxp_hdr", prepended by the adapter
		 * to the usual Ethernet header ("struct ethhdr"),
		 * to the start of the raw Ethernet packet.
		 *
		 * Fix up the various fields in the sk_buff before
		 * passing it up to netif_rx().  The transfer size
		 * (in bytes) specified by the adapter len field of
		 * the "struct rxp_hdr_t" does NOT include the
		 * "sizeof(struct c2_rxp_hdr)".
		 */
		skb->data += sizeof(*rxp_hdr);
		skb_set_tail_pointer(skb, buflen);
		skb->len = buflen;
		skb->protocol = eth_type_trans(skb, netdev);

		netif_rx(skb);

		netdev->last_rx = jiffies;
		c2_port->netstats.rx_packets++;
		c2_port->netstats.rx_bytes += buflen;
	}

	/* Save where we left off */
	rx_ring->to_clean = elem;
	c2dev->cur_rx = elem - rx_ring->start;
	C2_SET_CUR_RX(c2dev, c2dev->cur_rx);

	spin_unlock_irqrestore(&c2dev->lock, flags);
}

/*
 * Handle netisr0 TX & RX interrupts.
 */
static irqreturn_t c2_interrupt(int irq, void *dev_id)
{
	unsigned int netisr0, dmaisr;
	int handled = 0;
	struct c2_dev *c2dev = (struct c2_dev *) dev_id;

	/* Process CCILNET interrupts */
	netisr0 = readl(c2dev->regs + C2_NISR0);
	if (netisr0) {

		/*
		 * There is an issue with the firmware that always
		 * provides the status of RX for both TX & RX
		 * interrupts.  So process both queues here.
		 */
		c2_rx_interrupt(c2dev->netdev);
		c2_tx_interrupt(c2dev->netdev);

		/* Clear the interrupt */
		writel(netisr0, c2dev->regs + C2_NISR0);
		handled++;
	}

	/* Process RNIC interrupts */
	dmaisr = readl(c2dev->regs + C2_DISR);
	if (dmaisr) {
		writel(dmaisr, c2dev->regs + C2_DISR);
		c2_rnic_interrupt(c2dev);
		handled++;
	}

	if (handled) {
		return IRQ_HANDLED;
	} else {
		return IRQ_NONE;
	}
}

static int c2_up(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_element *elem;
	struct c2_rxp_hdr *rxp_hdr;
	struct in_device *in_dev;
	size_t rx_size, tx_size;
	int ret, i;
	unsigned int netimr0;

	if (netif_msg_ifup(c2_port))
		pr_debug("%s: enabling interface\n", netdev->name);

	/* Set the Rx buffer size based on MTU */
	c2_set_rxbufsize(c2_port);

	/* Allocate DMA'able memory for Tx/Rx host descriptor rings */
	rx_size = c2_port->rx_ring.count * sizeof(struct c2_rx_desc);
	tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc);

	c2_port->mem_size = tx_size + rx_size;
	c2_port->mem = pci_alloc_consistent(c2dev->pcidev, c2_port->mem_size,
					    &c2_port->dma);
	if (c2_port->mem == NULL) {
		pr_debug("Unable to allocate memory for "
			"host descriptor rings\n");
		return -ENOMEM;
	}

	memset(c2_port->mem, 0, c2_port->mem_size);

	/* Create the Rx host descriptor ring */
	if ((ret =
	     c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma,
			      c2dev->mmio_rxp_ring))) {
		pr_debug("Unable to create RX ring\n");
		goto bail0;
	}

	/* Allocate Rx buffers for the host descriptor ring */
	if (c2_rx_fill(c2_port)) {
		pr_debug("Unable to fill RX ring\n");
		goto bail1;
	}

	/* Create the Tx host descriptor ring */
	if ((ret = c2_tx_ring_alloc(&c2_port->tx_ring, c2_port->mem + rx_size,
				    c2_port->dma + rx_size,
				    c2dev->mmio_txp_ring))) {
		pr_debug("Unable to create TX ring\n");
		goto bail1;
	}

	/* Set the TX pointer to where we left off */
	c2_port->tx_avail = c2_port->tx_ring.count - 1;
	c2_port->tx_ring.to_use = c2_port->tx_ring.to_clean =
	    c2_port->tx_ring.start + c2dev->cur_tx;

	/* missing: Initialize MAC */

	BUG_ON(c2_port->tx_ring.to_use != c2_port->tx_ring.to_clean);

	/* Reset the adapter, ensures the driver is in sync with the RXP */
	c2_reset(c2_port);

	/* Reset the READY bit in the sk_buff RXP headers & adapter HRXDQ */
	for (i = 0, elem = c2_port->rx_ring.start; i < c2_port->rx_ring.count;
	     i++, elem++) {
		rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
		rxp_hdr->flags = 0;
		__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
			     elem->hw_desc + C2_RXP_FLAGS);
	}

	/* Enable network packets */
	netif_start_queue(netdev);

	/* Enable IRQ */
	writel(0, c2dev->regs + C2_IDIS);
	netimr0 = readl(c2dev->regs + C2_NIMR0);
	netimr0 &= ~(C2_PCI_HTX_INT | C2_PCI_HRX_INT);
	writel(netimr0, c2dev->regs + C2_NIMR0);

	/* Tell the stack to ignore arp requests for ipaddrs bound to
	 * other interfaces.  This is needed to prevent the host stack
	 * from responding to arp requests to the ipaddr bound on the
	 * rdma interface.
	 */
	in_dev = in_dev_get(netdev);
	IN_DEV_CONF_SET(in_dev, ARP_IGNORE, 1);
	in_dev_put(in_dev);

	return 0;

 bail1:
	c2_rx_clean(c2_port);
	kfree(c2_port->rx_ring.start);

 bail0:
	pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
			    c2_port->dma);

	return ret;
}

static int c2_down(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;

	if (netif_msg_ifdown(c2_port))
		pr_debug("%s: disabling interface\n",
			netdev->name);

	/* Wait for all the queued packets to get sent */
	c2_tx_interrupt(netdev);

	/* Disable network packets */
	netif_stop_queue(netdev);

	/* Disable IRQs by clearing the interrupt mask */
	writel(1, c2dev->regs + C2_IDIS);
	writel(0, c2dev->regs + C2_NIMR0);

	/* missing: Stop transmitter */

	/* missing: Stop receiver */

	/* Reset the adapter, ensures the driver is in sync with the RXP */
	c2_reset(c2_port);

	/* missing: Turn off LEDs here */

	/* Free all buffers in the host descriptor rings */
	c2_tx_clean(c2_port);
	c2_rx_clean(c2_port);

	/* Free the host descriptor rings */
	kfree(c2_port->rx_ring.start);
	kfree(c2_port->tx_ring.start);
	pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
			    c2_port->dma);

	return 0;
}

static void c2_reset(struct c2_port *c2_port)
{
	struct c2_dev *c2dev = c2_port->c2dev;
	unsigned int cur_rx = c2dev->cur_rx;

	/* Tell the hardware to quiesce */
	C2_SET_CUR_RX(c2dev, cur_rx | C2_PCI_HRX_QUI);

	/*
	 * The hardware will reset the C2_PCI_HRX_QUI bit once
	 * the RXP is quiesced.  Wait 2 seconds for this.
	 */
	ssleep(2);

	cur_rx = C2_GET_CUR_RX(c2dev);

	if (cur_rx & C2_PCI_HRX_QUI)
		pr_debug("c2_reset: failed to quiesce the hardware!\n");

	cur_rx &= ~C2_PCI_HRX_QUI;

	c2dev->cur_rx = cur_rx;

	pr_debug("Current RX: %u\n", c2dev->cur_rx);
}
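
/*
 * tx_avail counts free TXP slots.  The queue is stopped once no more
 * than MAX_SKB_FRAGS + 1 slots remain, so a maximally fragmented skb
 * can always be queued, and is woken again from the TX-done path.
 */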

static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_ring *tx_ring = &c2_port->tx_ring;
	struct c2_element *elem;
	dma_addr_t mapaddr;
	u32 maplen;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&c2_port->tx_lock, flags);

	if (unlikely(c2_port->tx_avail < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&c2_port->tx_lock, flags);

		pr_debug("%s: Tx ring full when queue awake!\n",
			netdev->name);
		return NETDEV_TX_BUSY;
	}

	maplen = skb_headlen(skb);
	mapaddr =
	    pci_map_single(c2dev->pcidev, skb->data, maplen, PCI_DMA_TODEVICE);

	elem = tx_ring->to_use;
	elem->skb = skb;
	elem->mapaddr = mapaddr;
	elem->maplen = maplen;

	/* Tell HW to xmit */
	__raw_writeq((__force u64) cpu_to_be64(mapaddr),
		     elem->hw_desc + C2_TXP_ADDR);
	__raw_writew((__force u16) cpu_to_be16(maplen),
		     elem->hw_desc + C2_TXP_LEN);
	__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
		     elem->hw_desc + C2_TXP_FLAGS);

	c2_port->netstats.tx_packets++;
	c2_port->netstats.tx_bytes += maplen;

	/* Loop thru additional data fragments and queue them */
	if (skb_shinfo(skb)->nr_frags) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			maplen = frag->size;
			mapaddr =
			    pci_map_page(c2dev->pcidev, frag->page,
					 frag->page_offset, maplen,
					 PCI_DMA_TODEVICE);

			elem = elem->next;
			elem->skb = NULL;
			elem->mapaddr = mapaddr;
			elem->maplen = maplen;

			/* Tell HW to xmit */
			__raw_writeq((__force u64) cpu_to_be64(mapaddr),
				     elem->hw_desc + C2_TXP_ADDR);
			__raw_writew((__force u16) cpu_to_be16(maplen),
				     elem->hw_desc + C2_TXP_LEN);
			__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
				     elem->hw_desc + C2_TXP_FLAGS);

			c2_port->netstats.tx_packets++;
			c2_port->netstats.tx_bytes += maplen;
		}
	}

	tx_ring->to_use = elem->next;
	c2_port->tx_avail -= (skb_shinfo(skb)->nr_frags + 1);

	if (c2_port->tx_avail <= MAX_SKB_FRAGS + 1) {
		netif_stop_queue(netdev);
		if (netif_msg_tx_queued(c2_port))
			pr_debug("%s: transmit queue full\n",
				netdev->name);
	}

	spin_unlock_irqrestore(&c2_port->tx_lock, flags);

	netdev->trans_start = jiffies;

	return NETDEV_TX_OK;
}

static struct net_device_stats *c2_get_stats(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);

	return &c2_port->netstats;
}

static void c2_tx_timeout(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);

	if (netif_msg_timer(c2_port))
		pr_debug("%s: tx timeout\n", netdev->name);

	c2_tx_clean(c2_port);
}

static int c2_change_mtu(struct net_device *netdev, int new_mtu)
{
	int ret = 0;

	if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
		return -EINVAL;

	netdev->mtu = new_mtu;

	if (netif_running(netdev)) {
		c2_down(netdev);

		c2_up(netdev);
	}

	return ret;
}

/* Initialize network device */
static struct net_device *c2_devinit(struct c2_dev *c2dev,
				     void __iomem * mmio_addr)
{
	struct c2_port *c2_port = NULL;
	struct net_device *netdev = alloc_etherdev(sizeof(*c2_port));

	if (!netdev) {
		pr_debug("c2_port etherdev alloc failed\n");
		return NULL;
	}

	SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev);

	netdev->open = c2_up;
	netdev->stop = c2_down;
	netdev->hard_start_xmit = c2_xmit_frame;
	netdev->get_stats = c2_get_stats;
	netdev->tx_timeout = c2_tx_timeout;
	netdev->change_mtu = c2_change_mtu;
	netdev->watchdog_timeo = C2_TX_TIMEOUT;
	netdev->irq = c2dev->pcidev->irq;

	c2_port = netdev_priv(netdev);
	c2_port->netdev = netdev;
	c2_port->c2dev = c2dev;
	c2_port->msg_enable = netif_msg_init(debug, default_msg);
	c2_port->tx_ring.count = C2_NUM_TX_DESC;
	c2_port->rx_ring.count = C2_NUM_RX_DESC;

	spin_lock_init(&c2_port->tx_lock);

	/* Copy our 48-bit ethernet hardware address */
	memcpy_fromio(netdev->dev_addr, mmio_addr + C2_REGS_ENADDR, 6);

	/* Validate the MAC address */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		pr_debug("Invalid MAC Address\n");
		c2_print_macaddr(netdev);
		free_netdev(netdev);
		return NULL;
	}

	c2dev->netdev = netdev;

	return netdev;
}
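
/*
 * c2_probe maps three PCI BARs: BAR0 provides the control/interrupt
 * registers (c2dev->regs), while BAR4 carries the adapter PCI register
 * block plus the HTXDQ/HRXDQ descriptor queues.  BAR2 is only sanity
 * checked for size here.
 */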

static int __devinit c2_probe(struct pci_dev *pcidev,
			      const struct pci_device_id *ent)
{
	int ret = 0, i;
	unsigned long reg0_start, reg0_flags, reg0_len;
	unsigned long reg2_start, reg2_flags, reg2_len;
	unsigned long reg4_start, reg4_flags, reg4_len;
	unsigned kva_map_size;
	struct net_device *netdev = NULL;
	struct c2_dev *c2dev = NULL;
	void __iomem *mmio_regs = NULL;

	printk(KERN_INFO PFX "AMSO1100 Gigabit Ethernet driver v%s loaded\n",
		DRV_VERSION);

	/* Enable PCI device */
	ret = pci_enable_device(pcidev);
	if (ret) {
		printk(KERN_ERR PFX "%s: Unable to enable PCI device\n",
			pci_name(pcidev));
		goto bail0;
	}

	reg0_start = pci_resource_start(pcidev, BAR_0);
	reg0_len = pci_resource_len(pcidev, BAR_0);
	reg0_flags = pci_resource_flags(pcidev, BAR_0);

	reg2_start = pci_resource_start(pcidev, BAR_2);
	reg2_len = pci_resource_len(pcidev, BAR_2);
	reg2_flags = pci_resource_flags(pcidev, BAR_2);

	reg4_start = pci_resource_start(pcidev, BAR_4);
	reg4_len = pci_resource_len(pcidev, BAR_4);
	reg4_flags = pci_resource_flags(pcidev, BAR_4);

	pr_debug("BAR0 size = 0x%lX bytes\n", reg0_len);
	pr_debug("BAR2 size = 0x%lX bytes\n", reg2_len);
	pr_debug("BAR4 size = 0x%lX bytes\n", reg4_len);

	/* Make sure PCI base addr are MMIO */
	if (!(reg0_flags & IORESOURCE_MEM) ||
	    !(reg2_flags & IORESOURCE_MEM) || !(reg4_flags & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "PCI regions not an MMIO resource\n");
		ret = -ENODEV;
		goto bail1;
	}

	/* Check for weird/broken PCI region reporting */
	if ((reg0_len < C2_REG0_SIZE) ||
	    (reg2_len < C2_REG2_SIZE) || (reg4_len < C2_REG4_SIZE)) {
		printk(KERN_ERR PFX "Invalid PCI region sizes\n");
		ret = -ENODEV;
		goto bail1;
	}

	/* Reserve PCI I/O and memory resources */
	ret = pci_request_regions(pcidev, DRV_NAME);
	if (ret) {
		printk(KERN_ERR PFX "%s: Unable to request regions\n",
			pci_name(pcidev));
		goto bail1;
	}

	if ((sizeof(dma_addr_t) > 4)) {
		ret = pci_set_dma_mask(pcidev, DMA_64BIT_MASK);
		if (ret < 0) {
			printk(KERN_ERR PFX "64b DMA configuration failed\n");
			goto bail2;
		}
	} else {
		ret = pci_set_dma_mask(pcidev, DMA_32BIT_MASK);
		if (ret < 0) {
			printk(KERN_ERR PFX "32b DMA configuration failed\n");
			goto bail2;
		}
	}

	/* Enables bus-mastering on the device */
	pci_set_master(pcidev);

	/* Remap the adapter PCI registers in BAR4 */
	mmio_regs = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
				    sizeof(struct c2_adapter_pci_regs));
	if (!mmio_regs) {
		printk(KERN_ERR PFX
			"Unable to remap adapter PCI registers in BAR4\n");
		ret = -EIO;
		goto bail2;
	}

	/* Validate PCI regs magic */
	for (i = 0; i < sizeof(c2_magic); i++) {
		if (c2_magic[i] != readb(mmio_regs + C2_REGS_MAGIC + i)) {
			printk(KERN_ERR PFX "Downlevel Firmware boot loader "
				"[%d/%Zd: got 0x%x, exp 0x%x]. Use the cc_flash "
				"utility to update your boot loader\n",
				i + 1, sizeof(c2_magic),
				readb(mmio_regs + C2_REGS_MAGIC + i),
				c2_magic[i]);
			printk(KERN_ERR PFX "Adapter not claimed\n");
			iounmap(mmio_regs);
			ret = -EIO;
			goto bail2;
		}
	}

	/* Validate the adapter version */
	if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) {
		printk(KERN_ERR PFX "Version mismatch "
			"[fw=%u, c2=%u], Adapter not claimed\n",
			be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)),
			C2_VERSION);
		ret = -EINVAL;
		iounmap(mmio_regs);
		goto bail2;
	}

	/* Validate the adapter IVN */
	if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) {
		printk(KERN_ERR PFX "Downlevel firmware level. You should be using "
		       "the OpenIB device support kit. "
		       "[fw=0x%x, c2=0x%x], Adapter not claimed\n",
		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)),
		       C2_IVN);
		ret = -EINVAL;
		iounmap(mmio_regs);
		goto bail2;
	}

	/* Allocate hardware structure */
	c2dev = (struct c2_dev *) ib_alloc_device(sizeof(*c2dev));
	if (!c2dev) {
		printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n",
			pci_name(pcidev));
		ret = -ENOMEM;
		iounmap(mmio_regs);
		goto bail2;
	}

	memset(c2dev, 0, sizeof(*c2dev));
	spin_lock_init(&c2dev->lock);
	c2dev->pcidev = pcidev;
	c2dev->cur_tx = 0;

	/* Get the last RX index */
	c2dev->cur_rx =
	    (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_HRX_CUR)) -
	     0xffffc000) / sizeof(struct c2_rxp_desc);

	/* Request an interrupt line for the driver */
	ret = request_irq(pcidev->irq, c2_interrupt, IRQF_SHARED, DRV_NAME, c2dev);
	if (ret) {
		printk(KERN_ERR PFX "%s: requested IRQ %u is busy\n",
			pci_name(pcidev), pcidev->irq);
		iounmap(mmio_regs);
		goto bail3;
	}

	/* Set driver specific data */
	pci_set_drvdata(pcidev, c2dev);

	/* Initialize network device */
	if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {
		iounmap(mmio_regs);
		goto bail4;
	}

	/* Save off the actual size prior to unmapping mmio_regs */
	kva_map_size = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_PCI_WINSIZE));

	/* Unmap the adapter PCI registers in BAR4 */
	iounmap(mmio_regs);

	/* Register network device */
	ret = register_netdev(netdev);
	if (ret) {
		printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n",
			ret);
		goto bail5;
	}

	/* Disable network packets */
	netif_stop_queue(netdev);

	/* Remap the adapter HRXDQ PA space to kernel VA space */
	c2dev->mmio_rxp_ring = ioremap_nocache(reg4_start + C2_RXP_HRXDQ_OFFSET,
					       C2_RXP_HRXDQ_SIZE);
	if (!c2dev->mmio_rxp_ring) {
		printk(KERN_ERR PFX "Unable to remap MMIO HRXDQ region\n");
		ret = -EIO;
		goto bail6;
	}

	/* Remap the adapter HTXDQ PA space to kernel VA space */
	c2dev->mmio_txp_ring = ioremap_nocache(reg4_start + C2_TXP_HTXDQ_OFFSET,
					       C2_TXP_HTXDQ_SIZE);
	if (!c2dev->mmio_txp_ring) {
		printk(KERN_ERR PFX "Unable to remap MMIO HTXDQ region\n");
		ret = -EIO;
		goto bail7;
	}

	/* Save off the current RX index in the last 4 bytes of the TXP Ring */
	C2_SET_CUR_RX(c2dev, c2dev->cur_rx);

	/* Remap the PCI registers in adapter BAR0 to kernel VA space */
	c2dev->regs = ioremap_nocache(reg0_start, reg0_len);
	if (!c2dev->regs) {
		printk(KERN_ERR PFX "Unable to remap BAR0\n");
		ret = -EIO;
		goto bail8;
	}

	/* Remap the PCI registers in adapter BAR4 to kernel VA space */
	c2dev->pa = reg4_start + C2_PCI_REGS_OFFSET;
	c2dev->kva = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
				     kva_map_size);
	if (!c2dev->kva) {
		printk(KERN_ERR PFX "Unable to remap BAR4\n");
		ret = -EIO;
		goto bail9;
	}

	/* Print out the MAC address */
	c2_print_macaddr(netdev);

	ret = c2_rnic_init(c2dev);
	if (ret) {
		printk(KERN_ERR PFX "c2_rnic_init failed: %d\n", ret);
		goto bail10;
	}

	if (c2_register_device(c2dev))
		goto bail10;

	return 0;

 bail10:
	iounmap(c2dev->kva);

 bail9:
	iounmap(c2dev->regs);

 bail8:
	iounmap(c2dev->mmio_txp_ring);

 bail7:
	iounmap(c2dev->mmio_rxp_ring);

 bail6:
	unregister_netdev(netdev);

 bail5:
	free_netdev(netdev);

 bail4:
	free_irq(pcidev->irq, c2dev);

 bail3:
	ib_dealloc_device(&c2dev->ibdev);

 bail2:
	pci_release_regions(pcidev);

 bail1:
	pci_disable_device(pcidev);

 bail0:
	return ret;
}

static void __devexit c2_remove(struct pci_dev *pcidev)
{
	struct c2_dev *c2dev = pci_get_drvdata(pcidev);
	struct net_device *netdev = c2dev->netdev;

	/* Unregister with OpenIB */
	c2_unregister_device(c2dev);

	/* Clean up the RNIC resources */
	c2_rnic_term(c2dev);

	/* Remove network device from the kernel */
	unregister_netdev(netdev);

	/* Free network device */
	free_netdev(netdev);

	/* Free the interrupt line */
	free_irq(pcidev->irq, c2dev);

	/* missing: Turn LEDs off here */

	/* Unmap adapter PA space */
	iounmap(c2dev->kva);
	iounmap(c2dev->regs);
	iounmap(c2dev->mmio_txp_ring);
	iounmap(c2dev->mmio_rxp_ring);

	/* Free the hardware structure */
	ib_dealloc_device(&c2dev->ibdev);

	/* Release reserved PCI I/O and memory resources */
	pci_release_regions(pcidev);

	/* Disable PCI device */
	pci_disable_device(pcidev);

	/* Clear driver specific data */
	pci_set_drvdata(pcidev, NULL);
}

static struct pci_driver c2_pci_driver = {
	.name = DRV_NAME,
	.id_table = c2_pci_table,
	.probe = c2_probe,
	.remove = __devexit_p(c2_remove),
};

static int __init c2_init_module(void)
{
	return pci_register_driver(&c2_pci_driver);
}

static void __exit c2_exit_module(void)
{
	pci_unregister_driver(&c2_pci_driver);
}

module_init(c2_init_module);
module_exit(c2_exit_module);