/*
 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/ib_smi.h>
#include "c2.h"
#include "c2_provider.h"

MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
MODULE_DESCRIPTION("Ammasso AMSO1100 Low-level iWARP Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int c2_up(struct net_device *netdev);
static int c2_down(struct net_device *netdev);
static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
static void c2_tx_interrupt(struct net_device *netdev);
static void c2_rx_interrupt(struct net_device *netdev);
static irqreturn_t c2_interrupt(int irq, void *dev_id);
static void c2_tx_timeout(struct net_device *netdev);
static int c2_change_mtu(struct net_device *netdev, int new_mtu);
static void c2_reset(struct c2_port *c2_port);

static struct pci_device_id c2_pci_table[] = {
	{ PCI_DEVICE(0x18b8, 0xb001) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, c2_pci_table);

static void c2_print_macaddr(struct net_device *netdev)
{
	pr_debug("%s: MAC %pM, IRQ %u\n", netdev->name,
		 netdev->dev_addr, netdev->irq);
}

static void c2_set_rxbufsize(struct c2_port *c2_port)
{
	struct net_device *netdev = c2_port->netdev;

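	/*
	 * Each receive buffer must hold the adapter-prepended RXP header
	 * in addition to the frame itself; for MTUs larger than the
	 * default buffer, the Ethernet header and NET_IP_ALIGN padding
	 * are accounted for explicitly.
	 */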
	if (netdev->mtu > RX_BUF_SIZE)
		c2_port->rx_buf_size =
		    netdev->mtu + ETH_HLEN + sizeof(struct c2_rxp_hdr) +
		    NET_IP_ALIGN;
	else
		c2_port->rx_buf_size = sizeof(struct c2_rxp_hdr) + RX_BUF_SIZE;
}

/*
 * Allocate TX ring elements and chain them together.
 * One-to-one association of adapter descriptors with ring elements.
 */
static int c2_tx_ring_alloc(struct c2_ring *tx_ring, void *vaddr,
			    dma_addr_t base, void __iomem * mmio_txp_ring)
{
	struct c2_tx_desc *tx_desc;
	struct c2_txp_desc __iomem *txp_desc;
	struct c2_element *elem;
	int i;

	tx_ring->start = kmalloc(sizeof(*elem) * tx_ring->count, GFP_KERNEL);
	if (!tx_ring->start)
		return -ENOMEM;

	elem = tx_ring->start;
	tx_desc = vaddr;
	txp_desc = mmio_txp_ring;
	for (i = 0; i < tx_ring->count; i++, elem++, tx_desc++, txp_desc++) {
		tx_desc->len = 0;
		tx_desc->status = 0;

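		/*
		 * The distinctive 0x1122334455667788 pattern written to the
		 * address field below looks like a poison value marking the
		 * descriptor as uninitialized (an inference; the magic is
		 * not documented here).
		 */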
		/* Set TXP_HTXD_UNINIT */
		__raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
			     (void __iomem *) txp_desc + C2_TXP_ADDR);
		__raw_writew(0, (void __iomem *) txp_desc + C2_TXP_LEN);
		__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
			     (void __iomem *) txp_desc + C2_TXP_FLAGS);

		elem->skb = NULL;
		elem->ht_desc = tx_desc;
		elem->hw_desc = txp_desc;

		if (i == tx_ring->count - 1) {
			elem->next = tx_ring->start;
			tx_desc->next_offset = base;
		} else {
			elem->next = elem + 1;
			tx_desc->next_offset =
			    base + (i + 1) * sizeof(*tx_desc);
		}
	}

	tx_ring->to_use = tx_ring->to_clean = tx_ring->start;

	return 0;
}

/*
 * Allocate RX ring elements and chain them together.
 * One-to-one association of adapter descriptors with ring elements.
 */
static int c2_rx_ring_alloc(struct c2_ring *rx_ring, void *vaddr,
			    dma_addr_t base, void __iomem * mmio_rxp_ring)
{
	struct c2_rx_desc *rx_desc;
	struct c2_rxp_desc __iomem *rxp_desc;
	struct c2_element *elem;
	int i;

	rx_ring->start = kmalloc(sizeof(*elem) * rx_ring->count, GFP_KERNEL);
	if (!rx_ring->start)
		return -ENOMEM;

	elem = rx_ring->start;
	rx_desc = vaddr;
	rxp_desc = mmio_rxp_ring;
	for (i = 0; i < rx_ring->count; i++, elem++, rx_desc++, rxp_desc++) {
		rx_desc->len = 0;
		rx_desc->status = 0;

		/* Set RXP_HRXD_UNINIT */
		__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_OK),
			     (void __iomem *) rxp_desc + C2_RXP_STATUS);
		__raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_COUNT);
		__raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_LEN);
		__raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
			     (void __iomem *) rxp_desc + C2_RXP_ADDR);
		__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
			     (void __iomem *) rxp_desc + C2_RXP_FLAGS);

		elem->skb = NULL;
		elem->ht_desc = rx_desc;
		elem->hw_desc = rxp_desc;

		if (i == rx_ring->count - 1) {
			elem->next = rx_ring->start;
			rx_desc->next_offset = base;
		} else {
			elem->next = elem + 1;
			rx_desc->next_offset =
			    base + (i + 1) * sizeof(*rx_desc);
		}
	}

	rx_ring->to_use = rx_ring->to_clean = rx_ring->start;

	return 0;
}

/* Setup buffer for receiving */
static inline int c2_rx_alloc(struct c2_port *c2_port, struct c2_element *elem)
{
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_rx_desc *rx_desc = elem->ht_desc;
	struct sk_buff *skb;
	dma_addr_t mapaddr;
	u32 maplen;
	struct c2_rxp_hdr *rxp_hdr;

	skb = dev_alloc_skb(c2_port->rx_buf_size);
	if (unlikely(!skb)) {
		pr_debug("%s: out of memory for receive\n",
			 c2_port->netdev->name);
		return -ENOMEM;
	}

	/* Zero out the rxp hdr in the sk_buff */
	memset(skb->data, 0, sizeof(*rxp_hdr));

	skb->dev = c2_port->netdev;

	maplen = c2_port->rx_buf_size;
	mapaddr =
	    pci_map_single(c2dev->pcidev, skb->data, maplen,
			   PCI_DMA_FROMDEVICE);

	/* Set the sk_buff RXP_header to RXP_HRXD_READY */
	rxp_hdr = (struct c2_rxp_hdr *) skb->data;
	rxp_hdr->flags = RXP_HRXD_READY;

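	/*
	 * The adapter's length field covers only the payload that follows
	 * the RXP header, hence the sizeof(*rxp_hdr) subtraction below
	 * (this matches the length check done in c2_rx_interrupt()).
	 */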
	__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
	__raw_writew((__force u16) cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)),
		     elem->hw_desc + C2_RXP_LEN);
	__raw_writeq((__force u64) cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR);
	__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
		     elem->hw_desc + C2_RXP_FLAGS);

	elem->skb = skb;
	elem->mapaddr = mapaddr;
	elem->maplen = maplen;
	rx_desc->len = maplen;

	return 0;
}

/*
 * Allocate buffers for the Rx ring
 * For receive:  rx_ring.to_clean is next received frame
 */
static int c2_rx_fill(struct c2_port *c2_port)
{
	struct c2_ring *rx_ring = &c2_port->rx_ring;
	struct c2_element *elem;
	int ret = 0;

	elem = rx_ring->start;
	do {
		if (c2_rx_alloc(c2_port, elem)) {
			ret = 1;
			break;
		}
	} while ((elem = elem->next) != rx_ring->start);

	rx_ring->to_clean = rx_ring->start;
	return ret;
}

/* Free all buffers in RX ring, assumes receiver stopped */
static void c2_rx_clean(struct c2_port *c2_port)
{
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_ring *rx_ring = &c2_port->rx_ring;
	struct c2_element *elem;
	struct c2_rx_desc *rx_desc;

	elem = rx_ring->start;
	do {
		rx_desc = elem->ht_desc;
		rx_desc->len = 0;

		__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
		__raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
		__raw_writew(0, elem->hw_desc + C2_RXP_LEN);
		__raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
			     elem->hw_desc + C2_RXP_ADDR);
		__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
			     elem->hw_desc + C2_RXP_FLAGS);

		if (elem->skb) {
			pci_unmap_single(c2dev->pcidev, elem->mapaddr,
					 elem->maplen, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(elem->skb);
			elem->skb = NULL;
		}
	} while ((elem = elem->next) != rx_ring->start);
}

static inline int c2_tx_free(struct c2_dev *c2dev, struct c2_element *elem)
{
	struct c2_tx_desc *tx_desc = elem->ht_desc;

	tx_desc->len = 0;

	pci_unmap_single(c2dev->pcidev, elem->mapaddr, elem->maplen,
			 PCI_DMA_TODEVICE);

	if (elem->skb) {
		dev_kfree_skb_any(elem->skb);
		elem->skb = NULL;
	}

	return 0;
}

/* Free all buffers in TX ring, assumes transmitter stopped */
static void c2_tx_clean(struct c2_port *c2_port)
{
	struct c2_ring *tx_ring = &c2_port->tx_ring;
	struct c2_element *elem;
	struct c2_txp_desc txp_htxd;
	int retry;
	unsigned long flags;

	spin_lock_irqsave(&c2_port->tx_lock, flags);

	elem = tx_ring->start;

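	/*
	 * If a descriptor is still READY (handed to the adapter but not
	 * yet completed), force it to DONE, count it as dropped, and scan
	 * the ring again, so that nothing is left in flight when we
	 * return.
	 */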
	do {
		retry = 0;
		do {
			txp_htxd.flags =
			    readw(elem->hw_desc + C2_TXP_FLAGS);

			if (txp_htxd.flags == TXP_HTXD_READY) {
				retry = 1;
				__raw_writew(0,
					     elem->hw_desc + C2_TXP_LEN);
				__raw_writeq(0,
					     elem->hw_desc + C2_TXP_ADDR);
				__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_DONE),
					     elem->hw_desc + C2_TXP_FLAGS);
				c2_port->netdev->stats.tx_dropped++;
				break;
			} else {
				__raw_writew(0,
					     elem->hw_desc + C2_TXP_LEN);
				__raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
					     elem->hw_desc + C2_TXP_ADDR);
				__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
					     elem->hw_desc + C2_TXP_FLAGS);
			}

			c2_tx_free(c2_port->c2dev, elem);

		} while ((elem = elem->next) != tx_ring->start);
	} while (retry);

	c2_port->tx_avail = c2_port->tx_ring.count - 1;
	c2_port->c2dev->cur_tx = tx_ring->to_use - tx_ring->start;

	if (c2_port->tx_avail > MAX_SKB_FRAGS + 1)
		netif_wake_queue(c2_port->netdev);

	spin_unlock_irqrestore(&c2_port->tx_lock, flags);
}

/*
 * Process transmit descriptors marked 'DONE' by the firmware,
 * freeing up their unneeded sk_buffs.
 */
static void c2_tx_interrupt(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_ring *tx_ring = &c2_port->tx_ring;
	struct c2_element *elem;
	struct c2_txp_desc txp_htxd;

	spin_lock(&c2_port->tx_lock);

	for (elem = tx_ring->to_clean; elem != tx_ring->to_use;
	     elem = elem->next) {
		txp_htxd.flags =
		    be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_FLAGS));

		if (txp_htxd.flags != TXP_HTXD_DONE)
			break;

		if (netif_msg_tx_done(c2_port)) {
			/* PCI reads are expensive in fast path */
			txp_htxd.len =
			    be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_LEN));
			pr_debug("%s: tx done slot %3Zu status 0x%x len "
				 "%5u bytes\n",
				 netdev->name, elem - tx_ring->start,
				 txp_htxd.flags, txp_htxd.len);
		}

		c2_tx_free(c2dev, elem);
		++(c2_port->tx_avail);
	}

	tx_ring->to_clean = elem;

	if (netif_queue_stopped(netdev)
	    && c2_port->tx_avail > MAX_SKB_FRAGS + 1)
		netif_wake_queue(netdev);

	spin_unlock(&c2_port->tx_lock);
}

static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem)
{
	struct c2_rx_desc *rx_desc = elem->ht_desc;
	struct c2_rxp_hdr *rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;

	if (rxp_hdr->status != RXP_HRXD_OK ||
	    rxp_hdr->len > (rx_desc->len - sizeof(*rxp_hdr))) {
		pr_debug("BAD RXP_HRXD\n");
		pr_debug("  rx_desc : %p\n", rx_desc);
		pr_debug("    index : %Zu\n",
			 elem - c2_port->rx_ring.start);
		pr_debug("    len   : %u\n", rx_desc->len);
		pr_debug("  rxp_hdr : %p [PA %p]\n", rxp_hdr,
			 (void *) __pa((unsigned long) rxp_hdr));
		pr_debug("    flags : 0x%x\n", rxp_hdr->flags);
		pr_debug("    status: 0x%x\n", rxp_hdr->status);
		pr_debug("    len   : %u\n", rxp_hdr->len);
		pr_debug("    rsvd  : 0x%x\n", rxp_hdr->rsvd);
	}

	/* Setup the skb for reuse since we're dropping this pkt */
	elem->skb->data = elem->skb->head;
	skb_reset_tail_pointer(elem->skb);

	/* Zero out the rxp hdr in the sk_buff */
	memset(elem->skb->data, 0, sizeof(*rxp_hdr));

	/* Write the descriptor to the adapter's rx ring */
	__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
	__raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
	__raw_writew((__force u16) cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)),
		     elem->hw_desc + C2_RXP_LEN);
	__raw_writeq((__force u64) cpu_to_be64(elem->mapaddr),
		     elem->hw_desc + C2_RXP_ADDR);
	__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
		     elem->hw_desc + C2_RXP_FLAGS);

	pr_debug("packet dropped\n");
	c2_port->netdev->stats.rx_dropped++;
}

static void c2_rx_interrupt(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_ring *rx_ring = &c2_port->rx_ring;
	struct c2_element *elem;
	struct c2_rx_desc *rx_desc;
	struct c2_rxp_hdr *rxp_hdr;
	struct sk_buff *skb;
	dma_addr_t mapaddr;
	u32 maplen, buflen;
	unsigned long flags;

	spin_lock_irqsave(&c2dev->lock, flags);

	/* Begin where we left off */
	rx_ring->to_clean = rx_ring->start + c2dev->cur_rx;

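	/*
	 * Walk the ring until we either wrap back to the starting slot or
	 * hit the first descriptor the adapter has not marked DONE yet.
	 */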
	for (elem = rx_ring->to_clean; elem->next != rx_ring->to_clean;
	     elem = elem->next) {
		rx_desc = elem->ht_desc;
		mapaddr = elem->mapaddr;
		maplen = elem->maplen;
		skb = elem->skb;
		rxp_hdr = (struct c2_rxp_hdr *) skb->data;

		if (rxp_hdr->flags != RXP_HRXD_DONE)
			break;
		buflen = rxp_hdr->len;

		/* Sanity check the RXP header */
		if (rxp_hdr->status != RXP_HRXD_OK ||
		    buflen > (rx_desc->len - sizeof(*rxp_hdr))) {
			c2_rx_error(c2_port, elem);
			continue;
		}

		/*
		 * Allocate and map a new skb for replenishing the host
		 * RX desc
		 */
		if (c2_rx_alloc(c2_port, elem)) {
			c2_rx_error(c2_port, elem);
			continue;
		}

		/* Unmap the old skb */
		pci_unmap_single(c2dev->pcidev, mapaddr, maplen,
				 PCI_DMA_FROMDEVICE);

		prefetch(skb->data);

		/*
		 * Skip past the leading 8 bytes comprising of the
		 * "struct c2_rxp_hdr", prepended by the adapter
		 * to the usual Ethernet header ("struct ethhdr"),
		 * to the start of the raw Ethernet packet.
		 *
		 * Fix up the various fields in the sk_buff before
		 * passing it up to netif_rx(). The transfer size
		 * (in bytes) specified by the adapter len field of
		 * the "struct rxp_hdr_t" does NOT include the
		 * "sizeof(struct c2_rxp_hdr)".
		 */
		skb->data += sizeof(*rxp_hdr);
		skb_set_tail_pointer(skb, buflen);
		skb->len = buflen;
		skb->protocol = eth_type_trans(skb, netdev);

		netif_rx(skb);

		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += buflen;
	}

	/* Save where we left off */
	rx_ring->to_clean = elem;
	c2dev->cur_rx = elem - rx_ring->start;
	C2_SET_CUR_RX(c2dev, c2dev->cur_rx);

	spin_unlock_irqrestore(&c2dev->lock, flags);
}

/*
 * Handle netisr0 TX & RX interrupts.
 */
static irqreturn_t c2_interrupt(int irq, void *dev_id)
{
	unsigned int netisr0, dmaisr;
	int handled = 0;
	struct c2_dev *c2dev = (struct c2_dev *) dev_id;

	/* Process CCILNET interrupts */
	netisr0 = readl(c2dev->regs + C2_NISR0);
	if (netisr0) {

		/*
		 * There is an issue with the firmware that always
		 * provides the status of RX for both TX & RX
		 * interrupts.  So process both queues here.
		 */
		c2_rx_interrupt(c2dev->netdev);
		c2_tx_interrupt(c2dev->netdev);

		/* Clear the interrupt */
		writel(netisr0, c2dev->regs + C2_NISR0);
		handled++;
	}

	/* Process RNIC interrupts */
	dmaisr = readl(c2dev->regs + C2_DISR);
	if (dmaisr) {
		writel(dmaisr, c2dev->regs + C2_DISR);
		c2_rnic_interrupt(c2dev);
		handled++;
	}

	if (handled) {
		return IRQ_HANDLED;
	} else {
		return IRQ_NONE;
	}
}

static int c2_up(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_element *elem;
	struct c2_rxp_hdr *rxp_hdr;
	struct in_device *in_dev;
	size_t rx_size, tx_size;
	int ret, i;
	unsigned int netimr0;

	if (netif_msg_ifup(c2_port))
		pr_debug("%s: enabling interface\n", netdev->name);

	/* Set the Rx buffer size based on MTU */
	c2_set_rxbufsize(c2_port);

	/* Allocate DMA'able memory for Tx/Rx host descriptor rings */
	rx_size = c2_port->rx_ring.count * sizeof(struct c2_rx_desc);
	tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc);

	c2_port->mem_size = tx_size + rx_size;
	c2_port->mem = pci_alloc_consistent(c2dev->pcidev, c2_port->mem_size,
					    &c2_port->dma);
	if (c2_port->mem == NULL) {
		pr_debug("Unable to allocate memory for "
			 "host descriptor rings\n");
		return -ENOMEM;
	}

	memset(c2_port->mem, 0, c2_port->mem_size);

	/* Create the Rx host descriptor ring */
	if ((ret =
	     c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma,
			      c2dev->mmio_rxp_ring))) {
		pr_debug("Unable to create RX ring\n");
		goto bail0;
	}

	/* Allocate Rx buffers for the host descriptor ring */
	if (c2_rx_fill(c2_port)) {
		pr_debug("Unable to fill RX ring\n");
		goto bail1;
	}

	/* Create the Tx host descriptor ring */
	if ((ret = c2_tx_ring_alloc(&c2_port->tx_ring, c2_port->mem + rx_size,
				    c2_port->dma + rx_size,
				    c2dev->mmio_txp_ring))) {
		pr_debug("Unable to create TX ring\n");
		goto bail1;
	}

	/* Set the TX pointer to where we left off */
	c2_port->tx_avail = c2_port->tx_ring.count - 1;
	c2_port->tx_ring.to_use = c2_port->tx_ring.to_clean =
	    c2_port->tx_ring.start + c2dev->cur_tx;

	/* missing: Initialize MAC */

	BUG_ON(c2_port->tx_ring.to_use != c2_port->tx_ring.to_clean);

	/* Reset the adapter, ensures the driver is in sync with the RXP */
	c2_reset(c2_port);

	/* Reset the READY bit in the sk_buff RXP headers & adapter HRXDQ */
	for (i = 0, elem = c2_port->rx_ring.start; i < c2_port->rx_ring.count;
	     i++, elem++) {
		rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
		rxp_hdr->flags = 0;
		__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
			     elem->hw_desc + C2_RXP_FLAGS);
	}

	/* Enable network packets */
	netif_start_queue(netdev);

	/* Enable IRQ */
	writel(0, c2dev->regs + C2_IDIS);
	netimr0 = readl(c2dev->regs + C2_NIMR0);
	netimr0 &= ~(C2_PCI_HTX_INT | C2_PCI_HRX_INT);
	writel(netimr0, c2dev->regs + C2_NIMR0);

	/* Tell the stack to ignore arp requests for ipaddrs bound to
	 * other interfaces.  This is needed to prevent the host stack
	 * from responding to arp requests to the ipaddr bound on the
	 * rdma interface.
	 */
	in_dev = in_dev_get(netdev);
	IN_DEV_CONF_SET(in_dev, ARP_IGNORE, 1);
	in_dev_put(in_dev);

	return 0;

bail1:
	c2_rx_clean(c2_port);
	kfree(c2_port->rx_ring.start);

bail0:
	pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
			    c2_port->dma);

	return ret;
}

static int c2_down(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;

	if (netif_msg_ifdown(c2_port))
		pr_debug("%s: disabling interface\n",
			 netdev->name);

	/* Wait for all the queued packets to get sent */
	c2_tx_interrupt(netdev);

	/* Disable network packets */
	netif_stop_queue(netdev);

	/* Disable IRQs by clearing the interrupt mask */
	writel(1, c2dev->regs + C2_IDIS);
	writel(0, c2dev->regs + C2_NIMR0);

	/* missing: Stop transmitter */

	/* missing: Stop receiver */

	/* Reset the adapter, ensures the driver is in sync with the RXP */
	c2_reset(c2_port);

	/* missing: Turn off LEDs here */

	/* Free all buffers in the host descriptor rings */
	c2_tx_clean(c2_port);
	c2_rx_clean(c2_port);

	/* Free the host descriptor rings */
	kfree(c2_port->rx_ring.start);
	kfree(c2_port->tx_ring.start);
	pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
			    c2_port->dma);

	return 0;
}

static void c2_reset(struct c2_port *c2_port)
{
	struct c2_dev *c2dev = c2_port->c2dev;
	unsigned int cur_rx = c2dev->cur_rx;

	/* Tell the hardware to quiesce */
	C2_SET_CUR_RX(c2dev, cur_rx | C2_PCI_HRX_QUI);

	/*
	 * The hardware will reset the C2_PCI_HRX_QUI bit once
	 * the RXP is quiesced.  Wait 2 seconds for this.
	 */
	ssleep(2);

	cur_rx = C2_GET_CUR_RX(c2dev);

	if (cur_rx & C2_PCI_HRX_QUI)
		pr_debug("c2_reset: failed to quiesce the hardware!\n");

	cur_rx &= ~C2_PCI_HRX_QUI;

	c2dev->cur_rx = cur_rx;

	pr_debug("Current RX: %u\n", c2dev->cur_rx);
}

static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_ring *tx_ring = &c2_port->tx_ring;
	struct c2_element *elem;
	dma_addr_t mapaddr;
	u32 maplen;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&c2_port->tx_lock, flags);

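	/*
	 * Each skb needs one descriptor for its linear head plus one per
	 * page fragment, hence the nr_frags + 1 availability check.
	 */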
	if (unlikely(c2_port->tx_avail < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&c2_port->tx_lock, flags);

		pr_debug("%s: Tx ring full when queue awake!\n",
			 netdev->name);
		return NETDEV_TX_BUSY;
	}

	maplen = skb_headlen(skb);
	mapaddr =
	    pci_map_single(c2dev->pcidev, skb->data, maplen, PCI_DMA_TODEVICE);

	elem = tx_ring->to_use;
	elem->skb = skb;
	elem->mapaddr = mapaddr;
	elem->maplen = maplen;

	/* Tell HW to xmit */
	__raw_writeq((__force u64) cpu_to_be64(mapaddr),
		     elem->hw_desc + C2_TXP_ADDR);
	__raw_writew((__force u16) cpu_to_be16(maplen),
		     elem->hw_desc + C2_TXP_LEN);
	__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
		     elem->hw_desc + C2_TXP_FLAGS);

	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += maplen;

	/* Loop thru additional data fragments and queue them */
	if (skb_shinfo(skb)->nr_frags) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			maplen = frag->size;
			mapaddr =
			    pci_map_page(c2dev->pcidev, frag->page,
					 frag->page_offset, maplen,
					 PCI_DMA_TODEVICE);

			elem = elem->next;
			elem->skb = NULL;
			elem->mapaddr = mapaddr;
			elem->maplen = maplen;

			/* Tell HW to xmit */
			__raw_writeq((__force u64) cpu_to_be64(mapaddr),
				     elem->hw_desc + C2_TXP_ADDR);
			__raw_writew((__force u16) cpu_to_be16(maplen),
				     elem->hw_desc + C2_TXP_LEN);
			__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
				     elem->hw_desc + C2_TXP_FLAGS);

			netdev->stats.tx_packets++;
			netdev->stats.tx_bytes += maplen;
		}
	}

	tx_ring->to_use = elem->next;
	c2_port->tx_avail -= (skb_shinfo(skb)->nr_frags + 1);

	if (c2_port->tx_avail <= MAX_SKB_FRAGS + 1) {
		netif_stop_queue(netdev);
		if (netif_msg_tx_queued(c2_port))
			pr_debug("%s: transmit queue full\n",
				 netdev->name);
	}

	spin_unlock_irqrestore(&c2_port->tx_lock, flags);

	netdev->trans_start = jiffies;

	return NETDEV_TX_OK;
}

static void c2_tx_timeout(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);

	if (netif_msg_timer(c2_port))
		pr_debug("%s: tx timeout\n", netdev->name);

	c2_tx_clean(c2_port);
}

static int c2_change_mtu(struct net_device *netdev, int new_mtu)
{
	int ret = 0;

	if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
		return -EINVAL;

	netdev->mtu = new_mtu;

	if (netif_running(netdev)) {
		c2_down(netdev);

		c2_up(netdev);
	}

	return ret;
}

static const struct net_device_ops c2_netdev = {
	.ndo_open = c2_up,
	.ndo_stop = c2_down,
	.ndo_start_xmit = c2_xmit_frame,
	.ndo_tx_timeout = c2_tx_timeout,
	.ndo_change_mtu = c2_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

/* Initialize network device */
static struct net_device *c2_devinit(struct c2_dev *c2dev,
				     void __iomem * mmio_addr)
{
	struct c2_port *c2_port = NULL;
	struct net_device *netdev = alloc_etherdev(sizeof(*c2_port));

	if (!netdev) {
		pr_debug("c2_port etherdev alloc failed");
		return NULL;
	}

	SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev);

	netdev->netdev_ops = &c2_netdev;
	netdev->watchdog_timeo = C2_TX_TIMEOUT;
	netdev->irq = c2dev->pcidev->irq;

	c2_port = netdev_priv(netdev);
	c2_port->netdev = netdev;
	c2_port->c2dev = c2dev;
	c2_port->msg_enable = netif_msg_init(debug, default_msg);
	c2_port->tx_ring.count = C2_NUM_TX_DESC;
	c2_port->rx_ring.count = C2_NUM_RX_DESC;

	spin_lock_init(&c2_port->tx_lock);

	/* Copy our 48-bit ethernet hardware address */
	memcpy_fromio(netdev->dev_addr, mmio_addr + C2_REGS_ENADDR, 6);

	/* Validate the MAC address */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		pr_debug("Invalid MAC Address\n");
		c2_print_macaddr(netdev);
		free_netdev(netdev);
		return NULL;
	}

	c2dev->netdev = netdev;

	return netdev;
}

static int __devinit c2_probe(struct pci_dev *pcidev,
			      const struct pci_device_id *ent)
{
	int ret = 0, i;
	unsigned long reg0_start, reg0_flags, reg0_len;
	unsigned long reg2_start, reg2_flags, reg2_len;
	unsigned long reg4_start, reg4_flags, reg4_len;
	unsigned kva_map_size;
	struct net_device *netdev = NULL;
	struct c2_dev *c2dev = NULL;
	void __iomem *mmio_regs = NULL;

	printk(KERN_INFO PFX "AMSO1100 Gigabit Ethernet driver v%s loaded\n",
	       DRV_VERSION);

	/* Enable PCI device */
	ret = pci_enable_device(pcidev);
	if (ret) {
		printk(KERN_ERR PFX "%s: Unable to enable PCI device\n",
		       pci_name(pcidev));
		goto bail0;
	}

	reg0_start = pci_resource_start(pcidev, BAR_0);
	reg0_len = pci_resource_len(pcidev, BAR_0);
	reg0_flags = pci_resource_flags(pcidev, BAR_0);

	reg2_start = pci_resource_start(pcidev, BAR_2);
	reg2_len = pci_resource_len(pcidev, BAR_2);
	reg2_flags = pci_resource_flags(pcidev, BAR_2);

	reg4_start = pci_resource_start(pcidev, BAR_4);
	reg4_len = pci_resource_len(pcidev, BAR_4);
	reg4_flags = pci_resource_flags(pcidev, BAR_4);

	pr_debug("BAR0 size = 0x%lX bytes\n", reg0_len);
	pr_debug("BAR2 size = 0x%lX bytes\n", reg2_len);
	pr_debug("BAR4 size = 0x%lX bytes\n", reg4_len);

	/* Make sure PCI base addr are MMIO */
	if (!(reg0_flags & IORESOURCE_MEM) ||
	    !(reg2_flags & IORESOURCE_MEM) || !(reg4_flags & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "PCI regions not an MMIO resource\n");
		ret = -ENODEV;
		goto bail1;
	}

	/* Check for weird/broken PCI region reporting */
	if ((reg0_len < C2_REG0_SIZE) ||
	    (reg2_len < C2_REG2_SIZE) || (reg4_len < C2_REG4_SIZE)) {
		printk(KERN_ERR PFX "Invalid PCI region sizes\n");
		ret = -ENODEV;
		goto bail1;
	}

	/* Reserve PCI I/O and memory resources */
	ret = pci_request_regions(pcidev, DRV_NAME);
	if (ret) {
		printk(KERN_ERR PFX "%s: Unable to request regions\n",
		       pci_name(pcidev));
		goto bail1;
	}

	if ((sizeof(dma_addr_t) > 4)) {
		ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
		if (ret < 0) {
			printk(KERN_ERR PFX "64b DMA configuration failed\n");
			goto bail2;
		}
	} else {
		ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
		if (ret < 0) {
			printk(KERN_ERR PFX "32b DMA configuration failed\n");
			goto bail2;
		}
	}

	/* Enables bus-mastering on the device */
	pci_set_master(pcidev);

	/* Remap the adapter PCI registers in BAR4 */
	mmio_regs = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
				    sizeof(struct c2_adapter_pci_regs));
	if (!mmio_regs) {
		printk(KERN_ERR PFX
		       "Unable to remap adapter PCI registers in BAR4\n");
		ret = -EIO;
		goto bail2;
	}

	/* Validate PCI regs magic */
	for (i = 0; i < sizeof(c2_magic); i++) {
		if (c2_magic[i] != readb(mmio_regs + C2_REGS_MAGIC + i)) {
			printk(KERN_ERR PFX "Downlevel Firmware boot loader "
			       "[%d/%Zd: got 0x%x, exp 0x%x]. Use the cc_flash "
			       "utility to update your boot loader\n",
			       i + 1, sizeof(c2_magic),
			       readb(mmio_regs + C2_REGS_MAGIC + i),
			       c2_magic[i]);
			printk(KERN_ERR PFX "Adapter not claimed\n");
			iounmap(mmio_regs);
			ret = -EIO;
			goto bail2;
		}
	}

	/* Validate the adapter version */
	if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) {
		printk(KERN_ERR PFX "Version mismatch "
		       "[fw=%u, c2=%u], Adapter not claimed\n",
		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)),
		       C2_VERSION);
		ret = -EINVAL;
		iounmap(mmio_regs);
		goto bail2;
	}

	/* Validate the adapter IVN */
	if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) {
		printk(KERN_ERR PFX "Downlevel firmware level. You should be using "
		       "the OpenIB device support kit. "
		       "[fw=0x%x, c2=0x%x], Adapter not claimed\n",
		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)),
		       C2_IVN);
		ret = -EINVAL;
		iounmap(mmio_regs);
		goto bail2;
	}

	/* Allocate hardware structure */
	c2dev = (struct c2_dev *) ib_alloc_device(sizeof(*c2dev));
	if (!c2dev) {
		printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n",
		       pci_name(pcidev));
		ret = -ENOMEM;
		iounmap(mmio_regs);
		goto bail2;
	}

	memset(c2dev, 0, sizeof(*c2dev));
	spin_lock_init(&c2dev->lock);
	c2dev->pcidev = pcidev;
	c2dev->cur_tx = 0;

	/* Get the last RX index */
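	/*
	 * The 0xffffc000 constant appears to be the adapter-side base
	 * address of the RX descriptor queue, so the subtraction below
	 * turns the register value into a ring index (an inference; the
	 * base is not named in this file).
	 */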
	c2dev->cur_rx =
	    (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_HRX_CUR)) -
	     0xffffc000) / sizeof(struct c2_rxp_desc);

	/* Request an interrupt line for the driver */
	ret = request_irq(pcidev->irq, c2_interrupt, IRQF_SHARED, DRV_NAME, c2dev);
	if (ret) {
		printk(KERN_ERR PFX "%s: requested IRQ %u is busy\n",
		       pci_name(pcidev), pcidev->irq);
		iounmap(mmio_regs);
		goto bail3;
	}

	/* Set driver specific data */
	pci_set_drvdata(pcidev, c2dev);

	/* Initialize network device */
	if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {
		iounmap(mmio_regs);
		goto bail4;
	}

	/* Save off the actual size prior to unmapping mmio_regs */
	kva_map_size = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_PCI_WINSIZE));

	/* Unmap the adapter PCI registers in BAR4 */
	iounmap(mmio_regs);

	/* Register network device */
	ret = register_netdev(netdev);
	if (ret) {
		printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n",
		       ret);
		goto bail5;
	}

	/* Disable network packets */
	netif_stop_queue(netdev);

	/* Remap the adapter HRXDQ PA space to kernel VA space */
	c2dev->mmio_rxp_ring = ioremap_nocache(reg4_start + C2_RXP_HRXDQ_OFFSET,
					       C2_RXP_HRXDQ_SIZE);
	if (!c2dev->mmio_rxp_ring) {
		printk(KERN_ERR PFX "Unable to remap MMIO HRXDQ region\n");
		ret = -EIO;
		goto bail6;
	}

	/* Remap the adapter HTXDQ PA space to kernel VA space */
	c2dev->mmio_txp_ring = ioremap_nocache(reg4_start + C2_TXP_HTXDQ_OFFSET,
					       C2_TXP_HTXDQ_SIZE);
	if (!c2dev->mmio_txp_ring) {
		printk(KERN_ERR PFX "Unable to remap MMIO HTXDQ region\n");
		ret = -EIO;
		goto bail7;
	}

	/* Save off the current RX index in the last 4 bytes of the TXP Ring */
	C2_SET_CUR_RX(c2dev, c2dev->cur_rx);

	/* Remap the PCI registers in adapter BAR0 to kernel VA space */
	c2dev->regs = ioremap_nocache(reg0_start, reg0_len);
	if (!c2dev->regs) {
		printk(KERN_ERR PFX "Unable to remap BAR0\n");
		ret = -EIO;
		goto bail8;
	}

	/* Remap the PCI registers in adapter BAR4 to kernel VA space */
	c2dev->pa = reg4_start + C2_PCI_REGS_OFFSET;
	c2dev->kva = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
				     kva_map_size);
	if (!c2dev->kva) {
		printk(KERN_ERR PFX "Unable to remap BAR4\n");
		ret = -EIO;
		goto bail9;
	}

	/* Print out the MAC address */
	c2_print_macaddr(netdev);

	ret = c2_rnic_init(c2dev);
	if (ret) {
		printk(KERN_ERR PFX "c2_rnic_init failed: %d\n", ret);
		goto bail10;
	}

	if (c2_register_device(c2dev))
		goto bail10;

	return 0;

bail10:
	iounmap(c2dev->kva);

bail9:
	iounmap(c2dev->regs);

bail8:
	iounmap(c2dev->mmio_txp_ring);

bail7:
	iounmap(c2dev->mmio_rxp_ring);

bail6:
	unregister_netdev(netdev);

bail5:
	free_netdev(netdev);

bail4:
	free_irq(pcidev->irq, c2dev);

bail3:
	ib_dealloc_device(&c2dev->ibdev);

bail2:
	pci_release_regions(pcidev);

bail1:
	pci_disable_device(pcidev);

bail0:
	return ret;
}

static void __devexit c2_remove(struct pci_dev *pcidev)
{
	struct c2_dev *c2dev = pci_get_drvdata(pcidev);
	struct net_device *netdev = c2dev->netdev;

	/* Unregister with OpenIB */
	c2_unregister_device(c2dev);

	/* Clean up the RNIC resources */
	c2_rnic_term(c2dev);

	/* Remove network device from the kernel */
	unregister_netdev(netdev);

	/* Free network device */
	free_netdev(netdev);

	/* Free the interrupt line */
	free_irq(pcidev->irq, c2dev);

	/* missing: Turn LEDs off here */

	/* Unmap adapter PA space */
	iounmap(c2dev->kva);
	iounmap(c2dev->regs);
	iounmap(c2dev->mmio_txp_ring);
	iounmap(c2dev->mmio_rxp_ring);

	/* Free the hardware structure */
	ib_dealloc_device(&c2dev->ibdev);

	/* Release reserved PCI I/O and memory resources */
	pci_release_regions(pcidev);

	/* Disable PCI device */
	pci_disable_device(pcidev);

	/* Clear driver specific data */
	pci_set_drvdata(pcidev, NULL);
}

static struct pci_driver c2_pci_driver = {
	.name = DRV_NAME,
	.id_table = c2_pci_table,
	.probe = c2_probe,
	.remove = __devexit_p(c2_remove),
};

static int __init c2_init_module(void)
{
	return pci_register_driver(&c2_pci_driver);
}

static void __exit c2_exit_module(void)
{
	pci_unregister_driver(&c2_pci_driver);
}

module_init(c2_init_module);
module_exit(c2_exit_module);