/*
 * sgiseeq.c: Seeq8003 ethernet driver for SGI machines.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include <asm/sgi/hpc3.h>
#include <asm/sgi/ip22.h>
#include <asm/sgi/seeq.h>

#include "sgiseeq.h"

static char *sgiseeqstr = "SGI Seeq8003";
/*
 * If you want speed, you do something silly, it always has worked for me. So,
 * with that in mind, I've decided to make this driver look completely like a
 * stupid Lance from a driver architecture perspective. Only difference is that
 * here our "ring buffer" looks and acts like a real Lance one does but is
 * laid out like how the HPC DMA and the Seeq want it to. You'd be surprised
 * how a stupid idea like this can pay off in performance, not to mention
 * making this driver 2,000 times easier to write. ;-)
 */

/* Tune these if we tend to run out often etc. */
#define SEEQ_RX_BUFFERS 16
#define SEEQ_TX_BUFFERS 16

#define PKT_BUF_SZ 1584

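/* The ring indices below wrap by masking, so the buffer counts above must
 * stay powers of two.
 */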
#define NEXT_RX(i)  (((i) + 1) & (SEEQ_RX_BUFFERS - 1))
#define NEXT_TX(i)  (((i) + 1) & (SEEQ_TX_BUFFERS - 1))
#define PREV_RX(i)  (((i) - 1) & (SEEQ_RX_BUFFERS - 1))
#define PREV_TX(i)  (((i) - 1) & (SEEQ_TX_BUFFERS - 1))

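/* One tx slot is always left unused so that a full ring (0 available) is
 * distinguishable from an empty one: with tx_old == tx_new this yields
 * SEEQ_TX_BUFFERS - 1, i.e. 15, never 16.
 */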
#define TX_BUFFS_AVAIL(sp) ((sp->tx_old <= sp->tx_new) ? \
			    sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \
			    sp->tx_old - sp->tx_new - 1)

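/* All descriptors live in one contiguous DMA block that starts at rx_desc,
 * so a descriptor's bus address is the block's bus address plus the
 * descriptor's offset within the block.
 */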
#define VIRT_TO_DMA(sp, v) ((sp)->srings_dma +				\
			    (dma_addr_t)((unsigned long)(v) -		\
					 (unsigned long)((sp)->rx_desc)))

/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
 */
static int rx_copybreak = 100;

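/* Pad each descriptor out to 128 bytes, presumably so that no two
 * descriptors ever share a cache line and syncing one cannot clobber its
 * neighbour.
 */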
#define PAD_SIZE    (128 - sizeof(struct hpc_dma_desc) - sizeof(void *))

struct sgiseeq_rx_desc {
	volatile struct hpc_dma_desc rdma;
	u8 padding[PAD_SIZE];
	struct sk_buff *skb;
};

struct sgiseeq_tx_desc {
	volatile struct hpc_dma_desc tdma;
	u8 padding[PAD_SIZE];
	struct sk_buff *skb;
};

/*
 * Warning: This structure is laid out in a certain way because HPC dma
 *          descriptors must be 8-byte aligned.  So don't touch this without
 *          some care.
 */
struct sgiseeq_init_block {	/* Note the name ;-) */
	struct sgiseeq_rx_desc rxvector[SEEQ_RX_BUFFERS];
	struct sgiseeq_tx_desc txvector[SEEQ_TX_BUFFERS];
};

struct sgiseeq_private {
	struct sgiseeq_init_block *srings;
	dma_addr_t srings_dma;

	/* Ptrs to the descriptors in uncached space. */
	struct sgiseeq_rx_desc *rx_desc;
	struct sgiseeq_tx_desc *tx_desc;

	char *name;
	struct hpc3_ethregs *hregs;
	struct sgiseeq_regs *sregs;

	/* Ring entry counters. */
	unsigned int rx_new, tx_new;
	unsigned int rx_old, tx_old;

	int is_edlc;
	unsigned char control;
	unsigned char mode;

	spinlock_t tx_lock;
};

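/* Rx and tx descriptors share the same size and layout, so both helpers
 * below sync sizeof(struct sgiseeq_rx_desc) bytes no matter which kind of
 * descriptor they are handed.
 */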
static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
{
	dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
		       DMA_FROM_DEVICE);
}

static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
{
	dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
		       DMA_TO_DEVICE);
}

static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
{
	hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ;
	udelay(20);
	hregs->reset = 0;
}

static inline void reset_hpc3_and_seeq(struct hpc3_ethregs *hregs,
				       struct sgiseeq_regs *sregs)
{
	hregs->rx_ctrl = hregs->tx_ctrl = 0;
	hpc3_eth_reset(hregs);
}

#define RSTAT_GO_BITS (SEEQ_RCMD_IGOOD | SEEQ_RCMD_IEOF | SEEQ_RCMD_ISHORT | \
		       SEEQ_RCMD_IDRIB | SEEQ_RCMD_ICRC)

static inline void seeq_go(struct sgiseeq_private *sp,
			   struct hpc3_ethregs *hregs,
			   struct sgiseeq_regs *sregs)
{
	sregs->rstat = sp->mode | RSTAT_GO_BITS;
	hregs->rx_ctrl = HPC3_ERXCTRL_ACTIVE;
}

static inline void __sgiseeq_set_mac_address(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	int i;

	sregs->tstat = SEEQ_TCMD_RB0;
	for (i = 0; i < 6; i++)
		sregs->rw.eth_addr[i] = dev->dev_addr[i];
}

static int sgiseeq_set_mac_address(struct net_device *dev, void *addr)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sockaddr *sa = addr;

	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);

	spin_lock_irq(&sp->tx_lock);
	__sgiseeq_set_mac_address(dev);
	spin_unlock_irq(&sp->tx_lock);

	return 0;
}

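/* HPCDMA_OWN hands a descriptor to the HPC, HPCDMA_XIE asks for an
 * interrupt on completion, and the low HPCDMA_BCNT bits carry the buffer
 * byte count; HPCDMA_EOX marks the end of the chain and HPCDMA_ETXD flags
 * a finished transmit, which is how an idle tx slot is initialized.
 */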
#define TCNTINFO_INIT	(HPCDMA_EOX | HPCDMA_ETXD)
#define RCNTCFG_INIT	(HPCDMA_OWN | HPCDMA_EORP | HPCDMA_XIE)
#define RCNTINFO_INIT	(RCNTCFG_INIT | (PKT_BUF_SZ & HPCDMA_BCNT))

static int seeq_init_ring(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i;

	netif_stop_queue(dev);
	sp->rx_new = sp->tx_new = 0;
	sp->rx_old = sp->tx_old = 0;

	__sgiseeq_set_mac_address(dev);

	/* Setup tx ring. */
	for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
		sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT;
		dma_sync_desc_dev(dev, &sp->tx_desc[i]);
	}

	/* And now the rx ring. */
	for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
		if (!sp->rx_desc[i].skb) {
			dma_addr_t dma_addr;
			struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);

			if (skb == NULL)
				return -ENOMEM;
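			/* The usual 2-byte reserve so the IP header ends up
			 * word-aligned behind the 14-byte Ethernet header;
			 * the mapping below starts at skb->data - 2, i.e. at
			 * the buffer's original, pre-reserve start.
			 */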
			skb_reserve(skb, 2);
			dma_addr = dma_map_single(dev->dev.parent,
						  skb->data - 2,
						  PKT_BUF_SZ, DMA_FROM_DEVICE);
			sp->rx_desc[i].skb = skb;
			sp->rx_desc[i].rdma.pbuf = dma_addr;
		}
		sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
		dma_sync_desc_dev(dev, &sp->rx_desc[i]);
	}
	sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
	dma_sync_desc_dev(dev, &sp->rx_desc[i - 1]);
	return 0;
}

static void seeq_purge_ring(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i;

	/* clear tx ring. */
	for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
		if (sp->tx_desc[i].skb) {
			dev_kfree_skb(sp->tx_desc[i].skb);
			sp->tx_desc[i].skb = NULL;
		}
	}

	/* And now the rx ring. */
	for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
		if (sp->rx_desc[i].skb) {
			dev_kfree_skb(sp->rx_desc[i].skb);
			sp->rx_desc[i].skb = NULL;
		}
	}
}

#ifdef DEBUG
static struct sgiseeq_private *gpriv;
static struct net_device *gdev;

static void sgiseeq_dump_rings(void)
{
	static int once;
	struct sgiseeq_rx_desc *r = gpriv->rx_desc;
	struct sgiseeq_tx_desc *t = gpriv->tx_desc;
	struct hpc3_ethregs *hregs = gpriv->hregs;
	int i;

	if (once)
		return;
	once++;
	printk("RING DUMP:\n");
	for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
		printk("RX [%d]: @(%p) [%08x,%08x,%08x] ",
		       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
		       r[i].rdma.pnext);
		i += 1;
		printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
		       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
		       r[i].rdma.pnext);
	}
	for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
		printk("TX [%d]: @(%p) [%08x,%08x,%08x] ",
		       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
		       t[i].tdma.pnext);
		i += 1;
		printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
		       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
		       t[i].tdma.pnext);
	}
	printk("INFO: [rx_new = %d rx_old=%d] [tx_new = %d tx_old = %d]\n",
	       gpriv->rx_new, gpriv->rx_old, gpriv->tx_new, gpriv->tx_old);
	printk("RREGS: rx_cbptr[%08x] rx_ndptr[%08x] rx_ctrl[%08x]\n",
	       hregs->rx_cbptr, hregs->rx_ndptr, hregs->rx_ctrl);
	printk("TREGS: tx_cbptr[%08x] tx_ndptr[%08x] tx_ctrl[%08x]\n",
	       hregs->tx_cbptr, hregs->tx_ndptr, hregs->tx_ctrl);
}
#endif

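/* SEEQ_TCMD_RB2 apparently selects Seeq register bank 2, which the EDLC
 * variant wants during normal operation (compare SEEQ_TCMD_RB0 in
 * __sgiseeq_set_mac_address above).
 */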
#define TSTAT_INIT_SEEQ (SEEQ_TCMD_IPT|SEEQ_TCMD_I16|SEEQ_TCMD_IC|SEEQ_TCMD_IUF)
#define TSTAT_INIT_EDLC ((TSTAT_INIT_SEEQ) | SEEQ_TCMD_RB2)

static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
		     struct sgiseeq_regs *sregs)
{
	struct hpc3_ethregs *hregs = sp->hregs;
	int err;

	reset_hpc3_and_seeq(hregs, sregs);
	err = seeq_init_ring(dev);
	if (err)
		return err;

	/* Setup to field the proper interrupt types. */
	if (sp->is_edlc) {
		sregs->tstat = TSTAT_INIT_EDLC;
		sregs->rw.wregs.control = sp->control;
		sregs->rw.wregs.frame_gap = 0;
	} else {
		sregs->tstat = TSTAT_INIT_SEEQ;
	}

	hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc);
	hregs->tx_ndptr = VIRT_TO_DMA(sp, sp->tx_desc);

	seeq_go(sp, hregs, sregs);
	return 0;
}

static void record_rx_errors(struct net_device *dev, unsigned char status)
{
	if (status & SEEQ_RSTAT_OVERF ||
	    status & SEEQ_RSTAT_SFRAME)
		dev->stats.rx_over_errors++;
	if (status & SEEQ_RSTAT_CERROR)
		dev->stats.rx_crc_errors++;
	if (status & SEEQ_RSTAT_DERROR)
		dev->stats.rx_frame_errors++;
	if (status & SEEQ_RSTAT_REOF)
		dev->stats.rx_errors++;
}

static inline void rx_maybe_restart(struct sgiseeq_private *sp,
				    struct hpc3_ethregs *hregs,
				    struct sgiseeq_regs *sregs)
{
	if (!(hregs->rx_ctrl & HPC3_ERXCTRL_ACTIVE)) {
		hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc + sp->rx_new);
		seeq_go(sp, hregs, sregs);
	}
}

static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp,
			      struct hpc3_ethregs *hregs,
			      struct sgiseeq_regs *sregs)
{
	struct sgiseeq_rx_desc *rd;
	struct sk_buff *skb = NULL;
	struct sk_buff *newskb;
	unsigned char pkt_status;
	int len = 0;
	unsigned int orig_end = PREV_RX(sp->rx_new);

	/* Service every received packet. */
	rd = &sp->rx_desc[sp->rx_new];
	dma_sync_desc_cpu(dev, rd);
	while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
		len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
		dma_unmap_single(dev->dev.parent, rd->rdma.pbuf,
				 PKT_BUF_SZ, DMA_FROM_DEVICE);
		pkt_status = rd->skb->data[len];
		if (pkt_status & SEEQ_RSTAT_FIG) {
			/* Packet is OK. */
			/* We don't want to receive our own packets */
			if (memcmp(rd->skb->data + 6, dev->dev_addr, ETH_ALEN)) {
				if (len > rx_copybreak) {
					skb = rd->skb;
					newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
					if (!newskb) {
						newskb = skb;
						skb = NULL;
						goto memory_squeeze;
					}
					skb_reserve(newskb, 2);
				} else {
					skb = netdev_alloc_skb_ip_align(dev, len);
					if (skb)
						skb_copy_to_linear_data(skb, rd->skb->data, len);

					newskb = rd->skb;
				}
memory_squeeze:
				if (skb) {
					skb_put(skb, len);
					skb->protocol = eth_type_trans(skb, dev);
					netif_rx(skb);
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += len;
				} else {
					printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
					       dev->name);
					dev->stats.rx_dropped++;
				}
			} else {
				/* Silently drop my own packets */
				newskb = rd->skb;
			}
		} else {
			record_rx_errors(dev, pkt_status);
			newskb = rd->skb;
		}
		rd->skb = newskb;
		rd->rdma.pbuf = dma_map_single(dev->dev.parent,
					       newskb->data - 2,
					       PKT_BUF_SZ, DMA_FROM_DEVICE);

		/* Return the entry to the ring pool. */
		rd->rdma.cntinfo = RCNTINFO_INIT;
		sp->rx_new = NEXT_RX(sp->rx_new);
		dma_sync_desc_dev(dev, rd);
		rd = &sp->rx_desc[sp->rx_new];
		dma_sync_desc_cpu(dev, rd);
	}
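	/* Everything the HPC handed back has been consumed; now move the
	 * end-of-ring marker from its old spot to the slot just behind the
	 * new rx_new.
	 */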
	dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]);
	sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
	dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]);
	dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
	sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
	dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
	rx_maybe_restart(sp, hregs, sregs);
}

static inline void tx_maybe_reset_collisions(struct sgiseeq_private *sp,
					     struct sgiseeq_regs *sregs)
{
	if (sp->is_edlc) {
		sregs->rw.wregs.control = sp->control & ~(SEEQ_CTRL_XCNT);
		sregs->rw.wregs.control = sp->control;
	}
}

static inline void kick_tx(struct net_device *dev,
			   struct sgiseeq_private *sp,
			   struct hpc3_ethregs *hregs)
{
	struct sgiseeq_tx_desc *td;
	int i = sp->tx_old;

	/* If the HPC aint doin nothin, and there are more packets
	 * with ETXD cleared and XIU set we must make very certain
	 * that we restart the HPC else we risk locking up the
	 * adapter.  The following code is only safe iff the HPCDMA
	 * is not active!
	 */
	td = &sp->tx_desc[i];
	dma_sync_desc_cpu(dev, td);
	while ((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) ==
	      (HPCDMA_XIU | HPCDMA_ETXD)) {
		i = NEXT_TX(i);
		td = &sp->tx_desc[i];
		dma_sync_desc_cpu(dev, td);
	}
	if (td->tdma.cntinfo & HPCDMA_XIU) {
		hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
		hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
	}
}

static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp,
			      struct hpc3_ethregs *hregs,
			      struct sgiseeq_regs *sregs)
{
	struct sgiseeq_tx_desc *td;
	unsigned long status = hregs->tx_ctrl;
	int j;

	tx_maybe_reset_collisions(sp, sregs);

	if (!(status & (HPC3_ETXCTRL_ACTIVE | SEEQ_TSTAT_PTRANS))) {
		/* Oops, HPC detected some sort of error. */
		if (status & SEEQ_TSTAT_R16)
			dev->stats.tx_aborted_errors++;
		if (status & SEEQ_TSTAT_UFLOW)
			dev->stats.tx_fifo_errors++;
		if (status & SEEQ_TSTAT_LCLS)
			dev->stats.collisions++;
	}

	/* Ack 'em... */
	for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) {
		td = &sp->tx_desc[j];

		dma_sync_desc_cpu(dev, td);
		if (!(td->tdma.cntinfo & (HPCDMA_XIU)))
			break;
		if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) {
			if (!(status & HPC3_ETXCTRL_ACTIVE)) {
				hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
				hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
			}
			break;
		}
		dev->stats.tx_packets++;
		sp->tx_old = NEXT_TX(sp->tx_old);
		td->tdma.cntinfo &= ~(HPCDMA_XIU | HPCDMA_XIE);
		td->tdma.cntinfo |= HPCDMA_EOX;
		if (td->skb) {
			dev_kfree_skb_any(td->skb);
			td->skb = NULL;
		}
		dma_sync_desc_dev(dev, td);
	}
}

static irqreturn_t sgiseeq_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct hpc3_ethregs *hregs = sp->hregs;
	struct sgiseeq_regs *sregs = sp->sregs;

	spin_lock(&sp->tx_lock);

	/* Ack the IRQ and set software state. */
	hregs->reset = HPC3_ERST_CLRIRQ;

	/* Always check for received packets. */
	sgiseeq_rx(dev, sp, hregs, sregs);

	/* Only check for tx acks if we have something queued. */
	if (sp->tx_old != sp->tx_new)
		sgiseeq_tx(dev, sp, hregs, sregs);

	if ((TX_BUFFS_AVAIL(sp) > 0) && netif_queue_stopped(dev)) {
		netif_wake_queue(dev);
	}
	spin_unlock(&sp->tx_lock);

	return IRQ_HANDLED;
}

static int sgiseeq_open(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	unsigned int irq = dev->irq;
	int err;

	if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
		printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq);
		return -EAGAIN;
	}

	err = init_seeq(dev, sp, sregs);
	if (err)
		goto out_free_irq;

	netif_start_queue(dev);

	return 0;

out_free_irq:
	free_irq(irq, dev);

	return err;
}

static int sgiseeq_close(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	unsigned int irq = dev->irq;

	netif_stop_queue(dev);

	/* Shutdown the Seeq. */
	reset_hpc3_and_seeq(sp->hregs, sregs);
	free_irq(irq, dev);
	seeq_purge_ring(dev);

	return 0;
}

static inline int sgiseeq_reset(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	int err;

	err = init_seeq(dev, sp, sregs);
	if (err)
		return err;

	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue(dev);

	return 0;
}

static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct hpc3_ethregs *hregs = sp->hregs;
	unsigned long flags;
	struct sgiseeq_tx_desc *td;
	int len, entry;

	spin_lock_irqsave(&sp->tx_lock, flags);

	/* Setup... */
	len = skb->len;
	if (len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN)) {
			spin_unlock_irqrestore(&sp->tx_lock, flags);
			return NETDEV_TX_OK;
		}
		len = ETH_ZLEN;
	}

	dev->stats.tx_bytes += len;
	entry = sp->tx_new;
	td = &sp->tx_desc[entry];
	dma_sync_desc_cpu(dev, td);

	/* Create entry.  There are so many races with adding a new
	 * descriptor to the chain:
	 * 1) Assume that the HPC is off processing a DMA chain while
	 *    we are changing all of the following.
	 * 2) Do not allow the HPC to look at a new descriptor until
	 *    we have completely set up its state.  This means, do
	 *    not clear HPCDMA_EOX in the current last descriptor
	 *    until the one we are adding looks consistent and could
	 *    be processed right now.
	 * 3) The tx interrupt code must notice when we've added a new
	 *    entry and the HPC got to the end of the chain before we
	 *    added this new entry and restarted it.
	 */
	td->skb = skb;
	td->tdma.pbuf = dma_map_single(dev->dev.parent, skb->data,
				       len, DMA_TO_DEVICE);
	td->tdma.cntinfo = (len & HPCDMA_BCNT) |
			   HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX;
	dma_sync_desc_dev(dev, td);
	if (sp->tx_old != sp->tx_new) {
		struct sgiseeq_tx_desc *backend;

		backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
		dma_sync_desc_cpu(dev, backend);
		backend->tdma.cntinfo &= ~HPCDMA_EOX;
		dma_sync_desc_dev(dev, backend);
	}
	sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */

	/* Maybe kick the HPC back into motion. */
	if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE))
		kick_tx(dev, sp, hregs);

	if (!TX_BUFFS_AVAIL(sp))
		netif_stop_queue(dev);
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	return NETDEV_TX_OK;
}

static void timeout(struct net_device *dev)
{
	printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
	sgiseeq_reset(dev);

	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue(dev);
}

static void sgiseeq_set_multicast(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	unsigned char oldmode = sp->mode;

	if (dev->flags & IFF_PROMISC)
		sp->mode = SEEQ_RCMD_RANY;
	else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
		sp->mode = SEEQ_RCMD_RBMCAST;
	else
		sp->mode = SEEQ_RCMD_RBCAST;

	/* XXX I know this sucks, but is there a better way to reprogram
	 * XXX the receiver? At least, this shouldn't happen too often.
	 */

	if (oldmode != sp->mode)
		sgiseeq_reset(dev);
}

static inline void setup_tx_ring(struct net_device *dev,
				 struct sgiseeq_tx_desc *buf,
				 int nbufs)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i = 0;

	while (i < (nbufs - 1)) {
		buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
		buf[i].tdma.pbuf = 0;
		dma_sync_desc_dev(dev, &buf[i]);
		i++;
	}
	buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf);
	dma_sync_desc_dev(dev, &buf[i]);
}

static inline void setup_rx_ring(struct net_device *dev,
				 struct sgiseeq_rx_desc *buf,
				 int nbufs)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i = 0;

	while (i < (nbufs - 1)) {
		buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
		buf[i].rdma.pbuf = 0;
		dma_sync_desc_dev(dev, &buf[i]);
		i++;
	}
	buf[i].rdma.pbuf = 0;
	buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf);
	dma_sync_desc_dev(dev, &buf[i]);
}

static const struct net_device_ops sgiseeq_netdev_ops = {
	.ndo_open		= sgiseeq_open,
	.ndo_stop		= sgiseeq_close,
	.ndo_start_xmit		= sgiseeq_start_xmit,
	.ndo_tx_timeout		= timeout,
	.ndo_set_multicast_list	= sgiseeq_set_multicast,
	.ndo_set_mac_address	= sgiseeq_set_mac_address,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};

static int __devinit sgiseeq_probe(struct platform_device *pdev)
{
	struct sgiseeq_platform_data *pd = pdev->dev.platform_data;
	struct hpc3_regs *hpcregs = pd->hpc;
	struct sgiseeq_init_block *sr;
	unsigned int irq = pd->irq;
	struct sgiseeq_private *sp;
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof (struct sgiseeq_private));
	if (!dev) {
		printk(KERN_ERR "Sgiseeq: Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out;
	}

	platform_set_drvdata(pdev, dev);
	sp = netdev_priv(dev);

	/* The descriptor rings must live in uncached, DMA-able memory. */
	sr = dma_alloc_noncoherent(&pdev->dev, sizeof(*sp->srings),
				   &sp->srings_dma, GFP_KERNEL);
	if (!sr) {
		printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}
	sp->srings = sr;
	sp->rx_desc = sp->srings->rxvector;
	sp->tx_desc = sp->srings->txvector;

	/* A couple calculations now, saves many cycles later. */
	setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS);
	setup_tx_ring(dev, sp->tx_desc, SEEQ_TX_BUFFERS);

	memcpy(dev->dev_addr, pd->mac, ETH_ALEN);

#ifdef DEBUG
	gpriv = sp;
	gdev = dev;
#endif
	sp->sregs = (struct sgiseeq_regs *) &hpcregs->eth_ext[0];
	sp->hregs = &hpcregs->ethregs;
	sp->name = sgiseeqstr;
	sp->mode = SEEQ_RCMD_RBCAST;

	/* Setup PIO and DMA transfer timing */
	sp->hregs->pconfig = 0x161;
	sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP |
			     HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026;

	/* Reset the chip. */
	hpc3_eth_reset(sp->hregs);

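	/* Detect the enhanced EDLC variant: a plain Seeq 8003 apparently
	 * reads back nonzero in the low byte here, while a zero means the
	 * newer part with the extra control register programmed below.
	 */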
	sp->is_edlc = !(sp->sregs->rw.rregs.collision_tx[0] & 0xff);
	if (sp->is_edlc)
		sp->control = SEEQ_CTRL_XCNT | SEEQ_CTRL_ACCNT |
			      SEEQ_CTRL_SFLAG | SEEQ_CTRL_ESHORT |
			      SEEQ_CTRL_ENCARR;

	dev->netdev_ops		= &sgiseeq_netdev_ops;
	dev->watchdog_timeo	= (200 * HZ) / 1000;
	dev->irq		= irq;

	if (register_netdev(dev)) {
		printk(KERN_ERR "Sgiseeq: Cannot register net device, "
		       "aborting.\n");
		err = -ENODEV;
		goto err_out_free_page;
	}

	printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);

	return 0;

err_out_free_page:
	dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
			     sp->srings_dma);
err_out_free_dev:
	free_netdev(dev);

err_out:
	return err;
}

static int __exit sgiseeq_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct sgiseeq_private *sp = netdev_priv(dev);

	unregister_netdev(dev);
	dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
			     sp->srings_dma);
	free_netdev(dev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver sgiseeq_driver = {
	.probe	= sgiseeq_probe,
	.remove	= __exit_p(sgiseeq_remove),
	.driver = {
		.name	= "sgiseeq",
		.owner	= THIS_MODULE,
	}
};

static int __init sgiseeq_module_init(void)
{
	if (platform_driver_register(&sgiseeq_driver)) {
		printk(KERN_ERR "Driver registration failed\n");
		return -ENODEV;
	}

	return 0;
}

static void __exit sgiseeq_module_exit(void)
{
	platform_driver_unregister(&sgiseeq_driver);
}

module_init(sgiseeq_module_init);
module_exit(sgiseeq_module_exit);

MODULE_DESCRIPTION("SGI Seeq 8003 driver");
MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sgiseeq");