/*********************************************************************
 *
 * Filename:      w83977af_ir.c
 * Version:       1.0
 * Description:   FIR driver for the Winbond W83977AF Super I/O chip
 * Status:        Experimental.
 * Author:        Paul VanderSpek
 * Created at:    Wed Nov 4 11:46:16 1998
 * Modified at:   Fri Jan 28 12:10:59 2000
 * Modified by:   Dag Brattli <dagb@cs.uit.no>
 *
 *     Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
 *     Copyright (c) 1998-1999 Rebel.com
 *
 *     This program is free software; you can redistribute it and/or
 *     modify it under the terms of the GNU General Public License as
 *     published by the Free Software Foundation; either version 2 of
 *     the License, or (at your option) any later version.
 *
 *     Neither Paul VanderSpek nor Rebel.com admit liability nor provide
 *     warranty for any of this software. This material is provided "AS-IS"
 *     and at no charge.
 *
 *     If you find bugs in this file, it is very likely that the same bug
 *     will also be in pc87108.c, since the implementations are quite
 *     similar.
 *
 *     Notice that every function that needs to access the chip in _any_
 *     way must save the BSR register on entry and restore it on exit.
 *     It is _very_ important to follow this policy!
 *
 *         __u8 bank;
 *
 *         bank = inb(iobase+BSR);
 *
 *         do_your_stuff_here();
 *
 *         outb(bank, iobase+BSR);
 *
 ********************************************************************/

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/dma-mapping.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>

#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>
#include "w83977af.h"
#include "w83977af_ir.h"

#ifdef CONFIG_ARCH_NETWINDER            /* Adjust to NetWinder differences */
#undef CONFIG_NETWINDER_TX_DMA_PROBLEMS /* Not needed */
#define CONFIG_NETWINDER_RX_DMA_PROBLEMS /* Must have this one! */
#endif
#undef CONFIG_USE_INTERNAL_TIMER	/* Just cannot make that timer work */
#define CONFIG_USE_W977_PNP		/* Currently needed */
#define PIO_MAX_SPEED 115200

static char *driver_name = "w83977af_ir";
static int qos_mtt_bits = 0x07;		/* 1 ms or more */

#define CHIP_IO_EXTENT 8

static unsigned int io[] = { 0x180, ~0, ~0, ~0 };
#ifdef CONFIG_ARCH_NETWINDER            /* Adjust to NetWinder differences */
static unsigned int irq[] = { 6, 0, 0, 0 };
#else
static unsigned int irq[] = { 11, 0, 0, 0 };
#endif
static unsigned int dma[] = { 1, 0, 0, 0 };
static unsigned int efbase[] = { W977_EFIO_BASE, W977_EFIO2_BASE };
static unsigned int efio = W977_EFIO_BASE;

static struct w83977af_ir *dev_self[] = { NULL, NULL, NULL, NULL};

/* Some prototypes */
static int  w83977af_open(int i, unsigned int iobase, unsigned int irq,
			  unsigned int dma);
static int  w83977af_close(struct w83977af_ir *self);
static int  w83977af_probe(int iobase, int irq, int dma);
static int  w83977af_dma_receive(struct w83977af_ir *self);
static int  w83977af_dma_receive_complete(struct w83977af_ir *self);
static int  w83977af_hard_xmit(struct sk_buff *skb, struct net_device *dev);
static int  w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
static void w83977af_dma_write(struct w83977af_ir *self, int iobase);
static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed);
static int  w83977af_is_receiving(struct w83977af_ir *self);

static int  w83977af_net_open(struct net_device *dev);
static int  w83977af_net_close(struct net_device *dev);
static int  w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static struct net_device_stats *w83977af_net_get_stats(struct net_device *dev);

/*
 * Function w83977af_init ()
 *
 *    Initialize chip. Just try to find out how many chips we are dealing with
 *    and where they are
 */
static int __init w83977af_init(void)
{
	int i;

	IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );

	for (i=0; (io[i] < 2000) && (i < ARRAY_SIZE(dev_self)); i++) {
		if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
			return 0;
	}
	return -ENODEV;
}

/*
 * Function w83977af_cleanup ()
 *
 *    Close all configured chips
 *
 */
static void __exit w83977af_cleanup(void)
{
	int i;

	IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );

	for (i=0; i < ARRAY_SIZE(dev_self); i++) {
		if (dev_self[i])
			w83977af_close(dev_self[i]);
	}
}

/*
 * Function w83977af_open (iobase, irq)
 *
 *    Open driver instance
 *
 */
int w83977af_open(int i, unsigned int iobase, unsigned int irq,
		  unsigned int dma)
{
	struct net_device *dev;
	struct w83977af_ir *self;
	int err;

	IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );

	/* Lock the port that we need */
	if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
		IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
			   __FUNCTION__ , iobase);
		return -ENODEV;
	}

	if (w83977af_probe(iobase, irq, dma) == -1) {
		err = -1;
		goto err_out;
	}
	/*
	 * Allocate new instance of the driver
	 */
	dev = alloc_irdadev(sizeof(struct w83977af_ir));
	if (dev == NULL) {
		printk(KERN_ERR "IrDA: Can't allocate memory for "
		       "IrDA control block!\n");
		err = -ENOMEM;
		goto err_out;
	}

	self = dev->priv;
	spin_lock_init(&self->lock);


	/* Initialize IO */
	self->io.fir_base  = iobase;
	self->io.irq       = irq;
	self->io.fir_ext   = CHIP_IO_EXTENT;
	self->io.dma       = dma;
	self->io.fifo_size = 32;

	/* Initialize QoS for this device */
	irda_init_max_qos_capabilies(&self->qos);

	/* The only value we must override is the baud rate */

	/* FIXME: The HP HDLS-1100 does not support 1152000! */
	self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
		IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8);

	/* The HP HDLS-1100 needs 1 ms according to the specs */
	self->qos.min_turn_time.bits = qos_mtt_bits;
	irda_qos_bits_to_value(&self->qos);

	/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
	self->rx_buff.truesize = 14384;
	self->tx_buff.truesize = 4000;
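	/*
	 * Sanity check on the size above (a worked example, assuming
	 * data_size = 2048 -- the receiver frame length programmed in
	 * w83977af_probe() -- and an IrLAP window_size of 7):
	 * (2048 + 6) * 7 + 6 = 14384, which matches rx_buff.truesize.
	 */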

	/* Allocate memory if needed */
	self->rx_buff.head =
		dma_alloc_coherent(NULL, self->rx_buff.truesize,
				   &self->rx_buff_dma, GFP_KERNEL);
	if (self->rx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out1;
	}

	memset(self->rx_buff.head, 0, self->rx_buff.truesize);

	self->tx_buff.head =
		dma_alloc_coherent(NULL, self->tx_buff.truesize,
				   &self->tx_buff_dma, GFP_KERNEL);
	if (self->tx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out2;
	}
	memset(self->tx_buff.head, 0, self->tx_buff.truesize);

	self->rx_buff.in_frame = FALSE;
	self->rx_buff.state = OUTSIDE_FRAME;
	self->tx_buff.data = self->tx_buff.head;
	self->rx_buff.data = self->rx_buff.head;
	self->netdev = dev;

	/* Override the network functions we need to use */
	dev->hard_start_xmit = w83977af_hard_xmit;
	dev->open            = w83977af_net_open;
	dev->stop            = w83977af_net_close;
	dev->do_ioctl        = w83977af_net_ioctl;
	dev->get_stats       = w83977af_net_get_stats;

	err = register_netdev(dev);
	if (err) {
		IRDA_ERROR("%s(), register_netdevice() failed!\n", __FUNCTION__);
		goto err_out3;
	}
	IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);

	/* Need to store self somewhere */
	dev_self[i] = self;

	return 0;
err_out3:
	dma_free_coherent(NULL, self->tx_buff.truesize,
			  self->tx_buff.head, self->tx_buff_dma);
err_out2:
	dma_free_coherent(NULL, self->rx_buff.truesize,
			  self->rx_buff.head, self->rx_buff_dma);
err_out1:
	free_netdev(dev);
err_out:
	release_region(iobase, CHIP_IO_EXTENT);
	return err;
}

/*
 * Function w83977af_close (self)
 *
 *    Close driver instance
 *
 */
static int w83977af_close(struct w83977af_ir *self)
{
	int iobase;

	IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );

	iobase = self->io.fir_base;

#ifdef CONFIG_USE_W977_PNP
	/* enter PnP configuration mode */
	w977_efm_enter(efio);

	w977_select_device(W977_DEVICE_IR, efio);

	/* Deactivate device */
	w977_write_reg(0x30, 0x00, efio);

	w977_efm_exit(efio);
#endif /* CONFIG_USE_W977_PNP */

	/* Remove netdevice */
	unregister_netdev(self->netdev);

	/* Release the PORT that this driver is using */
	IRDA_DEBUG(0, "%s(), Releasing Region %03x\n",
		   __FUNCTION__ , self->io.fir_base);
	release_region(self->io.fir_base, self->io.fir_ext);

	if (self->tx_buff.head)
		dma_free_coherent(NULL, self->tx_buff.truesize,
				  self->tx_buff.head, self->tx_buff_dma);

	if (self->rx_buff.head)
		dma_free_coherent(NULL, self->rx_buff.truesize,
				  self->rx_buff.head, self->rx_buff_dma);

	free_netdev(self->netdev);

	return 0;
}

int w83977af_probe(int iobase, int irq, int dma)
{
	int version;
	int i;

	for (i=0; i < 2; i++) {
		IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );
#ifdef CONFIG_USE_W977_PNP
		/* Enter PnP configuration mode */
		w977_efm_enter(efbase[i]);

		w977_select_device(W977_DEVICE_IR, efbase[i]);

		/* Configure PnP port, IRQ, and DMA channel */
		w977_write_reg(0x60, (iobase >> 8) & 0xff, efbase[i]);
		w977_write_reg(0x61, (iobase) & 0xff, efbase[i]);

		w977_write_reg(0x70, irq, efbase[i]);
#ifdef CONFIG_ARCH_NETWINDER
		/* Netwinder uses 1 higher than Linux */
		w977_write_reg(0x74, dma+1, efbase[i]);
#else
		w977_write_reg(0x74, dma, efbase[i]);
#endif /* CONFIG_ARCH_NETWINDER */
		w977_write_reg(0x75, 0x04, efbase[i]);	/* Disable Tx DMA */

		/* Set append hardware CRC, enable IR bank selection */
		w977_write_reg(0xf0, APEDCRC|ENBNKSEL, efbase[i]);

		/* Activate device */
		w977_write_reg(0x30, 0x01, efbase[i]);

		w977_efm_exit(efbase[i]);
#endif /* CONFIG_USE_W977_PNP */
		/* Disable Advanced mode */
		switch_bank(iobase, SET2);
		outb(0x00, iobase+2);

		/* Turn on UART (global) interrupts */
		switch_bank(iobase, SET0);
		outb(HCR_EN_IRQ, iobase+HCR);

		/* Switch to advanced mode */
		switch_bank(iobase, SET2);
		outb(inb(iobase+ADCR1) | ADCR1_ADV_SL, iobase+ADCR1);

		/* Set default IR-mode */
		switch_bank(iobase, SET0);
		outb(HCR_SIR, iobase+HCR);

		/* Read the Advanced IR ID */
		switch_bank(iobase, SET3);
		version = inb(iobase+AUID);

		/* Should be 0x1? */
		if (0x10 == (version & 0xf0)) {
			efio = efbase[i];

			/* Set FIFO size to 32 */
			switch_bank(iobase, SET2);
			outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);

			/* Set FIFO threshold to TX17, RX16 */
			switch_bank(iobase, SET0);
			outb(UFR_RXTL|UFR_TXTL|UFR_TXF_RST|UFR_RXF_RST|
			     UFR_EN_FIFO, iobase+UFR);

			/* Receiver frame length */
			switch_bank(iobase, SET4);
			outb(2048 & 0xff, iobase+6);
			outb((2048 >> 8) & 0x1f, iobase+7);

			/*
			 * Init HP HSDL-1100 transceiver.
			 *
			 * Set IRX_MSL since we have 2 * receive paths IRRX,
			 * and IRRXH. Clear IRSL0D since we want IRSL0 to
			 * be an input pin used for IRRXH
			 *
			 *   IRRX  pin 37 connected to receiver
			 *   IRTX  pin 38 connected to transmitter
			 *   FIRRX pin 39 connected to receiver (IRSL0)
			 *   CIRRX pin 40 connected to pin 37
			 */
			switch_bank(iobase, SET7);
			outb(0x40, iobase+7);

			IRDA_MESSAGE("W83977AF (IR) driver loaded. "
				     "Version: 0x%02x\n", version);

			return 0;
		} else {
			/* Try next extended function register address */
			IRDA_DEBUG(0, "%s(), Wrong chip version", __FUNCTION__ );
		}
	}
	return -1;
}

void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
{
	int ir_mode = HCR_SIR;
	int iobase;
	__u8 set;

	iobase = self->io.fir_base;

	/* Update accounting for new speed */
	self->io.speed = speed;

	/* Save current bank */
	set = inb(iobase+SSR);

	/* Disable interrupts */
	switch_bank(iobase, SET0);
	outb(0, iobase+ICR);

	/* Select Set 2 */
	switch_bank(iobase, SET2);
	outb(0x00, iobase+ABHL);

	switch (speed) {
	case 9600:   outb(0x0c, iobase+ABLL); break;
	case 19200:  outb(0x06, iobase+ABLL); break;
	case 38400:  outb(0x03, iobase+ABLL); break;
	case 57600:  outb(0x02, iobase+ABLL); break;
	case 115200: outb(0x01, iobase+ABLL); break;
	case 576000:
		ir_mode = HCR_MIR_576;
		IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __FUNCTION__ );
		break;
	case 1152000:
		ir_mode = HCR_MIR_1152;
		IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __FUNCTION__ );
		break;
	case 4000000:
		ir_mode = HCR_FIR;
		IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __FUNCTION__ );
		break;
	default:
		ir_mode = HCR_FIR;
		IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n", __FUNCTION__ , speed);
		break;
	}
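	/*
	 * Note on the ABLL values above: they follow a 115200/speed
	 * divisor pattern (115200/9600 = 12 = 0x0c, /19200 = 6 = 0x06,
	 * /38400 = 3, /57600 = 2, /115200 = 1), assuming the SIR baud
	 * generator is clocked for a maximum of 115200 baud.
	 */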

	/* Set speed mode */
	switch_bank(iobase, SET0);
	outb(ir_mode, iobase+HCR);

	/* set FIFO size to 32 */
	switch_bank(iobase, SET2);
	outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);

	/* set FIFO threshold to TX17, RX16 */
	switch_bank(iobase, SET0);
	outb(0x00, iobase+UFR);        /* Reset */
	outb(UFR_EN_FIFO, iobase+UFR); /* First we must enable FIFO */
	outb(0xa7, iobase+UFR);

	netif_wake_queue(self->netdev);

	/* Enable some interrupts so we can receive frames */
	switch_bank(iobase, SET0);
	if (speed > PIO_MAX_SPEED) {
		outb(ICR_EFSFI, iobase+ICR);
		w83977af_dma_receive(self);
	} else
		outb(ICR_ERBRI, iobase+ICR);

	/* Restore SSR */
	outb(set, iobase+SSR);
}

/*
 * Function w83977af_hard_xmit (skb, dev)
 *
 *    Sets up a DMA transfer to send the current frame.
 *
 */
int w83977af_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct w83977af_ir *self;
	__s32 speed;
	int iobase;
	__u8 set;
	int mtt;

	self = (struct w83977af_ir *) dev->priv;

	iobase = self->io.fir_base;

	IRDA_DEBUG(4, "%s(%ld), skb->len=%d\n", __FUNCTION__ , jiffies,
		   (int) skb->len);

	/* Lock transmit buffer */
	netif_stop_queue(dev);

	/* Check if we need to change the speed */
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		/* Check for empty frame */
		if (!skb->len) {
			w83977af_change_speed(self, speed);
			dev->trans_start = jiffies;
			dev_kfree_skb(skb);
			return 0;
		} else
			self->new_speed = speed;
	}

	/* Save current set */
	set = inb(iobase+SSR);

	/* Decide if we should use PIO or DMA transfer */
	if (self->io.speed > PIO_MAX_SPEED) {
		self->tx_buff.data = self->tx_buff.head;
		skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len);
		self->tx_buff.len = skb->len;

		mtt = irda_get_mtt(skb);
#ifdef CONFIG_USE_INTERNAL_TIMER
		if (mtt > 50) {
			/* Adjust for timer resolution */
			mtt /= 1000+1;

			/* Setup timer */
			switch_bank(iobase, SET4);
			outb(mtt & 0xff, iobase+TMRL);
			outb((mtt >> 8) & 0x0f, iobase+TMRH);

			/* Start timer */
			outb(IR_MSL_EN_TMR, iobase+IR_MSL);
			self->io.direction = IO_XMIT;

			/* Enable timer interrupt */
			switch_bank(iobase, SET0);
			outb(ICR_ETMRI, iobase+ICR);
		} else {
#endif
			IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __FUNCTION__ , jiffies, mtt);
			if (mtt)
				udelay(mtt);

			/* Enable DMA interrupt */
			switch_bank(iobase, SET0);
			outb(ICR_EDMAI, iobase+ICR);
			w83977af_dma_write(self, iobase);
#ifdef CONFIG_USE_INTERNAL_TIMER
		}
#endif
	} else {
		self->tx_buff.data = self->tx_buff.head;
		self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
						   self->tx_buff.truesize);

		/* Add interrupt on tx low level (will fire immediately) */
		switch_bank(iobase, SET0);
		outb(ICR_ETXTHI, iobase+ICR);
	}
	dev->trans_start = jiffies;
	dev_kfree_skb(skb);

	/* Restore set register */
	outb(set, iobase+SSR);

	return 0;
}

/*
 * Function w83977af_dma_write (self, iobase)
 *
 *    Send frame using DMA
 *
 */
static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
{
	__u8 set;
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
	unsigned long flags;
	__u8 hcr;
#endif
	IRDA_DEBUG(4, "%s(), len=%d\n", __FUNCTION__ , self->tx_buff.len);

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable DMA */
	switch_bank(iobase, SET0);
	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);

	/* Choose transmit DMA channel */
	switch_bank(iobase, SET2);
	outb(ADCR1_D_CHSW|/*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase+ADCR1);
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
	spin_lock_irqsave(&self->lock, flags);

	disable_dma(self->io.dma);
	clear_dma_ff(self->io.dma);
	set_dma_mode(self->io.dma, DMA_MODE_READ);
	set_dma_addr(self->io.dma, self->tx_buff_dma);
	set_dma_count(self->io.dma, self->tx_buff.len);
#else
	irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
		       DMA_MODE_WRITE);
#endif
	self->io.direction = IO_XMIT;

	/* Enable DMA */
	switch_bank(iobase, SET0);
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
	hcr = inb(iobase+HCR);
	outb(hcr | HCR_EN_DMA, iobase+HCR);
	enable_dma(self->io.dma);
	spin_unlock_irqrestore(&self->lock, flags);
#else
	outb(inb(iobase+HCR) | HCR_EN_DMA | HCR_TX_WT, iobase+HCR);
#endif

	/* Restore set register */
	outb(set, iobase+SSR);
}

/*
 * Function w83977af_pio_write (iobase, buf, len, fifo_size)
 *
 *    Write as much of the frame as will fit into the transmit FIFO
 *    using PIO, and return the number of bytes actually written
 *
 */
static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
{
	int actual = 0;
	__u8 set;

	IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );

	/* Save current bank */
	set = inb(iobase+SSR);

	switch_bank(iobase, SET0);
	if (!(inb_p(iobase+USR) & USR_TSRE)) {
		IRDA_DEBUG(4,
			   "%s(), warning, FIFO not empty yet!\n", __FUNCTION__ );

		fifo_size -= 17;
		IRDA_DEBUG(4, "%s(), %d bytes left in tx fifo\n",
			   __FUNCTION__ , fifo_size);
	}

	/* Fill FIFO with current frame */
	while ((fifo_size-- > 0) && (actual < len)) {
		/* Transmit next byte */
		outb(buf[actual++], iobase+TBR);
	}

	IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n",
		   __FUNCTION__ , fifo_size, actual, len);

	/* Restore bank */
	outb(set, iobase+SSR);

	return actual;
}

/*
 * Function w83977af_dma_xmit_complete (self)
 *
 *    The transfer of a frame has finished, so do the necessary cleanup.
 *
 */
static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
{
	int iobase;
	__u8 set;

	IRDA_DEBUG(4, "%s(%ld)\n", __FUNCTION__ , jiffies);

	IRDA_ASSERT(self != NULL, return;);

	iobase = self->io.fir_base;

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable DMA */
	switch_bank(iobase, SET0);
	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);

	/* Check for underrun! */
	if (inb(iobase+AUDR) & AUDR_UNDR) {
		IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __FUNCTION__ );

		self->stats.tx_errors++;
		self->stats.tx_fifo_errors++;

		/* Clear bit, by writing 1 to it */
		outb(AUDR_UNDR, iobase+AUDR);
	} else
		self->stats.tx_packets++;


	if (self->new_speed) {
		w83977af_change_speed(self, self->new_speed);
		self->new_speed = 0;
	}

	/* Unlock tx_buff and request another frame */
	/* Tell the network layer that we want more frames */
	netif_wake_queue(self->netdev);

	/* Restore set */
	outb(set, iobase+SSR);
}

/*
 * Function w83977af_dma_receive (self)
 *
 *    Get ready for receiving a frame. The device will initiate a DMA
 *    if it starts to receive a frame.
 *
 */
int w83977af_dma_receive(struct w83977af_ir *self)
{
	int iobase;
	__u8 set;
#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
	unsigned long flags;
	__u8 hcr;
#endif
	IRDA_ASSERT(self != NULL, return -1;);

	IRDA_DEBUG(4, "%s\n", __FUNCTION__ );

	iobase = self->io.fir_base;

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable DMA */
	switch_bank(iobase, SET0);
	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);

	/* Choose DMA Rx, DMA Fairness, and Advanced mode */
	switch_bank(iobase, SET2);
	outb((inb(iobase+ADCR1) & ~ADCR1_D_CHSW)/*|ADCR1_DMA_F*/|ADCR1_ADV_SL,
	     iobase+ADCR1);

	self->io.direction = IO_RECV;
	self->rx_buff.data = self->rx_buff.head;

#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
	spin_lock_irqsave(&self->lock, flags);

	disable_dma(self->io.dma);
	clear_dma_ff(self->io.dma);
	set_dma_mode(self->io.dma, DMA_MODE_READ);
	set_dma_addr(self->io.dma, self->rx_buff_dma);
	set_dma_count(self->io.dma, self->rx_buff.truesize);
#else
	irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
		       DMA_MODE_READ);
#endif
	/*
	 * Reset Rx FIFO. This will also flush the ST_FIFO, it's very
	 * important that we don't reset the Tx FIFO since it might not
	 * be finished transmitting yet
	 */
	switch_bank(iobase, SET0);
	outb(UFR_RXTL|UFR_TXTL|UFR_RXF_RST|UFR_EN_FIFO, iobase+UFR);
	self->st_fifo.len = self->st_fifo.tail = self->st_fifo.head = 0;

	/* Enable DMA */
	switch_bank(iobase, SET0);
#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
	hcr = inb(iobase+HCR);
	outb(hcr | HCR_EN_DMA, iobase+HCR);
	enable_dma(self->io.dma);
	spin_unlock_irqrestore(&self->lock, flags);
#else
	outb(inb(iobase+HCR) | HCR_EN_DMA, iobase+HCR);
#endif
	/* Restore set */
	outb(set, iobase+SSR);

	return 0;
}

/*
 * Function w83977af_dma_receive_complete (self)
 *
 *    Finished with receiving a frame
 *
 */
int w83977af_dma_receive_complete(struct w83977af_ir *self)
{
	struct sk_buff *skb;
	struct st_fifo *st_fifo;
	int len;
	int iobase;
	__u8 set;
	__u8 status;

	IRDA_DEBUG(4, "%s\n", __FUNCTION__ );

	st_fifo = &self->st_fifo;

	iobase = self->io.fir_base;

	/* Save current set */
	set = inb(iobase+SSR);

	iobase = self->io.fir_base;

	/* Read status FIFO */
	switch_bank(iobase, SET5);
	while ((status = inb(iobase+FS_FO)) & FS_FO_FSFDR) {
		st_fifo->entries[st_fifo->tail].status = status;

		st_fifo->entries[st_fifo->tail].len  = inb(iobase+RFLFL);
		st_fifo->entries[st_fifo->tail].len |= inb(iobase+RFLFH) << 8;

		st_fifo->tail++;
		st_fifo->len++;
	}

	while (st_fifo->len) {
		/* Get first entry */
		status = st_fifo->entries[st_fifo->head].status;
		len    = st_fifo->entries[st_fifo->head].len;
		st_fifo->head++;
		st_fifo->len--;

		/* Check for errors */
		if (status & FS_FO_ERR_MSK) {
			if (status & FS_FO_LST_FR) {
				/* Add number of lost frames to stats */
				self->stats.rx_errors += len;
			} else {
				/* Skip frame */
				self->stats.rx_errors++;

				self->rx_buff.data += len;

				if (status & FS_FO_MX_LEX)
					self->stats.rx_length_errors++;

				if (status & FS_FO_PHY_ERR)
					self->stats.rx_frame_errors++;

				if (status & FS_FO_CRC_ERR)
					self->stats.rx_crc_errors++;
			}
			/* The errors below can be reported in both cases */
			if (status & FS_FO_RX_OV)
				self->stats.rx_fifo_errors++;

			if (status & FS_FO_FSF_OV)
				self->stats.rx_fifo_errors++;

		} else {
			/* Check if we have transferred all data to memory */
			switch_bank(iobase, SET0);
			if (inb(iobase+USR) & USR_RDR) {
#ifdef CONFIG_USE_INTERNAL_TIMER
				/* Put this entry back in fifo */
				st_fifo->head--;
				st_fifo->len++;
				st_fifo->entries[st_fifo->head].status = status;
				st_fifo->entries[st_fifo->head].len = len;

				/* Restore set register */
				outb(set, iobase+SSR);

				return FALSE;	/* I'll be back! */
#else
				udelay(80); /* Should be enough!? */
#endif
			}

			skb = dev_alloc_skb(len+1);
			if (skb == NULL)  {
				printk(KERN_INFO
				       "%s(), memory squeeze, dropping frame.\n", __FUNCTION__);
				/* Restore set register */
				outb(set, iobase+SSR);

				return FALSE;
			}

			/* Align to 20 bytes */
			skb_reserve(skb, 1);

			/* Copy frame without CRC */
			if (self->io.speed < 4000000) {
				skb_put(skb, len-2);
				skb_copy_to_linear_data(skb,
							self->rx_buff.data,
							len - 2);
			} else {
				skb_put(skb, len-4);
				skb_copy_to_linear_data(skb,
							self->rx_buff.data,
							len - 4);
			}

			/* Move to next frame */
			self->rx_buff.data += len;
			self->stats.rx_packets++;

			skb->dev = self->netdev;
			skb_reset_mac_header(skb);
			skb->protocol = htons(ETH_P_IRDA);
			netif_rx(skb);
			self->netdev->last_rx = jiffies;
		}
	}
	/* Restore set register */
	outb(set, iobase+SSR);

	return TRUE;
}

/*
 * Function w83977af_pio_receive (self)
 *
 *    Receive all data in receiver FIFO
 *
 */
static void w83977af_pio_receive(struct w83977af_ir *self)
{
	__u8 byte = 0x00;
	int iobase;

	IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );

	IRDA_ASSERT(self != NULL, return;);

	iobase = self->io.fir_base;

	/* Receive all characters in Rx FIFO */
	do {
		byte = inb(iobase+RBR);
		async_unwrap_char(self->netdev, &self->stats, &self->rx_buff,
				  byte);
	} while (inb(iobase+USR) & USR_RDR); /* Data available */
}

/*
 * Function w83977af_sir_interrupt (self, eir)
 *
 *    Handle SIR interrupt
 *
 */
static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
{
	int actual;
	__u8 new_icr = 0;
	__u8 set;
	int iobase;

	IRDA_DEBUG(4, "%s(), isr=%#x\n", __FUNCTION__ , isr);

	iobase = self->io.fir_base;
	/* Transmit FIFO low on data */
	if (isr & ISR_TXTH_I) {
		/* Write data left in transmit buffer */
		actual = w83977af_pio_write(self->io.fir_base,
					    self->tx_buff.data,
					    self->tx_buff.len,
					    self->io.fifo_size);

		self->tx_buff.data += actual;
		self->tx_buff.len  -= actual;

		self->io.direction = IO_XMIT;

		/* Check if finished */
		if (self->tx_buff.len > 0) {
			new_icr |= ICR_ETXTHI;
		} else {
			set = inb(iobase+SSR);
			switch_bank(iobase, SET0);
			outb(AUDR_SFEND, iobase+AUDR);
			outb(set, iobase+SSR);

			self->stats.tx_packets++;

			/* Feed me more packets */
			netif_wake_queue(self->netdev);
			new_icr |= ICR_ETBREI;
		}
	}
	/* Check if transmission has completed */
	if (isr & ISR_TXEMP_I) {
		/* Check if we need to change the speed? */
		if (self->new_speed) {
			IRDA_DEBUG(2,
				   "%s(), Changing speed!\n", __FUNCTION__ );
			w83977af_change_speed(self, self->new_speed);
			self->new_speed = 0;
		}

		/* Turn around and get ready to receive some data */
		self->io.direction = IO_RECV;
		new_icr |= ICR_ERBRI;
	}

	/* Rx FIFO threshold or timeout */
	if (isr & ISR_RXTH_I) {
		w83977af_pio_receive(self);

		/* Keep receiving */
		new_icr |= ICR_ERBRI;
	}
	return new_icr;
}

/*
 * Function w83977af_fir_interrupt (self, eir)
 *
 *    Handle MIR/FIR interrupt
 *
 */
static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
{
	__u8 new_icr = 0;
	__u8 set;
	int iobase;

	iobase = self->io.fir_base;
	set = inb(iobase+SSR);

	/* End of frame detected in FIFO */
	if (isr & (ISR_FEND_I|ISR_FSF_I)) {
		if (w83977af_dma_receive_complete(self)) {

			/* Wait for next status FIFO interrupt */
			new_icr |= ICR_EFSFI;
		} else {
			/* DMA not finished yet */

			/* Set timer value, resolution 1 ms */
			switch_bank(iobase, SET4);
			outb(0x01, iobase+TMRL); /* 1 ms */
			outb(0x00, iobase+TMRH);

			/* Start timer */
			outb(IR_MSL_EN_TMR, iobase+IR_MSL);

			new_icr |= ICR_ETMRI;
		}
	}
	/* Timer finished */
	if (isr & ISR_TMR_I) {
		/* Disable timer */
		switch_bank(iobase, SET4);
		outb(0, iobase+IR_MSL);

		/* Clear timer event */
		/* switch_bank(iobase, SET0); */
		/* outb(ASCR_CTE, iobase+ASCR); */

		/* Check if this is a TX timer interrupt */
		if (self->io.direction == IO_XMIT) {
			w83977af_dma_write(self, iobase);

			new_icr |= ICR_EDMAI;
		} else {
			/* Check if DMA has now finished */
			w83977af_dma_receive_complete(self);

			new_icr |= ICR_EFSFI;
		}
	}
	/* Finished with DMA */
	if (isr & ISR_DMA_I) {
		w83977af_dma_xmit_complete(self);

		/* Check if there are more frames to be transmitted */
		/* if (irda_device_txqueue_empty(self)) { */

		/* Prepare for receive
		 *
		 * ** Netwinder Tx DMA likes that we do this anyway **
		 */
		w83977af_dma_receive(self);
		new_icr = ICR_EFSFI;
		/* } */
	}

	/* Restore set */
	outb(set, iobase+SSR);

	return new_icr;
}

/*
 * Function w83977af_interrupt (irq, dev_id, regs)
 *
 *    An interrupt from the chip has arrived. Time to do some work
 *
 */
static irqreturn_t w83977af_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct w83977af_ir *self;
	__u8 set, icr, isr;
	int iobase;

	self = dev->priv;

	iobase = self->io.fir_base;

	/* Save current bank */
	set = inb(iobase+SSR);
	switch_bank(iobase, SET0);

	icr = inb(iobase+ICR);
	isr = inb(iobase+ISR) & icr; /* Mask out the interesting ones */

	outb(0, iobase+ICR); /* Disable interrupts */

	if (isr) {
		/* Dispatch interrupt handler for the current speed */
		if (self->io.speed > PIO_MAX_SPEED)
			icr = w83977af_fir_interrupt(self, isr);
		else
			icr = w83977af_sir_interrupt(self, isr);
	}

	outb(icr, iobase+ICR); /* Restore (new) interrupts */
	outb(set, iobase+SSR); /* Restore bank register */
	return IRQ_RETVAL(isr);
}

/*
 * Function w83977af_is_receiving (self)
 *
 *    Return TRUE if we are currently receiving a frame
 *
 */
static int w83977af_is_receiving(struct w83977af_ir *self)
{
	int status = FALSE;
	int iobase;
	__u8 set;

	IRDA_ASSERT(self != NULL, return FALSE;);

	if (self->io.speed > 115200) {
		iobase = self->io.fir_base;

		/* Check if rx FIFO is not empty */
		set = inb(iobase+SSR);
		switch_bank(iobase, SET2);
		if ((inb(iobase+RXFDTH) & 0x3f) != 0) {
			/* We are receiving something */
			status = TRUE;
		}
		outb(set, iobase+SSR);
	} else
		status = (self->rx_buff.state != OUTSIDE_FRAME);

	return status;
}

/*
 * Function w83977af_net_open (dev)
 *
 *    Start the device
 *
 */
static int w83977af_net_open(struct net_device *dev)
{
	struct w83977af_ir *self;
	int iobase;
	char hwname[32];
	__u8 set;

	IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );

	IRDA_ASSERT(dev != NULL, return -1;);
	self = (struct w83977af_ir *) dev->priv;

	IRDA_ASSERT(self != NULL, return 0;);

	iobase = self->io.fir_base;

	if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name,
			(void *) dev)) {
		return -EAGAIN;
	}
	/*
	 * Always allocate the DMA channel after the IRQ,
	 * and clean up on failure.
	 */
	if (request_dma(self->io.dma, dev->name)) {
		free_irq(self->io.irq, self);
		return -EAGAIN;
	}

	/* Save current set */
	set = inb(iobase+SSR);

	/* Enable some interrupts so we can receive frames again */
	switch_bank(iobase, SET0);
	if (self->io.speed > 115200) {
		outb(ICR_EFSFI, iobase+ICR);
		w83977af_dma_receive(self);
	} else
		outb(ICR_ERBRI, iobase+ICR);

	/* Restore bank register */
	outb(set, iobase+SSR);

	/* Ready to play! */
	netif_start_queue(dev);

	/* Give self a hardware name */
	sprintf(hwname, "w83977af @ 0x%03x", self->io.fir_base);

	/*
	 * Open new IrLAP layer instance, now that everything should be
	 * initialized properly
	 */
	self->irlap = irlap_open(dev, &self->qos, hwname);

	return 0;
}

/*
 * Function w83977af_net_close (dev)
 *
 *    Stop the device
 *
 */
static int w83977af_net_close(struct net_device *dev)
{
	struct w83977af_ir *self;
	int iobase;
	__u8 set;

	IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );

	IRDA_ASSERT(dev != NULL, return -1;);

	self = (struct w83977af_ir *) dev->priv;

	IRDA_ASSERT(self != NULL, return 0;);

	iobase = self->io.fir_base;

	/* Stop device */
	netif_stop_queue(dev);

	/* Stop and remove instance of IrLAP */
	if (self->irlap)
		irlap_close(self->irlap);
	self->irlap = NULL;

	disable_dma(self->io.dma);

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable interrupts */
	switch_bank(iobase, SET0);
	outb(0, iobase+ICR);

	free_irq(self->io.irq, dev);
	free_dma(self->io.dma);

	/* Restore bank register */
	outb(set, iobase+SSR);

	return 0;
}

/*
 * Function w83977af_net_ioctl (dev, rq, cmd)
 *
 *    Process IOCTL commands for this device
 *
 */
static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct if_irda_req *irq = (struct if_irda_req *) rq;
	struct w83977af_ir *self;
	unsigned long flags;
	int ret = 0;

	IRDA_ASSERT(dev != NULL, return -1;);

	self = dev->priv;

	IRDA_ASSERT(self != NULL, return -1;);

	IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__ , dev->name, cmd);

	spin_lock_irqsave(&self->lock, flags);

	switch (cmd) {
	case SIOCSBANDWIDTH: /* Set bandwidth */
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		w83977af_change_speed(self, irq->ifr_baudrate);
		break;
	case SIOCSMEDIABUSY: /* Set media busy */
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		irda_device_set_media_busy(self->netdev, TRUE);
		break;
	case SIOCGRECEIVING: /* Check if we are receiving right now */
		irq->ifr_receiving = w83977af_is_receiving(self);
		break;
	default:
		ret = -EOPNOTSUPP;
	}
out:
	spin_unlock_irqrestore(&self->lock, flags);
	return ret;
}

static struct net_device_stats *w83977af_net_get_stats(struct net_device *dev)
{
	struct w83977af_ir *self = (struct w83977af_ir *) dev->priv;

	return &self->stats;
}

MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
MODULE_DESCRIPTION("Winbond W83977AF IrDA Device Driver");
MODULE_LICENSE("GPL");


module_param(qos_mtt_bits, int, 0);
MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
module_param_array(io, int, NULL, 0);
MODULE_PARM_DESC(io, "Base I/O addresses");
module_param_array(irq, int, NULL, 0);
MODULE_PARM_DESC(irq, "IRQ lines");
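
/*
 * Example module load using the defaults declared above; adjust io and
 * irq to match how the Super I/O chip is wired on the actual board
 * (on a NetWinder the default IRQ is 6 instead of 11):
 *
 *   modprobe w83977af_ir io=0x180 irq=11 qos_mtt_bits=0x07
 */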

/*
 * Function init_module (void)
 *
 *    Module entry point: probe for and register the adapters
 *
 */
module_init(w83977af_init);

/*
 * Function cleanup_module (void)
 *
 *    Module exit point: close all configured chips
 *
 */
module_exit(w83977af_cleanup);