/*********************************************************************
 *
 * Filename:      w83977af_ir.c
 * Version:       1.0
 * Description:   FIR driver for the Winbond W83977AF Super I/O chip
 * Status:        Experimental.
 * Author:        Paul VanderSpek
 * Created at:    Wed Nov 4 11:46:16 1998
 * Modified at:   Fri Jan 28 12:10:59 2000
 * Modified by:   Dag Brattli <dagb@cs.uit.no>
 *
 * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
 * Copyright (c) 1998-1999 Rebel.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * Neither Paul VanderSpek nor Rebel.com admit liability nor provide
 * warranty for any of this software. This material is provided "AS-IS"
 * and at no charge.
 *
 * If you find bugs in this file, it is very likely that the same bug
 * will also be in pc87108.c, since the implementations are quite
 * similar.
 *
 * Notice that all functions that need to access the chip in _any_
 * way must save the BSR register on entry and restore it on exit.
 * It is _very_ important to follow this policy!
 *
 *     __u8 bank;
 *
 *     bank = inb(iobase + BSR);
 *
 *     do_your_stuff_here();
 *
 *     outb(bank, iobase + BSR);
 *
 ********************************************************************/

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>

#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>
#include "w83977af.h"
#include "w83977af_ir.h"

#ifdef CONFIG_ARCH_NETWINDER             /* Adjust to NetWinder differences */
#undef CONFIG_NETWINDER_TX_DMA_PROBLEMS  /* Not needed */
#define CONFIG_NETWINDER_RX_DMA_PROBLEMS /* Must have this one! */
#endif
#define CONFIG_USE_W977_PNP              /* Currently needed */
#define PIO_MAX_SPEED 115200

static char *driver_name = "w83977af_ir";
static int qos_mtt_bits = 0x07;          /* 1 ms or more */

#define CHIP_IO_EXTENT 8

static unsigned int io[] = { 0x180, ~0, ~0, ~0 };
#ifdef CONFIG_ARCH_NETWINDER             /* Adjust to NetWinder differences */
static unsigned int irq[] = { 6, 0, 0, 0 };
#else
static unsigned int irq[] = { 11, 0, 0, 0 };
#endif
static unsigned int dma[] = { 1, 0, 0, 0 };
static unsigned int efbase[] = { W977_EFIO_BASE, W977_EFIO2_BASE };
static unsigned int efio = W977_EFIO_BASE;

static struct w83977af_ir *dev_self[] = { NULL, NULL, NULL, NULL };

/* Some prototypes */
static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
			 unsigned int dma);
static int w83977af_close(struct w83977af_ir *self);
static int w83977af_probe(int iobase, int irq, int dma);
static int w83977af_dma_receive(struct w83977af_ir *self);
static int w83977af_dma_receive_complete(struct w83977af_ir *self);
static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
				      struct net_device *dev);
static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
static void w83977af_dma_write(struct w83977af_ir *self, int iobase);
static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed);
static int w83977af_is_receiving(struct w83977af_ir *self);

static int w83977af_net_open(struct net_device *dev);
static int w83977af_net_close(struct net_device *dev);
static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

/*
 * Function w83977af_init ()
 *
 *    Initialize chip. Just try to find out how many chips we are dealing
 *    with and where they are.
 */
static int __init w83977af_init(void)
{
	int i;

	IRDA_DEBUG(0, "%s()\n", __func__);

	for (i = 0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) {
		if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
			return 0;
	}
	return -ENODEV;
}

/*
 * Function w83977af_cleanup ()
 *
 *    Close all configured chips
 *
 */
static void __exit w83977af_cleanup(void)
{
	int i;

	IRDA_DEBUG(4, "%s()\n", __func__);

	for (i = 0; i < ARRAY_SIZE(dev_self); i++) {
		if (dev_self[i])
			w83977af_close(dev_self[i]);
	}
}

static const struct net_device_ops w83977_netdev_ops = {
	.ndo_open       = w83977af_net_open,
	.ndo_stop       = w83977af_net_close,
	.ndo_start_xmit = w83977af_hard_xmit,
	.ndo_do_ioctl   = w83977af_net_ioctl,
};

/*
 * Function w83977af_open (iobase, irq)
 *
 *    Open driver instance
 *
 */
static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
			 unsigned int dma)
{
	struct net_device *dev;
	struct w83977af_ir *self;
	int err;

	IRDA_DEBUG(0, "%s()\n", __func__);

	/* Lock the port that we need */
	if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
		IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
			   __func__, iobase);
		return -ENODEV;
	}

	if (w83977af_probe(iobase, irq, dma) == -1) {
		err = -1;
		goto err_out;
	}
	/*
	 * Allocate new instance of the driver
	 */
	dev = alloc_irdadev(sizeof(struct w83977af_ir));
	if (dev == NULL) {
		printk(KERN_ERR "IrDA: Can't allocate memory for "
		       "IrDA control block!\n");
		err = -ENOMEM;
		goto err_out;
	}

	self = netdev_priv(dev);
	spin_lock_init(&self->lock);

	/* Initialize IO */
	self->io.fir_base  = iobase;
	self->io.irq       = irq;
	self->io.fir_ext   = CHIP_IO_EXTENT;
	self->io.dma       = dma;
	self->io.fifo_size = 32;

	/* Initialize QoS for this device */
	irda_init_max_qos_capabilies(&self->qos);

	/* The only value we must override is the baud rate */

	/* FIXME: The HP HSDL-1100 does not support 1152000! */
	self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
		IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8);

	/* The HP HSDL-1100 needs 1 ms according to the specs */
	self->qos.min_turn_time.bits = qos_mtt_bits;
	irda_qos_bits_to_value(&self->qos);

	/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
	self->rx_buff.truesize = 14384;
	self->tx_buff.truesize = 4000;
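
	/*
	 * Worked example of the sizing formula above (a sketch; the
	 * 2048-byte receiver frame length programmed in w83977af_probe()
	 * is from this file, while the 7-frame window is the IrLAP
	 * maximum and is an assumption here):
	 *
	 *     (2048 + 6) * 7 + 6 = 14384
	 *
	 * which matches the rx_buff.truesize value chosen above.
	 */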

	/* Allocate memory if needed */
	self->rx_buff.head =
		dma_alloc_coherent(NULL, self->rx_buff.truesize,
				   &self->rx_buff_dma, GFP_KERNEL);
	if (self->rx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out1;
	}

	memset(self->rx_buff.head, 0, self->rx_buff.truesize);

	self->tx_buff.head =
		dma_alloc_coherent(NULL, self->tx_buff.truesize,
				   &self->tx_buff_dma, GFP_KERNEL);
	if (self->tx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out2;
	}
	memset(self->tx_buff.head, 0, self->tx_buff.truesize);

	self->rx_buff.in_frame = FALSE;
	self->rx_buff.state = OUTSIDE_FRAME;
	self->tx_buff.data = self->tx_buff.head;
	self->rx_buff.data = self->rx_buff.head;
	self->netdev = dev;

	dev->netdev_ops = &w83977_netdev_ops;

	err = register_netdev(dev);
	if (err) {
		IRDA_ERROR("%s(), register_netdev() failed!\n", __func__);
		goto err_out3;
	}
	IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);

	/* Need to store self somewhere */
	dev_self[i] = self;

	return 0;
err_out3:
	dma_free_coherent(NULL, self->tx_buff.truesize,
			  self->tx_buff.head, self->tx_buff_dma);
err_out2:
	dma_free_coherent(NULL, self->rx_buff.truesize,
			  self->rx_buff.head, self->rx_buff_dma);
err_out1:
	free_netdev(dev);
err_out:
	release_region(iobase, CHIP_IO_EXTENT);
	return err;
}

/*
 * Function w83977af_close (self)
 *
 *    Close driver instance
 *
 */
static int w83977af_close(struct w83977af_ir *self)
{
	int iobase;

	IRDA_DEBUG(0, "%s()\n", __func__);

	iobase = self->io.fir_base;

#ifdef CONFIG_USE_W977_PNP
	/* Enter PnP configuration mode */
	w977_efm_enter(efio);

	w977_select_device(W977_DEVICE_IR, efio);

	/* Deactivate device */
	w977_write_reg(0x30, 0x00, efio);

	w977_efm_exit(efio);
#endif /* CONFIG_USE_W977_PNP */

	/* Remove netdevice */
	unregister_netdev(self->netdev);

	/* Release the PORT that this driver is using */
	IRDA_DEBUG(0, "%s(), Releasing Region %03x\n",
		   __func__, self->io.fir_base);
	release_region(self->io.fir_base, self->io.fir_ext);

	if (self->tx_buff.head)
		dma_free_coherent(NULL, self->tx_buff.truesize,
				  self->tx_buff.head, self->tx_buff_dma);

	if (self->rx_buff.head)
		dma_free_coherent(NULL, self->rx_buff.truesize,
				  self->rx_buff.head, self->rx_buff_dma);

	free_netdev(self->netdev);

	return 0;
}

static int w83977af_probe(int iobase, int irq, int dma)
{
	int version;
	int i;

	for (i = 0; i < 2; i++) {
		IRDA_DEBUG(0, "%s()\n", __func__);
#ifdef CONFIG_USE_W977_PNP
		/* Enter PnP configuration mode */
		w977_efm_enter(efbase[i]);

		w977_select_device(W977_DEVICE_IR, efbase[i]);

		/* Configure PnP port, IRQ, and DMA channel */
		w977_write_reg(0x60, (iobase >> 8) & 0xff, efbase[i]);
		w977_write_reg(0x61, (iobase) & 0xff, efbase[i]);

		w977_write_reg(0x70, irq, efbase[i]);
#ifdef CONFIG_ARCH_NETWINDER
		/* NetWinder uses 1 higher than Linux */
		w977_write_reg(0x74, dma+1, efbase[i]);
#else
		w977_write_reg(0x74, dma, efbase[i]);
#endif /* CONFIG_ARCH_NETWINDER */
		w977_write_reg(0x75, 0x04, efbase[i]); /* Disable Tx DMA */

		/* Set append hardware CRC, enable IR bank selection */
		w977_write_reg(0xf0, APEDCRC|ENBNKSEL, efbase[i]);

		/* Activate device */
		w977_write_reg(0x30, 0x01, efbase[i]);

		w977_efm_exit(efbase[i]);
#endif /* CONFIG_USE_W977_PNP */
		/* Disable Advanced mode */
		switch_bank(iobase, SET2);
		outb(0x00, iobase+2);

		/* Turn on UART (global) interrupts */
		switch_bank(iobase, SET0);
		outb(HCR_EN_IRQ, iobase+HCR);

		/* Switch to advanced mode */
		switch_bank(iobase, SET2);
		outb(inb(iobase+ADCR1) | ADCR1_ADV_SL, iobase+ADCR1);

		/* Set default IR-mode */
		switch_bank(iobase, SET0);
		outb(HCR_SIR, iobase+HCR);

		/* Read the Advanced IR ID */
		switch_bank(iobase, SET3);
		version = inb(iobase+AUID);

		/* The high nibble should be 0x1 */
		if (0x10 == (version & 0xf0)) {
			efio = efbase[i];

			/* Set FIFO size to 32 */
			switch_bank(iobase, SET2);
			outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);

			/* Set FIFO threshold to TX17, RX16 */
			switch_bank(iobase, SET0);
			outb(UFR_RXTL|UFR_TXTL|UFR_TXF_RST|UFR_RXF_RST|
			     UFR_EN_FIFO, iobase+UFR);

			/* Receiver frame length */
			switch_bank(iobase, SET4);
			outb(2048 & 0xff, iobase+6);
			outb((2048 >> 8) & 0x1f, iobase+7);

			/*
			 * Init HP HSDL-1100 transceiver.
			 *
			 * Set IRX_MSL since we have 2 receive paths, IRRX
			 * and IRRXH. Clear IRSL0D since we want IRSL0 to
			 * be an input pin used for IRRXH.
			 *
			 *   IRRX  pin 37 connected to receiver
			 *   IRTX  pin 38 connected to transmitter
			 *   FIRRX pin 39 connected to receiver (IRSL0)
			 *   CIRRX pin 40 connected to pin 37
			 */
			switch_bank(iobase, SET7);
			outb(0x40, iobase+7);

			IRDA_MESSAGE("W83977AF (IR) driver loaded. "
				     "Version: 0x%02x\n", version);

			return 0;
		} else {
			/* Try next extended function register address */
			IRDA_DEBUG(0, "%s(), Wrong chip version", __func__);
		}
	}
	return -1;
}

static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
{
	int ir_mode = HCR_SIR;
	int iobase;
	__u8 set;

	iobase = self->io.fir_base;

	/* Update accounting for new speed */
	self->io.speed = speed;

	/* Save current bank */
	set = inb(iobase+SSR);

	/* Disable interrupts */
	switch_bank(iobase, SET0);
	outb(0, iobase+ICR);

	/* Select Set 2 */
	switch_bank(iobase, SET2);
	outb(0x00, iobase+ABHL);

	switch (speed) {
	case 9600:   outb(0x0c, iobase+ABLL); break;
	case 19200:  outb(0x06, iobase+ABLL); break;
	case 38400:  outb(0x03, iobase+ABLL); break;
	case 57600:  outb(0x02, iobase+ABLL); break;
	case 115200: outb(0x01, iobase+ABLL); break;
	case 576000:
		ir_mode = HCR_MIR_576;
		IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__);
		break;
	case 1152000:
		ir_mode = HCR_MIR_1152;
		IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __func__);
		break;
	case 4000000:
		ir_mode = HCR_FIR;
		IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __func__);
		break;
	default:
		ir_mode = HCR_FIR;
		IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n", __func__, speed);
		break;
	}

	/* Set speed mode */
	switch_bank(iobase, SET0);
	outb(ir_mode, iobase+HCR);

	/* Set FIFO size to 32 */
	switch_bank(iobase, SET2);
	outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);

	/* Set FIFO threshold to TX17, RX16 */
	switch_bank(iobase, SET0);
	outb(0x00, iobase+UFR);        /* Reset */
	outb(UFR_EN_FIFO, iobase+UFR); /* First we must enable FIFO */
	outb(0xa7, iobase+UFR);

	netif_wake_queue(self->netdev);

	/* Enable some interrupts so we can receive frames */
	switch_bank(iobase, SET0);
	if (speed > PIO_MAX_SPEED) {
		outb(ICR_EFSFI, iobase+ICR);
		w83977af_dma_receive(self);
	} else
		outb(ICR_ERBRI, iobase+ICR);

	/* Restore SSR */
	outb(set, iobase+SSR);
}

/*
 * Function w83977af_hard_xmit (skb, dev)
 *
 *    Sets up a DMA transfer to send the current frame.
 *
 */
static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct w83977af_ir *self;
	__s32 speed;
	int iobase;
	__u8 set;
	int mtt;

	self = netdev_priv(dev);

	iobase = self->io.fir_base;

	IRDA_DEBUG(4, "%s(%ld), skb->len=%d\n", __func__, jiffies,
		   (int) skb->len);

	/* Lock transmit buffer */
	netif_stop_queue(dev);

	/* Check if we need to change the speed */
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		/* Check for empty frame */
		if (!skb->len) {
			w83977af_change_speed(self, speed);
			dev->trans_start = jiffies;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		} else
			self->new_speed = speed;
	}

	/* Save current set */
	set = inb(iobase+SSR);

	/* Decide if we should use PIO or DMA transfer */
	if (self->io.speed > PIO_MAX_SPEED) {
		self->tx_buff.data = self->tx_buff.head;
		skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len);
		self->tx_buff.len = skb->len;

		mtt = irda_get_mtt(skb);
		IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __func__, jiffies, mtt);
		if (mtt)
			udelay(mtt);

		/* Enable DMA interrupt */
		switch_bank(iobase, SET0);
		outb(ICR_EDMAI, iobase+ICR);
		w83977af_dma_write(self, iobase);
	} else {
		self->tx_buff.data = self->tx_buff.head;
		self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
						   self->tx_buff.truesize);

		/* Add interrupt on tx low level (will fire immediately) */
		switch_bank(iobase, SET0);
		outb(ICR_ETXTHI, iobase+ICR);
	}
	dev->trans_start = jiffies;
	dev_kfree_skb(skb);

	/* Restore set register */
	outb(set, iobase+SSR);

	return NETDEV_TX_OK;
}

/*
 * Function w83977af_dma_write (self, iobase)
 *
 *    Send frame using DMA
 *
 */
static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
{
	__u8 set;
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
	unsigned long flags;
	__u8 hcr;
#endif
	IRDA_DEBUG(4, "%s(), len=%d\n", __func__, self->tx_buff.len);

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable DMA */
	switch_bank(iobase, SET0);
	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);

	/* Choose transmit DMA channel */
	switch_bank(iobase, SET2);
	outb(ADCR1_D_CHSW|/*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase+ADCR1);
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
	spin_lock_irqsave(&self->lock, flags);

	disable_dma(self->io.dma);
	clear_dma_ff(self->io.dma);
	set_dma_mode(self->io.dma, DMA_MODE_READ);
	set_dma_addr(self->io.dma, self->tx_buff_dma);
	set_dma_count(self->io.dma, self->tx_buff.len);
#else
	irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
		       DMA_MODE_WRITE);
#endif
	self->io.direction = IO_XMIT;

	/* Enable DMA */
	switch_bank(iobase, SET0);
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
	hcr = inb(iobase+HCR);
	outb(hcr | HCR_EN_DMA, iobase+HCR);
	enable_dma(self->io.dma);
	spin_unlock_irqrestore(&self->lock, flags);
#else
	outb(inb(iobase+HCR) | HCR_EN_DMA | HCR_TX_WT, iobase+HCR);
#endif

	/* Restore set register */
	outb(set, iobase+SSR);
}

/*
 * Function w83977af_pio_write (iobase, buf, len, fifo_size)
 *
 *    Fill the transmit FIFO with up to fifo_size bytes from buf using
 *    programmed I/O, and return the number of bytes actually written.
 *
 */
static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
{
	int actual = 0;
	__u8 set;

	IRDA_DEBUG(4, "%s()\n", __func__);

	/* Save current bank */
	set = inb(iobase+SSR);

	switch_bank(iobase, SET0);
	if (!(inb_p(iobase+USR) & USR_TSRE)) {
		IRDA_DEBUG(4,
			   "%s(), warning, FIFO not empty yet!\n", __func__);

		fifo_size -= 17;
		IRDA_DEBUG(4, "%s(), %d bytes left in tx fifo\n",
			   __func__, fifo_size);
	}

	/* Fill FIFO with current frame */
	while ((fifo_size-- > 0) && (actual < len)) {
		/* Transmit next byte */
		outb(buf[actual++], iobase+TBR);
	}

	IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n",
		   __func__, fifo_size, actual, len);

	/* Restore bank */
	outb(set, iobase+SSR);

	return actual;
}

/*
 * Function w83977af_dma_xmit_complete (self)
 *
 *    The transfer of a frame is finished, so do the necessary cleanup.
 *
 */
static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
{
	int iobase;
	__u8 set;

	IRDA_DEBUG(4, "%s(%ld)\n", __func__, jiffies);

	IRDA_ASSERT(self != NULL, return;);

	iobase = self->io.fir_base;

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable DMA */
	switch_bank(iobase, SET0);
	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);

	/* Check for underrun! */
	if (inb(iobase+AUDR) & AUDR_UNDR) {
		IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __func__);

		self->netdev->stats.tx_errors++;
		self->netdev->stats.tx_fifo_errors++;

		/* Clear bit, by writing 1 to it */
		outb(AUDR_UNDR, iobase+AUDR);
	} else
		self->netdev->stats.tx_packets++;

	if (self->new_speed) {
		w83977af_change_speed(self, self->new_speed);
		self->new_speed = 0;
	}

	/* Unlock tx_buff and request another frame */
	/* Tell the network layer that we want more frames */
	netif_wake_queue(self->netdev);

	/* Restore set */
	outb(set, iobase+SSR);
}

/*
 * Function w83977af_dma_receive (self)
 *
 *    Get ready for receiving a frame. The device will initiate a DMA
 *    if it starts to receive a frame.
 *
 */
static int w83977af_dma_receive(struct w83977af_ir *self)
{
	int iobase;
	__u8 set;
#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
	unsigned long flags;
	__u8 hcr;
#endif
	IRDA_ASSERT(self != NULL, return -1;);

	IRDA_DEBUG(4, "%s\n", __func__);

	iobase = self->io.fir_base;

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable DMA */
	switch_bank(iobase, SET0);
	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);

	/* Choose DMA Rx, DMA Fairness, and Advanced mode */
	switch_bank(iobase, SET2);
	outb((inb(iobase+ADCR1) & ~ADCR1_D_CHSW)/*|ADCR1_DMA_F*/|ADCR1_ADV_SL,
	     iobase+ADCR1);

	self->io.direction = IO_RECV;
	self->rx_buff.data = self->rx_buff.head;

#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
	spin_lock_irqsave(&self->lock, flags);

	disable_dma(self->io.dma);
	clear_dma_ff(self->io.dma);
	set_dma_mode(self->io.dma, DMA_MODE_READ);
	set_dma_addr(self->io.dma, self->rx_buff_dma);
	set_dma_count(self->io.dma, self->rx_buff.truesize);
#else
	irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
		       DMA_MODE_READ);
#endif
	/*
	 * Reset Rx FIFO. This will also flush the ST_FIFO; it's very
	 * important that we don't reset the Tx FIFO since it might not
	 * be finished transmitting yet
	 */
	switch_bank(iobase, SET0);
	outb(UFR_RXTL|UFR_TXTL|UFR_RXF_RST|UFR_EN_FIFO, iobase+UFR);
	self->st_fifo.len = self->st_fifo.tail = self->st_fifo.head = 0;

	/* Enable DMA */
	switch_bank(iobase, SET0);
#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
	hcr = inb(iobase+HCR);
	outb(hcr | HCR_EN_DMA, iobase+HCR);
	enable_dma(self->io.dma);
	spin_unlock_irqrestore(&self->lock, flags);
#else
	outb(inb(iobase+HCR) | HCR_EN_DMA, iobase+HCR);
#endif
	/* Restore set */
	outb(set, iobase+SSR);

	return 0;
}

/*
 * Function w83977af_dma_receive_complete (self)
 *
 *    Finished with receiving a frame
 *
 */
static int w83977af_dma_receive_complete(struct w83977af_ir *self)
{
	struct sk_buff *skb;
	struct st_fifo *st_fifo;
	int len;
	int iobase;
	__u8 set;
	__u8 status;

	IRDA_DEBUG(4, "%s\n", __func__);

	st_fifo = &self->st_fifo;

	iobase = self->io.fir_base;

	/* Save current set */
	set = inb(iobase+SSR);

	/* Read status FIFO */
	switch_bank(iobase, SET5);
	while ((status = inb(iobase+FS_FO)) & FS_FO_FSFDR) {
		st_fifo->entries[st_fifo->tail].status = status;

		st_fifo->entries[st_fifo->tail].len  = inb(iobase+RFLFL);
		st_fifo->entries[st_fifo->tail].len |= inb(iobase+RFLFH) << 8;

		st_fifo->tail++;
		st_fifo->len++;
	}

	while (st_fifo->len) {
		/* Get first entry */
		status = st_fifo->entries[st_fifo->head].status;
		len    = st_fifo->entries[st_fifo->head].len;
		st_fifo->head++;
		st_fifo->len--;

		/* Check for errors */
		if (status & FS_FO_ERR_MSK) {
			if (status & FS_FO_LST_FR) {
				/* Add number of lost frames to stats */
				self->netdev->stats.rx_errors += len;
			} else {
				/* Skip frame */
				self->netdev->stats.rx_errors++;

				self->rx_buff.data += len;

				if (status & FS_FO_MX_LEX)
					self->netdev->stats.rx_length_errors++;

				if (status & FS_FO_PHY_ERR)
					self->netdev->stats.rx_frame_errors++;

				if (status & FS_FO_CRC_ERR)
					self->netdev->stats.rx_crc_errors++;
			}
			/* The errors below can be reported in both cases */
			if (status & FS_FO_RX_OV)
				self->netdev->stats.rx_fifo_errors++;

			if (status & FS_FO_FSF_OV)
				self->netdev->stats.rx_fifo_errors++;

		} else {
			/* Check if we have transferred all data to memory */
			switch_bank(iobase, SET0);
			if (inb(iobase+USR) & USR_RDR) {
				udelay(80); /* Should be enough!? */
			}

			skb = dev_alloc_skb(len+1);
			if (skb == NULL) {
				printk(KERN_INFO
				       "%s(), memory squeeze, dropping frame.\n", __func__);
				/* Restore set register */
				outb(set, iobase+SSR);

				return FALSE;
			}

			/* Align to 20 bytes */
			skb_reserve(skb, 1);

			/* Copy frame without CRC */
			if (self->io.speed < 4000000) {
				skb_put(skb, len-2);
				skb_copy_to_linear_data(skb,
							self->rx_buff.data,
							len - 2);
			} else {
				skb_put(skb, len-4);
				skb_copy_to_linear_data(skb,
							self->rx_buff.data,
							len - 4);
			}

			/* Move to next frame */
			self->rx_buff.data += len;
			self->netdev->stats.rx_packets++;

			skb->dev = self->netdev;
			skb_reset_mac_header(skb);
			skb->protocol = htons(ETH_P_IRDA);
			netif_rx(skb);
		}
	}
	/* Restore set register */
	outb(set, iobase+SSR);

	return TRUE;
}

/*
 * Function w83977af_pio_receive (self)
 *
 *    Receive all data in receiver FIFO
 *
 */
static void w83977af_pio_receive(struct w83977af_ir *self)
{
	__u8 byte = 0x00;
	int iobase;

	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);

	iobase = self->io.fir_base;

	/* Receive all characters in Rx FIFO */
	do {
		byte = inb(iobase+RBR);
		async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
				  byte);
	} while (inb(iobase+USR) & USR_RDR); /* Data available */
}

/*
 * Function w83977af_sir_interrupt (self, isr)
 *
 *    Handle SIR interrupt
 *
 */
static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
{
	int actual;
	__u8 new_icr = 0;
	__u8 set;
	int iobase;

	IRDA_DEBUG(4, "%s(), isr=%#x\n", __func__, isr);

	iobase = self->io.fir_base;
	/* Transmit FIFO low on data */
	if (isr & ISR_TXTH_I) {
		/* Write data left in transmit buffer */
		actual = w83977af_pio_write(self->io.fir_base,
					    self->tx_buff.data,
					    self->tx_buff.len,
					    self->io.fifo_size);

		self->tx_buff.data += actual;
		self->tx_buff.len  -= actual;

		self->io.direction = IO_XMIT;

		/* Check if finished */
		if (self->tx_buff.len > 0) {
			new_icr |= ICR_ETXTHI;
		} else {
			set = inb(iobase+SSR);
			switch_bank(iobase, SET0);
			outb(AUDR_SFEND, iobase+AUDR);
			outb(set, iobase+SSR);

			self->netdev->stats.tx_packets++;

			/* Feed me more packets */
			netif_wake_queue(self->netdev);
			new_icr |= ICR_ETBREI;
		}
	}
	/* Check if transmission has completed */
	if (isr & ISR_TXEMP_I) {
		/* Check if we need to change the speed */
		if (self->new_speed) {
			IRDA_DEBUG(2,
				   "%s(), Changing speed!\n", __func__);
			w83977af_change_speed(self, self->new_speed);
			self->new_speed = 0;
		}

		/* Turn around and get ready to receive some data */
		self->io.direction = IO_RECV;
		new_icr |= ICR_ERBRI;
	}

	/* Rx FIFO threshold or timeout */
	if (isr & ISR_RXTH_I) {
		w83977af_pio_receive(self);

		/* Keep receiving */
		new_icr |= ICR_ERBRI;
	}
	return new_icr;
}

/*
 * Function w83977af_fir_interrupt (self, isr)
 *
 *    Handle MIR/FIR interrupt
 *
 */
static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
{
	__u8 new_icr = 0;
	__u8 set;
	int iobase;

	iobase = self->io.fir_base;
	set = inb(iobase+SSR);

	/* End of frame detected in FIFO */
	if (isr & (ISR_FEND_I|ISR_FSF_I)) {
		if (w83977af_dma_receive_complete(self)) {
			/* Wait for next status FIFO interrupt */
			new_icr |= ICR_EFSFI;
		} else {
			/* DMA not finished yet */

			/* Set timer value, resolution 1 ms */
			switch_bank(iobase, SET4);
			outb(0x01, iobase+TMRL); /* 1 ms */
			outb(0x00, iobase+TMRH);

			/* Start timer */
			outb(IR_MSL_EN_TMR, iobase+IR_MSL);

			new_icr |= ICR_ETMRI;
		}
	}
	/* Timer finished */
	if (isr & ISR_TMR_I) {
		/* Disable timer */
		switch_bank(iobase, SET4);
		outb(0, iobase+IR_MSL);

		/* Clear timer event */
		/* switch_bank(iobase, SET0); */
		/* outb(ASCR_CTE, iobase+ASCR); */

		/* Check if this is a TX timer interrupt */
		if (self->io.direction == IO_XMIT) {
			w83977af_dma_write(self, iobase);

			new_icr |= ICR_EDMAI;
		} else {
			/* Check if DMA has now finished */
			w83977af_dma_receive_complete(self);

			new_icr |= ICR_EFSFI;
		}
	}
	/* Finished with DMA */
	if (isr & ISR_DMA_I) {
		w83977af_dma_xmit_complete(self);

		/* Check if there are more frames to be transmitted */
		/* if (irda_device_txqueue_empty(self)) { */

		/*
		 * Prepare for receive
		 *
		 * ** NetWinder Tx DMA likes that we do this anyway **
		 */
		w83977af_dma_receive(self);
		new_icr = ICR_EFSFI;
		/* } */
	}

	/* Restore set */
	outb(set, iobase+SSR);

	return new_icr;
}

/*
 * Function w83977af_interrupt (irq, dev_id)
 *
 *    An interrupt from the chip has arrived. Time to do some work
 *
 */
static irqreturn_t w83977af_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct w83977af_ir *self;
	__u8 set, icr, isr;
	int iobase;

	self = netdev_priv(dev);

	iobase = self->io.fir_base;

	/* Save current bank */
	set = inb(iobase+SSR);
	switch_bank(iobase, SET0);

	icr = inb(iobase+ICR);
	isr = inb(iobase+ISR) & icr; /* Mask out the interesting ones */

	outb(0, iobase+ICR); /* Disable interrupts */

	if (isr) {
		/* Dispatch interrupt handler for the current speed */
		if (self->io.speed > PIO_MAX_SPEED)
			icr = w83977af_fir_interrupt(self, isr);
		else
			icr = w83977af_sir_interrupt(self, isr);
	}

	outb(icr, iobase+ICR); /* Restore (new) interrupts */
	outb(set, iobase+SSR); /* Restore bank register */
	return IRQ_RETVAL(isr);
}

/*
 * Function w83977af_is_receiving (self)
 *
 *    Return TRUE if we are currently receiving a frame
 *
 */
static int w83977af_is_receiving(struct w83977af_ir *self)
{
	int status = FALSE;
	int iobase;
	__u8 set;

	IRDA_ASSERT(self != NULL, return FALSE;);

	if (self->io.speed > 115200) {
		iobase = self->io.fir_base;

		/* Check if rx FIFO is not empty */
		set = inb(iobase+SSR);
		switch_bank(iobase, SET2);
		if ((inb(iobase+RXFDTH) & 0x3f) != 0) {
			/* We are receiving something */
			status = TRUE;
		}
		outb(set, iobase+SSR);
	} else
		status = (self->rx_buff.state != OUTSIDE_FRAME);

	return status;
}

/*
 * Function w83977af_net_open (dev)
 *
 *    Start the device
 *
 */
static int w83977af_net_open(struct net_device *dev)
{
	struct w83977af_ir *self;
	int iobase;
	char hwname[32];
	__u8 set;

	IRDA_DEBUG(0, "%s()\n", __func__);

	IRDA_ASSERT(dev != NULL, return -1;);
	self = netdev_priv(dev);

	IRDA_ASSERT(self != NULL, return 0;);

	iobase = self->io.fir_base;

	if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name,
			(void *) dev)) {
		return -EAGAIN;
	}
	/*
	 * Always allocate the DMA channel after the IRQ,
	 * and clean up on failure.
	 */
	if (request_dma(self->io.dma, dev->name)) {
		free_irq(self->io.irq, dev);
		return -EAGAIN;
	}

	/* Save current set */
	set = inb(iobase+SSR);

	/* Enable some interrupts so we can receive frames again */
	switch_bank(iobase, SET0);
	if (self->io.speed > 115200) {
		outb(ICR_EFSFI, iobase+ICR);
		w83977af_dma_receive(self);
	} else
		outb(ICR_ERBRI, iobase+ICR);

	/* Restore bank register */
	outb(set, iobase+SSR);

	/* Ready to play! */
	netif_start_queue(dev);

	/* Give self a hardware name */
	sprintf(hwname, "w83977af @ 0x%03x", self->io.fir_base);

	/*
	 * Open new IrLAP layer instance, now that everything should be
	 * initialized properly
	 */
	self->irlap = irlap_open(dev, &self->qos, hwname);

	return 0;
}

/*
 * Function w83977af_net_close (dev)
 *
 *    Stop the device
 *
 */
static int w83977af_net_close(struct net_device *dev)
{
	struct w83977af_ir *self;
	int iobase;
	__u8 set;

	IRDA_DEBUG(0, "%s()\n", __func__);

	IRDA_ASSERT(dev != NULL, return -1;);

	self = netdev_priv(dev);

	IRDA_ASSERT(self != NULL, return 0;);

	iobase = self->io.fir_base;

	/* Stop device */
	netif_stop_queue(dev);

	/* Stop and remove instance of IrLAP */
	if (self->irlap)
		irlap_close(self->irlap);
	self->irlap = NULL;

	disable_dma(self->io.dma);

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable interrupts */
	switch_bank(iobase, SET0);
	outb(0, iobase+ICR);

	free_irq(self->io.irq, dev);
	free_dma(self->io.dma);

	/* Restore bank register */
	outb(set, iobase+SSR);

	return 0;
}

/*
 * Function w83977af_net_ioctl (dev, rq, cmd)
 *
 *    Process IOCTL commands for this device
 *
 */
static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct if_irda_req *irq = (struct if_irda_req *) rq;
	struct w83977af_ir *self;
	unsigned long flags;
	int ret = 0;

	IRDA_ASSERT(dev != NULL, return -1;);

	self = netdev_priv(dev);

	IRDA_ASSERT(self != NULL, return -1;);

	IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);

	spin_lock_irqsave(&self->lock, flags);

	switch (cmd) {
	case SIOCSBANDWIDTH: /* Set bandwidth */
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		w83977af_change_speed(self, irq->ifr_baudrate);
		break;
	case SIOCSMEDIABUSY: /* Set media busy */
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		irda_device_set_media_busy(self->netdev, TRUE);
		break;
	case SIOCGRECEIVING: /* Check if we are receiving right now */
		irq->ifr_receiving = w83977af_is_receiving(self);
		break;
	default:
		ret = -EOPNOTSUPP;
	}
out:
	spin_unlock_irqrestore(&self->lock, flags);
	return ret;
}

MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
MODULE_DESCRIPTION("Winbond W83977AF IrDA Device Driver");
MODULE_LICENSE("GPL");

module_param(qos_mtt_bits, int, 0);
MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
module_param_array(io, int, NULL, 0);
MODULE_PARM_DESC(io, "Base I/O addresses");
module_param_array(irq, int, NULL, 0);
MODULE_PARM_DESC(irq, "IRQ lines");
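
/*
 * Usage sketch (illustrative, not from the original sources): the module
 * parameters above can be given at load time, for example
 *
 *     modprobe w83977af_ir io=0x180 irq=11
 *
 * The values shown simply restate the built-in defaults for a
 * non-NetWinder system; on a NetWinder the default IRQ is 6 instead.
 */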

/*
 * Function init_module (void)
 *
 *    Module entry point; runs w83977af_init()
 *
 */
module_init(w83977af_init);

/*
 * Function cleanup_module (void)
 *
 *    Module exit point; runs w83977af_cleanup()
 *
 */
module_exit(w83977af_cleanup);