/* xircom_tulip_cb.c: A Xircom CBE-100 ethernet driver for Linux. */
/*
    Written/copyright 1994-1999 by Donald Becker.

    This software may be used and distributed according to the terms
    of the GNU General Public License, incorporated herein by reference.

    The author may be reached as becker@scyld.com, or C/O
    Scyld Computing Corporation
    410 Severn Ave., Suite 210
    Annapolis MD 21403

    -----------------------------------------------------------

    Linux kernel-specific changes:

    LK1.0 (Ion Badulescu)
    - Major cleanup
    - Use 2.4 PCI API
    - Support ethtool
    - Rewrite perfect filter/hash code
    - Use interrupts for media changes

    LK1.1 (Ion Badulescu)
    - Disallow negotiation of unsupported full-duplex modes
*/

#define DRV_NAME	"xircom_tulip_cb"
#define DRV_VERSION	"0.91+LK1.1"
#define DRV_RELDATE	"October 11, 2001"

#define CARDBUS 1

/* A few user-configurable values. */

#define xircom_debug debug
#ifdef XIRCOM_DEBUG
static int xircom_debug = XIRCOM_DEBUG;
#else
static int xircom_debug = 1;
#endif

/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 25;

#define MAX_UNITS 4
/* Used to pass the full-duplex flag, etc. */
static int full_duplex[MAX_UNITS];
static int options[MAX_UNITS];
static int mtu[MAX_UNITS];		/* Jumbo MTU for interfaces. */

/* Keep the ring sizes a power of two for efficiency.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define RX_RING_SIZE	32

/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
#ifdef __alpha__
static int rx_copybreak = 1518;
#else
static int rx_copybreak = 100;
#endif

/*
  Set the bus performance register.
    Typical: Set 16 longword cache alignment, no burst limit.
    Cache alignment bits 15:14           Burst length 13:8
        0000 No alignment   0x00000000 unlimited    0800 8 longwords
        4000 8  longwords   0100 1 longword         1000 16 longwords
        8000 16 longwords   0200 2 longwords        2000 32 longwords
        C000 32 longwords   0400 4 longwords
    Warning: many older 486 systems are broken and require setting 0x00A04800
       8 longword cache alignment, 8 longword burst.
    ToDo: Non-Intel setting could be better.
*/
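/* For example, the 64-bit default below (0x01A00000 | 0xE000) sets
   EnableMWI|EnableMRL|EnableMRM plus 32-longword cache alignment (0xC000)
   and a 32-longword burst limit (0x2000), per the table above. */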

#if defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
static int csr0 = 0x01A00000 | 0xE000;
#elif defined(__powerpc__)
static int csr0 = 0x01B00000 | 0x8000;
#elif defined(__sparc__)
static int csr0 = 0x01B00080 | 0x8000;
#elif defined(__i386__)
static int csr0 = 0x01A00000 | 0x8000;
#else
#warning Processor architecture undefined!
static int csr0 = 0x00A00000 | 0x4800;
#endif

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(4 * HZ)
#define PKT_BUF_SZ	1536		/* Size of each temporary Rx buffer.*/
#define PKT_SETUP_SZ	192		/* Size of the setup frame */

/* PCI registers */
#define PCI_POWERMGMT	0x40

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>

#include <asm/io.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/uaccess.h>


/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO DRV_NAME ".c derived from tulip.c:v0.91 4/14/99 becker@scyld.com\n"
KERN_INFO " unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE "\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Xircom CBE-100 ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);

module_param(debug, int, 0);
module_param(max_interrupt_work, int, 0);
module_param(rx_copybreak, int, 0);
module_param(csr0, int, 0);

module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);

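/* Example (hypothetical values): per-board settings can be passed at load
   time, e.g. "modprobe xircom_tulip_cb options=0,11 full_duplex=0,1 debug=2";
   the low four bits of options[n] select board n's media type, and a
   non-zero full_duplex[n] forces full duplex. */
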
#define RUN_AT(x) (jiffies + (x))

/*
				Theory of Operation

I. Board Compatibility

This device driver was forked from the driver for the DECchip "Tulip",
Digital's single-chip ethernet controllers for PCI. It supports Xircom's
almost-Tulip-compatible CBE-100 CardBus adapters.

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board. The system BIOS should preferably assign the
PCI INTA signal to an otherwise unused system IRQ line.

III. Driver operation

IIIa. Ring buffers

The Xircom can use either ring buffers or lists of Tx and Rx descriptors.
This driver uses statically allocated rings of Rx and Tx descriptors, set at
compile time by RX/TX_RING_SIZE. This version of the driver allocates skbuffs
for the Rx ring buffers at open() time and passes the skb->data field to the
Xircom as receive data buffers. When an incoming frame is less than
RX_COPYBREAK bytes long, a fresh skbuff is allocated and the frame is
copied to the new skbuff. When the incoming frame is larger, the skbuff is
passed directly up the protocol stack and replaced by a newly allocated
skbuff.

The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames. For small frames the copying cost is negligible (esp. considering
that we are pre-loading the cache with immediately useful header
information). For large frames the copying cost is non-trivial, and the
larger copy might flush the cache of useful data. A subtle aspect of this
choice is that the Xircom only receives into longword aligned buffers, thus
the IP header at offset 14 isn't longword aligned for further processing.
Copied frames are put into the new skbuff at an offset of "+2", thus copying
has the beneficial effect of aligning the IP header and preloading the
cache.

IIIc. Synchronization
The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag. The other thread is the interrupt handler, which is single
threaded by the hardware and other software.

The send packet thread has partial control over the Tx ring and the
'dev->tbusy' flag. It sets the tbusy flag whenever it's queuing a Tx packet.
If the next queue slot is empty, it clears the tbusy flag when finished;
otherwise it sets the 'tp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
we can't avoid the interrupt overhead by having the Tx routine reap the Tx
stats.) After reaping the stats, it marks the queue entry as empty by setting
the 'base' to zero. Iff the 'tp->tx_full' flag is set, it clears both the
tx_full and tbusy flags.

IV. Notes

IVb. References

http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
http://www.digital.com (search for current 21*4* datasheets and "21X4 SROM")
http://www.national.com/pf/DP/DP83840A.html

IVc. Errata

*/

/* A full-duplex map for media types. */
enum MediaIs {
    MediaIsFD = 1, MediaAlwaysFD=2, MediaIsMII=4, MediaIsFx=8,
    MediaIs100=16};
static const char media_cap[] =
{0,0,0,16,  3,19,16,24,  27,4,7,5, 0,20,23,20 };
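/* media_cap[] is indexed by the 4-bit media type kept in dev->if_port; each
   entry ORs together the MediaIs* flags above for that medium. */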

/* Offsets to the Command and Status Registers, "CSRs".  All accesses
   must be longword instructions and quadword aligned. */
enum xircom_offsets {
    CSR0=0,     CSR1=0x08,  CSR2=0x10,  CSR3=0x18,  CSR4=0x20,  CSR5=0x28,
    CSR6=0x30,  CSR7=0x38,  CSR8=0x40,  CSR9=0x48,  CSR10=0x50, CSR11=0x58,
    CSR12=0x60, CSR13=0x68, CSR14=0x70, CSR15=0x78, CSR16=0x04, };

/* The bits in the CSR5 status registers, mostly interrupt sources. */
enum status_bits {
    LinkChange=0x08000000,
    NormalIntr=0x10000, NormalIntrMask=0x00014045,
    AbnormalIntr=0x8000, AbnormalIntrMask=0x0a00a5a2,
    ReservedIntrMask=0xe0001a18,
    EarlyRxIntr=0x4000, BusErrorIntr=0x2000,
    EarlyTxIntr=0x400, RxDied=0x100, RxNoBuf=0x80, RxIntr=0x40,
    TxFIFOUnderflow=0x20, TxNoBuf=0x04, TxDied=0x02, TxIntr=0x01,
};

enum csr0_control_bits {
    EnableMWI=0x01000000, EnableMRL=0x00800000,
    EnableMRM=0x00200000, EqualBusPrio=0x02,
    SoftwareReset=0x01,
};

enum csr6_control_bits {
    ReceiveAllBit=0x40000000, AllMultiBit=0x80, PromiscBit=0x40,
    HashFilterBit=0x01, FullDuplexBit=0x0200,
    TxThresh10=0x400000, TxStoreForw=0x200000,
    TxThreshMask=0xc000, TxThreshShift=14,
    EnableTx=0x2000, EnableRx=0x02,
    ReservedZeroMask=0x8d930134, ReservedOneMask=0x320c0000,
    EnableTxRx=(EnableTx | EnableRx),
};


enum tbl_flag {
    HAS_MII=1, HAS_ACPI=2,
};
static struct xircom_chip_table {
    char *chip_name;
    int valid_intrs;	/* CSR7 interrupt enable settings */
    int flags;
} xircom_tbl[] = {
    { "Xircom Cardbus Adapter",
      LinkChange | NormalIntr | AbnormalIntr | BusErrorIntr |
      RxDied | RxNoBuf | RxIntr | TxFIFOUnderflow | TxNoBuf | TxDied | TxIntr,
      HAS_MII | HAS_ACPI, },
    { NULL, },
};
/* This matches the table above. */
enum chips {
    X3201_3,
};


/* The Xircom Rx and Tx buffer descriptors. */
struct xircom_rx_desc {
    s32 status;
    s32 length;
    u32 buffer1, buffer2;
};

struct xircom_tx_desc {
    s32 status;
    s32 length;
    u32 buffer1, buffer2;	/* We use only buffer 1. */
};

enum tx_desc0_status_bits {
    Tx0DescOwned=0x80000000, Tx0DescError=0x8000, Tx0NoCarrier=0x0800,
    Tx0LateColl=0x0200, Tx0ManyColl=0x0100, Tx0Underflow=0x02,
};
enum tx_desc1_status_bits {
    Tx1ComplIntr=0x80000000, Tx1LastSeg=0x40000000, Tx1FirstSeg=0x20000000,
    Tx1SetupPkt=0x08000000, Tx1DisableCRC=0x04000000, Tx1RingWrap=0x02000000,
    Tx1ChainDesc=0x01000000, Tx1NoPad=0x800000, Tx1HashSetup=0x400000,
    Tx1WholePkt=(Tx1FirstSeg | Tx1LastSeg),
};
enum rx_desc0_status_bits {
    Rx0DescOwned=0x80000000, Rx0DescError=0x8000, Rx0NoSpace=0x4000,
    Rx0Runt=0x0800, Rx0McastPkt=0x0400, Rx0FirstSeg=0x0200, Rx0LastSeg=0x0100,
    Rx0HugeFrame=0x80, Rx0CRCError=0x02,
    Rx0WholePkt=(Rx0FirstSeg | Rx0LastSeg),
};
enum rx_desc1_status_bits {
    Rx1RingWrap=0x02000000, Rx1ChainDesc=0x01000000,
};

struct xircom_private {
    struct xircom_rx_desc rx_ring[RX_RING_SIZE];
    struct xircom_tx_desc tx_ring[TX_RING_SIZE];
    /* The saved address of a sent-in-place packet/buffer, for skfree(). */
    struct sk_buff* tx_skbuff[TX_RING_SIZE];
#ifdef CARDBUS
    /* The X3201-3 requires 4-byte aligned tx bufs */
    struct sk_buff* tx_aligned_skbuff[TX_RING_SIZE];
#endif
    /* The addresses of receive-in-place skbuffs. */
    struct sk_buff* rx_skbuff[RX_RING_SIZE];
    u16 setup_frame[PKT_SETUP_SZ / sizeof(u16)];	/* Pseudo-Tx frame to init address table. */
    int chip_id;
    struct net_device_stats stats;
    unsigned int cur_rx, cur_tx;	/* The next free ring entry */
    unsigned int dirty_rx, dirty_tx;	/* The ring entries to be free()ed. */
    unsigned int tx_full:1;		/* The Tx queue is full. */
    unsigned int speed100:1;
    unsigned int full_duplex:1;		/* Full-duplex operation requested. */
    unsigned int autoneg:1;
    unsigned int default_port:4;	/* Last dev->if_port value. */
    unsigned int open:1;
    unsigned int csr0;			/* CSR0 setting. */
    unsigned int csr6;			/* Current CSR6 control settings. */
    u16 to_advertise;			/* NWay capabilities advertised. */
    u16 advertising[4];
    signed char phys[4], mii_cnt;	/* MII device addresses. */
    int saved_if_port;
    struct pci_dev *pdev;
    spinlock_t lock;
};

static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static void xircom_up(struct net_device *dev);
static void xircom_down(struct net_device *dev);
static int xircom_open(struct net_device *dev);
static void xircom_tx_timeout(struct net_device *dev);
static void xircom_init_ring(struct net_device *dev);
static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev);
static int xircom_rx(struct net_device *dev);
static void xircom_media_change(struct net_device *dev);
static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static int xircom_close(struct net_device *dev);
static struct net_device_stats *xircom_get_stats(struct net_device *dev);
static int xircom_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void set_rx_mode(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static struct ethtool_ops ops;


/* The Xircom cards are picky about when certain bits in CSR6 can be
   manipulated.  Keith Owens <kaos@ocs.com.au>. */
static void outl_CSR6(u32 newcsr6, long ioaddr)
{
    const int strict_bits =
        TxThresh10 | TxStoreForw | TxThreshMask | EnableTxRx | FullDuplexBit;
    int csr5, csr5_22_20, csr5_19_17, currcsr6, attempts = 200;
    unsigned long flags;
    save_flags(flags);
    cli();
    /* mask out the reserved bits that always read 0 on the Xircom cards */
    newcsr6 &= ~ReservedZeroMask;
    /* or in the reserved bits that always read 1 */
    newcsr6 |= ReservedOneMask;
    currcsr6 = inl(ioaddr + CSR6);
    if (((newcsr6 & strict_bits) == (currcsr6 & strict_bits)) ||
        ((currcsr6 & ~EnableTxRx) == 0)) {
        outl(newcsr6, ioaddr + CSR6);	/* safe */
        restore_flags(flags);
        return;
    }
    /* make sure the transmitter and receiver are stopped first */
    currcsr6 &= ~EnableTxRx;
    while (1) {
        csr5 = inl(ioaddr + CSR5);
        if (csr5 == 0xffffffff)
            break;	/* cannot read csr5, card removed? */
        csr5_22_20 = csr5 & 0x700000;
        csr5_19_17 = csr5 & 0x0e0000;
        if ((csr5_22_20 == 0 || csr5_22_20 == 0x600000) &&
            (csr5_19_17 == 0 || csr5_19_17 == 0x80000 || csr5_19_17 == 0xc0000))
            break;	/* both are stopped or suspended */
        if (!--attempts) {
            printk(KERN_INFO DRV_NAME ": outl_CSR6 too many attempts, "
                   "csr5=0x%08x\n", csr5);
            outl(newcsr6, ioaddr + CSR6);	/* unsafe but do it anyway */
            restore_flags(flags);
            return;
        }
        outl(currcsr6, ioaddr + CSR6);
        udelay(1);
    }
    /* now it is safe to change csr6 */
    outl(newcsr6, ioaddr + CSR6);
    restore_flags(flags);
}


static void __devinit read_mac_address(struct net_device *dev)
{
    long ioaddr = dev->base_addr;
    int i, j;
    unsigned char tuple, link, data_id, data_count;

    /* Xircom has its address stored in the CIS;
     * we access it through the boot rom interface for now
     * this might not work, as the CIS is not parsed; I (danilo)
     * use the offset I found on my card's CIS!
     *
     * Doug Ledford: I changed this routine around so that it
     * walks the CIS memory space, parsing the config items, and
     * finds the proper lan_node_id tuple and uses the data
     * stored there.
     */
    outl(1 << 12, ioaddr + CSR9);	/* enable boot rom access */
    for (i = 0x100; i < 0x1f7; i += link + 2) {
        outl(i, ioaddr + CSR10);
        tuple = inl(ioaddr + CSR9) & 0xff;
        outl(i + 1, ioaddr + CSR10);
        link = inl(ioaddr + CSR9) & 0xff;
        outl(i + 2, ioaddr + CSR10);
        data_id = inl(ioaddr + CSR9) & 0xff;
        outl(i + 3, ioaddr + CSR10);
        data_count = inl(ioaddr + CSR9) & 0xff;
        if ((tuple == 0x22) &&
            (data_id == 0x04) && (data_count == 0x06)) {
            /*
             * This is it.  We have the data we want.
             */
            for (j = 0; j < 6; j++) {
                outl(i + j + 4, ioaddr + CSR10);
                dev->dev_addr[j] = inl(ioaddr + CSR9) & 0xff;
            }
            break;
        } else if (link == 0) {
            break;
        }
    }
}


/*
 * locate the MII interfaces and initialize them.
 * we disable full-duplex modes here,
 * because we don't know how to handle them.
 */
static void find_mii_transceivers(struct net_device *dev)
{
    struct xircom_private *tp = netdev_priv(dev);
    int phy, phy_idx;

    if (media_cap[tp->default_port] & MediaIsMII) {
        u16 media2advert[] = { 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 };
        tp->to_advertise = media2advert[tp->default_port - 9];
    } else
        tp->to_advertise =
            /*ADVERTISE_100BASE4 | ADVERTISE_100FULL |*/ ADVERTISE_100HALF |
            /*ADVERTISE_10FULL |*/ ADVERTISE_10HALF | ADVERTISE_CSMA;

    /* Find the connected MII xcvrs.
       Doing this in open() would allow detecting external xcvrs later,
       but takes much time. */
    for (phy = 0, phy_idx = 0; phy < 32 && phy_idx < sizeof(tp->phys); phy++) {
        int mii_status = mdio_read(dev, phy, MII_BMSR);
        if ((mii_status & (BMSR_100BASE4 | BMSR_100HALF | BMSR_10HALF)) == BMSR_100BASE4 ||
            ((mii_status & BMSR_100BASE4) == 0 &&
             (mii_status & (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | BMSR_10HALF)) != 0)) {
            int mii_reg0 = mdio_read(dev, phy, MII_BMCR);
            int mii_advert = mdio_read(dev, phy, MII_ADVERTISE);
            int reg4 = ((mii_status >> 6) & tp->to_advertise) | ADVERTISE_CSMA;
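            /* The BMSR ability bits 15:11 line up with the ADVERTISE bits 9:5
               after a right shift of 6, so this masks the PHY's abilities
               down to the modes we are willing to advertise. */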
            tp->phys[phy_idx] = phy;
            tp->advertising[phy_idx++] = reg4;
            printk(KERN_INFO "%s: MII transceiver #%d "
                   "config %4.4x status %4.4x advertising %4.4x.\n",
                   dev->name, phy, mii_reg0, mii_status, mii_advert);
        }
    }
    tp->mii_cnt = phy_idx;
    if (phy_idx == 0) {
        printk(KERN_INFO "%s: ***WARNING***: No MII transceiver found!\n",
               dev->name);
        tp->phys[0] = 0;
    }
}


/*
 * To quote Arjan van de Ven:
 *   transceiver_voodoo() enables the external UTP plug thingy.
 *   it's called voodoo as I stole this code and cannot cross-reference
 *   it with the specification.
 * Actually it seems to go like this:
 * - GPIO2 enables the MII itself so we can talk to it. The MII gets reset
 *   so any prior MII settings are lost.
 * - GPIO0 enables the TP port so the MII can talk to the network.
 * - a software reset will reset both GPIO pins.
 * I also moved the software reset here, because doing it in xircom_up()
 * required enabling the GPIO pins each time, which reset the MII each time.
 * Thus we couldn't control the MII -- which sucks because we don't know
 * how to handle full-duplex modes so we *must* disable them.
 */
static void transceiver_voodoo(struct net_device *dev)
{
    struct xircom_private *tp = netdev_priv(dev);
    long ioaddr = dev->base_addr;

    /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
    outl(SoftwareReset, ioaddr + CSR0);
    udelay(2);

    /* Deassert reset. */
    outl(tp->csr0, ioaddr + CSR0);

    /* Reset the xcvr interface and turn on heartbeat. */
    outl(0x0008, ioaddr + CSR15);
    udelay(5);	/* The delays are Xircom-recommended to give the
                 * chipset time to reset the actual hardware
                 * on the PCMCIA card
                 */
    outl(0xa8050000, ioaddr + CSR15);
    udelay(5);
    outl(0xa00f0000, ioaddr + CSR15);
    udelay(5);

    outl_CSR6(0, ioaddr);
    //outl_CSR6(FullDuplexBit, ioaddr);
}


static int __devinit xircom_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
    struct net_device *dev;
    struct xircom_private *tp;
    static int board_idx = -1;
    int chip_idx = id->driver_data;
    long ioaddr;
    int i;
    u8 chip_rev;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
    static int printed_version;
    if (!printed_version++)
        printk(version);
#endif

    //printk(KERN_INFO "xircom_init_one(%s)\n", pci_name(pdev));

    board_idx++;

    if (pci_enable_device(pdev))
        return -ENODEV;

    pci_set_master(pdev);

    ioaddr = pci_resource_start(pdev, 0);
    dev = alloc_etherdev(sizeof(*tp));
    if (!dev) {
        printk(KERN_ERR DRV_NAME " %d: cannot alloc etherdev, aborting\n", board_idx);
        return -ENOMEM;
    }
    SET_MODULE_OWNER(dev);
    SET_NETDEV_DEV(dev, &pdev->dev);

    dev->base_addr = ioaddr;
    dev->irq = pdev->irq;

    if (pci_request_regions(pdev, dev->name)) {
        printk(KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", board_idx);
        goto err_out_free_netdev;
    }

    /* Bring the chip out of sleep mode.
       Caution: Snooze mode does not work with some boards! */
    if (xircom_tbl[chip_idx].flags & HAS_ACPI)
        pci_write_config_dword(pdev, PCI_POWERMGMT, 0);

    /* Stop the chip's Tx and Rx processes. */
    outl_CSR6(inl(ioaddr + CSR6) & ~EnableTxRx, ioaddr);
    /* Clear the missed-packet counter. */
    (volatile int)inl(ioaddr + CSR8);

    tp = netdev_priv(dev);

    spin_lock_init(&tp->lock);
    tp->pdev = pdev;
    tp->chip_id = chip_idx;
    /* BugFixes: The 21143-TD hangs with PCI Write-and-Invalidate cycles. */
    /* XXX: is this necessary for Xircom? */
    tp->csr0 = csr0 & ~EnableMWI;

    pci_set_drvdata(pdev, dev);

    /* The lower four bits are the media type. */
    if (board_idx >= 0 && board_idx < MAX_UNITS) {
        tp->default_port = options[board_idx] & 15;
        if ((options[board_idx] & 0x90) || full_duplex[board_idx] > 0)
            tp->full_duplex = 1;
        if (mtu[board_idx] > 0)
            dev->mtu = mtu[board_idx];
    }
    if (dev->mem_start)
        tp->default_port = dev->mem_start;
    if (tp->default_port) {
        if (media_cap[tp->default_port] & MediaAlwaysFD)
            tp->full_duplex = 1;
    }
    if (tp->full_duplex)
        tp->autoneg = 0;
    else
        tp->autoneg = 1;
    tp->speed100 = 1;

    /* The Xircom-specific entries in the device structure. */
    dev->open = &xircom_open;
    dev->hard_start_xmit = &xircom_start_xmit;
    dev->stop = &xircom_close;
    dev->get_stats = &xircom_get_stats;
    dev->do_ioctl = &xircom_ioctl;
#ifdef HAVE_MULTICAST
    dev->set_multicast_list = &set_rx_mode;
#endif
    dev->tx_timeout = xircom_tx_timeout;
    dev->watchdog_timeo = TX_TIMEOUT;
    SET_ETHTOOL_OPS(dev, &ops);

    transceiver_voodoo(dev);

    read_mac_address(dev);

    if (register_netdev(dev))
        goto err_out_cleardev;

    pci_read_config_byte(pdev, PCI_REVISION_ID, &chip_rev);
    printk(KERN_INFO "%s: %s rev %d at %#3lx,",
           dev->name, xircom_tbl[chip_idx].chip_name, chip_rev, ioaddr);
    for (i = 0; i < 6; i++)
        printk("%c%2.2X", i ? ':' : ' ', dev->dev_addr[i]);
    printk(", IRQ %d.\n", dev->irq);

    if (xircom_tbl[chip_idx].flags & HAS_MII) {
        find_mii_transceivers(dev);
        check_duplex(dev);
    }

    return 0;

err_out_cleardev:
    pci_set_drvdata(pdev, NULL);
    pci_release_regions(pdev);
err_out_free_netdev:
    free_netdev(dev);
    return -ENODEV;
}


/* MII transceiver control section.
   Read and write the MII registers using software-generated serial
   MDIO protocol.  See the MII specifications or DP83840A data sheet
   for details. */

/* The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
   met by back-to-back PCI I/O cycles, but we insert a delay to avoid
   "overclocking" issues and to stay safe on future 66 MHz PCI. */
#define mdio_delay() inl(mdio_addr)

/* Read and write the MII registers using software-generated serial
   MDIO protocol.  It is just different enough from the EEPROM protocol
   to not share code.  The maximum data clock rate is 2.5 MHz. */
#define MDIO_SHIFT_CLK		0x10000
#define MDIO_DATA_WRITE0	0x00000
#define MDIO_DATA_WRITE1	0x20000
#define MDIO_ENB		0x00000		/* Ignore the 0x02000 databook setting. */
#define MDIO_ENB_IN		0x40000
#define MDIO_DATA_READ		0x80000

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
    int i;
    int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
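    /* The low 16 bits form a clause-22 read frame: two extra preamble ones,
       start (01), read opcode (10), then the 5-bit PHY and register
       addresses; it is clocked out MSB-first below. */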
    int retval = 0;
    long ioaddr = dev->base_addr;
    long mdio_addr = ioaddr + CSR9;

    /* Establish sync by sending at least 32 logic ones. */
    for (i = 32; i >= 0; i--) {
        outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
        mdio_delay();
        outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
        mdio_delay();
    }
    /* Shift the read command bits out. */
    for (i = 15; i >= 0; i--) {
        int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;

        outl(MDIO_ENB | dataval, mdio_addr);
        mdio_delay();
        outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
        mdio_delay();
    }
    /* Read the two transition bits, the 16 data bits, and the wire-idle bit. */
    for (i = 19; i > 0; i--) {
        outl(MDIO_ENB_IN, mdio_addr);
        mdio_delay();
        retval = (retval << 1) | ((inl(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
        outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
        mdio_delay();
    }
    return (retval >> 1) & 0xffff;
}


static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
    int i;
    int cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
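    /* 32-bit clause-22 write frame: start (01), write opcode (01), the 5-bit
       PHY and register addresses, turnaround (10), then the 16 data bits;
       clocked out MSB-first below. */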
    long ioaddr = dev->base_addr;
    long mdio_addr = ioaddr + CSR9;

    /* Establish sync by sending 32 logic ones. */
    for (i = 32; i >= 0; i--) {
        outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
        mdio_delay();
        outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
        mdio_delay();
    }
    /* Shift the command bits out. */
    for (i = 31; i >= 0; i--) {
        int dataval = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
        outl(MDIO_ENB | dataval, mdio_addr);
        mdio_delay();
        outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
        mdio_delay();
    }
    /* Clear out extra bits. */
    for (i = 2; i > 0; i--) {
        outl(MDIO_ENB_IN, mdio_addr);
        mdio_delay();
        outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
        mdio_delay();
    }
    return;
}


static void
xircom_up(struct net_device *dev)
{
    struct xircom_private *tp = netdev_priv(dev);
    long ioaddr = dev->base_addr;
    int i;

    xircom_init_ring(dev);
    /* Clear the tx ring */
    for (i = 0; i < TX_RING_SIZE; i++) {
        tp->tx_skbuff[i] = NULL;
        tp->tx_ring[i].status = 0;
    }

    if (xircom_debug > 1)
        printk(KERN_DEBUG "%s: xircom_up() irq %d.\n", dev->name, dev->irq);

    outl(virt_to_bus(tp->rx_ring), ioaddr + CSR3);
    outl(virt_to_bus(tp->tx_ring), ioaddr + CSR4);

    tp->saved_if_port = dev->if_port;
    if (dev->if_port == 0)
        dev->if_port = tp->default_port;

    tp->csr6 = TxThresh10 /*| FullDuplexBit*/;	/* XXX: why 10 and not 100? */

    set_rx_mode(dev);

    /* Start the chip's Tx to process setup frame. */
    outl_CSR6(tp->csr6, ioaddr);
    outl_CSR6(tp->csr6 | EnableTx, ioaddr);

    /* Acknowledge all outstanding interrupt sources */
    outl(xircom_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
    /* Enable interrupts by setting the interrupt mask. */
    outl(xircom_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
    /* Enable Rx */
    outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
    /* Rx poll demand */
    outl(0, ioaddr + CSR2);

    /* Tell the net layer we're ready */
    netif_start_queue(dev);

    /* Check current media state */
    xircom_media_change(dev);

    if (xircom_debug > 2) {
        printk(KERN_DEBUG "%s: Done xircom_up(), CSR0 %8.8x, CSR5 %8.8x CSR6 %8.8x.\n",
               dev->name, inl(ioaddr + CSR0), inl(ioaddr + CSR5),
               inl(ioaddr + CSR6));
    }
}


static int
xircom_open(struct net_device *dev)
{
    struct xircom_private *tp = netdev_priv(dev);

    if (request_irq(dev->irq, &xircom_interrupt, IRQF_SHARED, dev->name, dev))
        return -EAGAIN;

    xircom_up(dev);
    tp->open = 1;

    return 0;
}


static void xircom_tx_timeout(struct net_device *dev)
{
    struct xircom_private *tp = netdev_priv(dev);
    long ioaddr = dev->base_addr;

    if (media_cap[dev->if_port] & MediaIsMII) {
        /* Do nothing -- the media monitor should handle this. */
        if (xircom_debug > 1)
            printk(KERN_WARNING "%s: Transmit timeout using MII device.\n",
                   dev->name);
    }

#if defined(way_too_many_messages)
    if (xircom_debug > 3) {
        int i;
        for (i = 0; i < RX_RING_SIZE; i++) {
            u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
            int j;
            printk(KERN_DEBUG "%2d: %8.8x %8.8x %8.8x %8.8x "
                   "%2.2x %2.2x %2.2x.\n",
                   i, (unsigned int)tp->rx_ring[i].status,
                   (unsigned int)tp->rx_ring[i].length,
                   (unsigned int)tp->rx_ring[i].buffer1,
                   (unsigned int)tp->rx_ring[i].buffer2,
                   buf[0], buf[1], buf[2]);
            for (j = 0; buf[j] != 0xee && j < 1600; j++)
                if (j < 100)
                    printk(" %2.2x", buf[j]);
            printk(" j=%d.\n", j);
        }
        printk(KERN_DEBUG "  Rx ring %8.8x: ", (int)tp->rx_ring);
        for (i = 0; i < RX_RING_SIZE; i++)
            printk(" %8.8x", (unsigned int)tp->rx_ring[i].status);
        printk("\n" KERN_DEBUG "  Tx ring %8.8x: ", (int)tp->tx_ring);
        for (i = 0; i < TX_RING_SIZE; i++)
            printk(" %8.8x", (unsigned int)tp->tx_ring[i].status);
        printk("\n");
    }
#endif

    /* Stop and restart the chip's Tx/Rx processes. */
    outl_CSR6(tp->csr6 | EnableRx, ioaddr);
    outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
    /* Trigger an immediate transmit demand. */
    outl(0, ioaddr + CSR1);

    dev->trans_start = jiffies;
    netif_wake_queue(dev);
    tp->stats.tx_errors++;
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void xircom_init_ring(struct net_device *dev)
{
    struct xircom_private *tp = netdev_priv(dev);
    int i;

    tp->tx_full = 0;
    tp->cur_rx = tp->cur_tx = 0;
    tp->dirty_rx = tp->dirty_tx = 0;

    for (i = 0; i < RX_RING_SIZE; i++) {
        tp->rx_ring[i].status = 0;
        tp->rx_ring[i].length = PKT_BUF_SZ;
        tp->rx_ring[i].buffer2 = virt_to_bus(&tp->rx_ring[i+1]);
        tp->rx_skbuff[i] = NULL;
    }
    /* Mark the last entry as wrapping the ring. */
    tp->rx_ring[i-1].length = PKT_BUF_SZ | Rx1RingWrap;
    tp->rx_ring[i-1].buffer2 = virt_to_bus(&tp->rx_ring[0]);

    for (i = 0; i < RX_RING_SIZE; i++) {
        /* Note the receive buffer must be longword aligned.
           dev_alloc_skb() provides 16 byte alignment.  But do *not*
           use skb_reserve() to align the IP header! */
        struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
        tp->rx_skbuff[i] = skb;
        if (skb == NULL)
            break;
        skb->dev = dev;		/* Mark as being used by this device. */
        tp->rx_ring[i].status = Rx0DescOwned;	/* Owned by Xircom chip */
        tp->rx_ring[i].buffer1 = virt_to_bus(skb->data);
    }
    tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

    /* The Tx buffer descriptor is filled in as needed, but we
       do need to clear the ownership bit. */
    for (i = 0; i < TX_RING_SIZE; i++) {
        tp->tx_skbuff[i] = NULL;
        tp->tx_ring[i].status = 0;
        tp->tx_ring[i].buffer2 = virt_to_bus(&tp->tx_ring[i+1]);
#ifdef CARDBUS
        if (tp->chip_id == X3201_3)
            tp->tx_aligned_skbuff[i] = dev_alloc_skb(PKT_BUF_SZ);
#endif /* CARDBUS */
    }
    tp->tx_ring[i-1].buffer2 = virt_to_bus(&tp->tx_ring[0]);
}


static int
xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct xircom_private *tp = netdev_priv(dev);
    int entry;
    u32 flag;

    /* Caution: the write order is important here, set the base address
       with the "ownership" bits last. */

    /* Calculate the next Tx descriptor entry. */
    entry = tp->cur_tx % TX_RING_SIZE;

    tp->tx_skbuff[entry] = skb;
#ifdef CARDBUS
    if (tp->chip_id == X3201_3) {
        memcpy(tp->tx_aligned_skbuff[entry]->data, skb->data, skb->len);
        tp->tx_ring[entry].buffer1 = virt_to_bus(tp->tx_aligned_skbuff[entry]->data);
    } else
#endif
        tp->tx_ring[entry].buffer1 = virt_to_bus(skb->data);

    if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {	/* Typical path */
        flag = Tx1WholePkt;	/* No interrupt */
    } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
        flag = Tx1WholePkt | Tx1ComplIntr;	/* Tx-done intr. */
    } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
        flag = Tx1WholePkt;	/* No Tx-done intr. */
    } else {
        /* Leave room for set_rx_mode() to fill entries. */
        flag = Tx1WholePkt | Tx1ComplIntr;	/* Tx-done intr. */
        tp->tx_full = 1;
    }
    if (entry == TX_RING_SIZE - 1)
        flag |= Tx1WholePkt | Tx1ComplIntr | Tx1RingWrap;

    tp->tx_ring[entry].length = skb->len | flag;
    tp->tx_ring[entry].status = Tx0DescOwned;	/* Pass ownership to the chip. */
    tp->cur_tx++;
    if (tp->tx_full)
        netif_stop_queue(dev);
    else
        netif_wake_queue(dev);

    /* Trigger an immediate transmit demand. */
    outl(0, dev->base_addr + CSR1);

    dev->trans_start = jiffies;

    return 0;
}


static void xircom_media_change(struct net_device *dev)
{
    struct xircom_private *tp = netdev_priv(dev);
    long ioaddr = dev->base_addr;
    u16 reg0, reg1, reg4, reg5;
    u32 csr6 = inl(ioaddr + CSR6), newcsr6;

    /* reset status first */
    mdio_read(dev, tp->phys[0], MII_BMCR);
    mdio_read(dev, tp->phys[0], MII_BMSR);

    reg0 = mdio_read(dev, tp->phys[0], MII_BMCR);
    reg1 = mdio_read(dev, tp->phys[0], MII_BMSR);

    if (reg1 & BMSR_LSTATUS) {
        /* link is up */
        if (reg0 & BMCR_ANENABLE) {
            /* autonegotiation is enabled */
            reg4 = mdio_read(dev, tp->phys[0], MII_ADVERTISE);
            reg5 = mdio_read(dev, tp->phys[0], MII_LPA);
            if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
                tp->speed100 = 1;
                tp->full_duplex = 1;
            } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
                tp->speed100 = 1;
                tp->full_duplex = 0;
            } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
                tp->speed100 = 0;
                tp->full_duplex = 1;
            } else {
                tp->speed100 = 0;
                tp->full_duplex = 0;
            }
        } else {
            /* autonegotiation is disabled */
            if (reg0 & BMCR_SPEED100)
                tp->speed100 = 1;
            else
                tp->speed100 = 0;
            if (reg0 & BMCR_FULLDPLX)
                tp->full_duplex = 1;
            else
                tp->full_duplex = 0;
        }
        printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
               dev->name,
               tp->speed100 ? "100" : "10",
               tp->full_duplex ? "full" : "half");
        netif_carrier_on(dev);
        newcsr6 = csr6 & ~FullDuplexBit;
        if (tp->full_duplex)
            newcsr6 |= FullDuplexBit;
        if (newcsr6 != csr6)
            outl_CSR6(newcsr6, ioaddr);	/* outl_CSR6 adds the CSR6 offset itself */
    } else {
        printk(KERN_DEBUG "%s: Link is down\n", dev->name);
        netif_carrier_off(dev);
    }
}


static void check_duplex(struct net_device *dev)
{
    struct xircom_private *tp = netdev_priv(dev);
    u16 reg0;

    mdio_write(dev, tp->phys[0], MII_BMCR, BMCR_RESET);
    udelay(500);
    while (mdio_read(dev, tp->phys[0], MII_BMCR) & BMCR_RESET);

    reg0 = mdio_read(dev, tp->phys[0], MII_BMCR);
    mdio_write(dev, tp->phys[0], MII_ADVERTISE, tp->advertising[0]);

    if (tp->autoneg) {
        reg0 &= ~(BMCR_SPEED100 | BMCR_FULLDPLX);
        reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
    } else {
        reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
        if (tp->speed100)
            reg0 |= BMCR_SPEED100;
        if (tp->full_duplex)
            reg0 |= BMCR_FULLDPLX;
        printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
               dev->name,
               tp->speed100 ? "100" : "10",
               tp->full_duplex ? "full" : "half");
    }
    mdio_write(dev, tp->phys[0], MII_BMCR, reg0);
}


/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
    struct net_device *dev = dev_instance;
    struct xircom_private *tp = netdev_priv(dev);
    long ioaddr = dev->base_addr;
    int csr5, work_budget = max_interrupt_work;
    int handled = 0;

    spin_lock(&tp->lock);

    do {
        csr5 = inl(ioaddr + CSR5);
        /* Acknowledge all of the current interrupt sources ASAP. */
        outl(csr5 & 0x0001ffff, ioaddr + CSR5);

        if (xircom_debug > 4)
            printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
                   dev->name, csr5, inl(dev->base_addr + CSR5));

        if (csr5 == 0xffffffff)
            break;	/* all bits set, assume PCMCIA card removed */

        if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
            break;

        handled = 1;

        if (csr5 & (RxIntr | RxNoBuf))
            work_budget -= xircom_rx(dev);

        if (csr5 & (TxNoBuf | TxDied | TxIntr)) {
            unsigned int dirty_tx;

            for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
                 dirty_tx++) {
                int entry = dirty_tx % TX_RING_SIZE;
                int status = tp->tx_ring[entry].status;

                if (status < 0)
                    break;	/* It still hasn't been Txed */
                /* Check for Rx filter setup frames. */
                if (tp->tx_skbuff[entry] == NULL)
                    continue;

                if (status & Tx0DescError) {
                    /* There was a major error; log it. */
#ifndef final_version
                    if (xircom_debug > 1)
                        printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
                               dev->name, status);
#endif
                    tp->stats.tx_errors++;
                    if (status & Tx0ManyColl) {
                        tp->stats.tx_aborted_errors++;
                    }
                    if (status & Tx0NoCarrier) tp->stats.tx_carrier_errors++;
                    if (status & Tx0LateColl) tp->stats.tx_window_errors++;
                    if (status & Tx0Underflow) tp->stats.tx_fifo_errors++;
                } else {
                    tp->stats.tx_bytes += tp->tx_ring[entry].length & 0x7ff;
                    tp->stats.collisions += (status >> 3) & 15;
                    tp->stats.tx_packets++;
                }

                /* Free the original skb. */
                dev_kfree_skb_irq(tp->tx_skbuff[entry]);
                tp->tx_skbuff[entry] = NULL;
            }

#ifndef final_version
            if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
                printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
                       dev->name, dirty_tx, tp->cur_tx, tp->tx_full);
                dirty_tx += TX_RING_SIZE;
            }
#endif

            if (tp->tx_full &&
                tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
                /* The ring is no longer full */
                tp->tx_full = 0;

            if (tp->tx_full)
                netif_stop_queue(dev);
            else
                netif_wake_queue(dev);

            tp->dirty_tx = dirty_tx;
            if (csr5 & TxDied) {
                if (xircom_debug > 2)
                    printk(KERN_WARNING "%s: The transmitter stopped."
                           "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
                           dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
                outl_CSR6(tp->csr6 | EnableRx, ioaddr);
                outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
            }
        }

        /* Log errors. */
        if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
            if (csr5 & LinkChange)
                xircom_media_change(dev);
            if (csr5 & TxFIFOUnderflow) {
                if ((tp->csr6 & TxThreshMask) != TxThreshMask)
                    tp->csr6 += (1 << TxThreshShift);	/* Bump up the Tx threshold */
                else
                    tp->csr6 |= TxStoreForw;	/* Store-n-forward. */
                /* Restart the transmit process. */
                outl_CSR6(tp->csr6 | EnableRx, ioaddr);
                outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
            }
            if (csr5 & RxDied) {	/* Missed a Rx frame. */
                tp->stats.rx_errors++;
                tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
                outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
            }
            /* Clear all error sources, including undocumented ones! */
            outl(0x0800f7ba, ioaddr + CSR5);
        }
        if (--work_budget < 0) {
            if (xircom_debug > 1)
                printk(KERN_WARNING "%s: Too much work during an interrupt, "
                       "csr5=0x%8.8x.\n", dev->name, csr5);
            /* Acknowledge all interrupt sources. */
            outl(0x8001ffff, ioaddr + CSR5);
            break;
        }
    } while (1);

    if (xircom_debug > 3)
        printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
               dev->name, inl(ioaddr + CSR5));

    spin_unlock(&tp->lock);
    return IRQ_RETVAL(handled);
}


static int
xircom_rx(struct net_device *dev)
{
    struct xircom_private *tp = netdev_priv(dev);
    int entry = tp->cur_rx % RX_RING_SIZE;
    int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
    int work_done = 0;

    if (xircom_debug > 4)
        printk(KERN_DEBUG " In xircom_rx(), entry %d %8.8x.\n", entry,
               tp->rx_ring[entry].status);
    /* If we own the next entry, it's a new packet. Send it up. */
    while (tp->rx_ring[entry].status >= 0) {
        s32 status = tp->rx_ring[entry].status;

        if (xircom_debug > 5)
            printk(KERN_DEBUG " In xircom_rx(), entry %d %8.8x.\n", entry,
                   tp->rx_ring[entry].status);
        if (--rx_work_limit < 0)
            break;
        if ((status & 0x38008300) != 0x0300) {
            if ((status & 0x38000300) != 0x0300) {
                /* Ignore earlier buffers. */
                if ((status & 0xffff) != 0x7fff) {
                    if (xircom_debug > 1)
                        printk(KERN_WARNING "%s: Oversized Ethernet frame "
                               "spanned multiple buffers, status %8.8x!\n",
                               dev->name, status);
                    tp->stats.rx_length_errors++;
                }
            } else if (status & Rx0DescError) {
                /* There was a fatal error. */
                if (xircom_debug > 2)
                    printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
                           dev->name, status);
                tp->stats.rx_errors++;	/* end of a packet.*/
                if (status & (Rx0Runt | Rx0HugeFrame)) tp->stats.rx_length_errors++;
                if (status & Rx0CRCError) tp->stats.rx_crc_errors++;
            }
        } else {
            /* Omit the four octet CRC from the length. */
            short pkt_len = ((status >> 16) & 0x7ff) - 4;
            struct sk_buff *skb;

#ifndef final_version
            if (pkt_len > 1518) {
                printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
                       dev->name, pkt_len, pkt_len);
                pkt_len = 1518;
                tp->stats.rx_length_errors++;
            }
#endif
            /* Check if the packet is long enough to accept without copying
               to a minimally-sized skbuff. */
            if (pkt_len < rx_copybreak
                && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                skb->dev = dev;
                skb_reserve(skb, 2);	/* 16 byte align the IP header */
#if ! defined(__alpha__)
                eth_copy_and_sum(skb, bus_to_virt(tp->rx_ring[entry].buffer1),
                                 pkt_len, 0);
                skb_put(skb, pkt_len);
#else
                memcpy(skb_put(skb, pkt_len),
                       bus_to_virt(tp->rx_ring[entry].buffer1), pkt_len);
#endif
                work_done++;
            } else {	/* Pass up the skb already on the Rx ring. */
                skb_put(skb = tp->rx_skbuff[entry], pkt_len);
                tp->rx_skbuff[entry] = NULL;
            }
            skb->protocol = eth_type_trans(skb, dev);
            netif_rx(skb);
            dev->last_rx = jiffies;
            tp->stats.rx_packets++;
            tp->stats.rx_bytes += pkt_len;
        }
        entry = (++tp->cur_rx) % RX_RING_SIZE;
    }

    /* Refill the Rx ring buffers. */
    for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
        entry = tp->dirty_rx % RX_RING_SIZE;
        if (tp->rx_skbuff[entry] == NULL) {
            struct sk_buff *skb;
            skb = tp->rx_skbuff[entry] = dev_alloc_skb(PKT_BUF_SZ);
            if (skb == NULL)
                break;
            skb->dev = dev;	/* Mark as being used by this device. */
            tp->rx_ring[entry].buffer1 = virt_to_bus(skb->data);
            work_done++;
        }
        tp->rx_ring[entry].status = Rx0DescOwned;
    }

    return work_done;
}


static void
xircom_down(struct net_device *dev)
{
    long ioaddr = dev->base_addr;
    struct xircom_private *tp = netdev_priv(dev);

    /* Disable interrupts by clearing the interrupt mask. */
    outl(0, ioaddr + CSR7);
    /* Stop the chip's Tx and Rx processes. */
    outl_CSR6(inl(ioaddr + CSR6) & ~EnableTxRx, ioaddr);

    if (inl(ioaddr + CSR6) != 0xffffffff)
        tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;

    dev->if_port = tp->saved_if_port;
}


static int
xircom_close(struct net_device *dev)
{
    long ioaddr = dev->base_addr;
    struct xircom_private *tp = netdev_priv(dev);
    int i;

    if (xircom_debug > 1)
        printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
               dev->name, inl(ioaddr + CSR5));

    netif_stop_queue(dev);

    if (netif_device_present(dev))
        xircom_down(dev);

    free_irq(dev->irq, dev);

    /* Free all the skbuffs in the Rx queue. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        struct sk_buff *skb = tp->rx_skbuff[i];
        tp->rx_skbuff[i] = NULL;
        tp->rx_ring[i].status = 0;		/* Not owned by Xircom chip. */
        tp->rx_ring[i].length = 0;
        tp->rx_ring[i].buffer1 = 0xBADF00D0;	/* An invalid address. */
        if (skb) {
            dev_kfree_skb(skb);
        }
    }
    for (i = 0; i < TX_RING_SIZE; i++) {
        if (tp->tx_skbuff[i])
            dev_kfree_skb(tp->tx_skbuff[i]);
        tp->tx_skbuff[i] = NULL;
    }

    tp->open = 0;
    return 0;
}


static struct net_device_stats *xircom_get_stats(struct net_device *dev)
{
    struct xircom_private *tp = netdev_priv(dev);
    long ioaddr = dev->base_addr;

    if (netif_device_present(dev))
        tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;

    return &tp->stats;
}

static int xircom_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
    struct xircom_private *tp = netdev_priv(dev);
    ecmd->supported =
        SUPPORTED_10baseT_Half |
        SUPPORTED_10baseT_Full |
        SUPPORTED_100baseT_Half |
        SUPPORTED_100baseT_Full |
        SUPPORTED_Autoneg |
        SUPPORTED_MII;

    ecmd->advertising = ADVERTISED_MII;
    if (tp->advertising[0] & ADVERTISE_10HALF)
        ecmd->advertising |= ADVERTISED_10baseT_Half;
    if (tp->advertising[0] & ADVERTISE_10FULL)
        ecmd->advertising |= ADVERTISED_10baseT_Full;
    if (tp->advertising[0] & ADVERTISE_100HALF)
        ecmd->advertising |= ADVERTISED_100baseT_Half;
    if (tp->advertising[0] & ADVERTISE_100FULL)
        ecmd->advertising |= ADVERTISED_100baseT_Full;
    if (tp->autoneg) {
        ecmd->advertising |= ADVERTISED_Autoneg;
        ecmd->autoneg = AUTONEG_ENABLE;
    } else
        ecmd->autoneg = AUTONEG_DISABLE;

    ecmd->port = PORT_MII;
    ecmd->transceiver = XCVR_INTERNAL;
    ecmd->phy_address = tp->phys[0];
    ecmd->speed = tp->speed100 ? SPEED_100 : SPEED_10;
    ecmd->duplex = tp->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
    ecmd->maxtxpkt = TX_RING_SIZE / 2;
    ecmd->maxrxpkt = 0;
    return 0;
}

static int xircom_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
    struct xircom_private *tp = netdev_priv(dev);
    u16 autoneg, speed100, full_duplex;

    autoneg = (ecmd->autoneg == AUTONEG_ENABLE);
    speed100 = (ecmd->speed == SPEED_100);
    full_duplex = (ecmd->duplex == DUPLEX_FULL);

    tp->autoneg = autoneg;
    if (speed100 != tp->speed100 ||
        full_duplex != tp->full_duplex) {
        tp->speed100 = speed100;
        tp->full_duplex = full_duplex;
        /* change advertising bits */
        tp->advertising[0] &= ~(ADVERTISE_10HALF |
                                ADVERTISE_10FULL |
                                ADVERTISE_100HALF |
                                ADVERTISE_100FULL |
                                ADVERTISE_100BASE4);
        if (speed100) {
            if (full_duplex)
                tp->advertising[0] |= ADVERTISE_100FULL;
            else
                tp->advertising[0] |= ADVERTISE_100HALF;
        } else {
            if (full_duplex)
                tp->advertising[0] |= ADVERTISE_10FULL;
            else
                tp->advertising[0] |= ADVERTISE_10HALF;
        }
    }
    check_duplex(dev);
    return 0;
}

static void xircom_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
    struct xircom_private *tp = netdev_priv(dev);
    strcpy(info->driver, DRV_NAME);
    strcpy(info->version, DRV_VERSION);
    strcpy(info->bus_info, pci_name(tp->pdev));
}

static struct ethtool_ops ops = {
    .get_settings = xircom_get_settings,
    .set_settings = xircom_set_settings,
    .get_drvinfo = xircom_get_drvinfo,
};

/* Provide ioctl() calls to examine the MII xcvr state. */
static int xircom_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    struct xircom_private *tp = netdev_priv(dev);
    u16 *data = (u16 *)&rq->ifr_ifru;
    int phy = tp->phys[0] & 0x1f;
    unsigned long flags;

    switch (cmd) {
    /* Legacy mii-diag interface */
    case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
        if (tp->mii_cnt)
            data[0] = phy;
        else
            return -ENODEV;
        return 0;
    case SIOCGMIIREG:		/* Read MII PHY register. */
        save_flags(flags);
        cli();
        data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
        restore_flags(flags);
        return 0;
    case SIOCSMIIREG:		/* Write MII PHY register. */
        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        save_flags(flags);
        cli();
        if (data[0] == tp->phys[0]) {
            u16 value = data[2];
            switch (data[1]) {
            case 0:
                if (value & (BMCR_RESET | BMCR_ANENABLE))
                    /* Autonegotiation. */
                    tp->autoneg = 1;
                else {
                    tp->full_duplex = (value & BMCR_FULLDPLX) ? 1 : 0;
                    tp->autoneg = 0;
                }
                break;
            case 4:
                tp->advertising[0] = value;
                break;
            }
            check_duplex(dev);
        }
        mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
        restore_flags(flags);
        return 0;
    default:
        return -EOPNOTSUPP;
    }

    return -EOPNOTSUPP;
}

/* Set or clear the multicast filter for this adaptor.
   Note that we only use exclusion around actually queueing the
   new frame, not around filling tp->setup_frame.  This is non-deterministic
   when re-entered but still correct. */
static void set_rx_mode(struct net_device *dev)
{
    struct xircom_private *tp = netdev_priv(dev);
    struct dev_mc_list *mclist;
    long ioaddr = dev->base_addr;
    int csr6 = inl(ioaddr + CSR6);
    u16 *eaddrs, *setup_frm;
    u32 tx_flags;
    int i;

    tp->csr6 &= ~(AllMultiBit | PromiscBit | HashFilterBit);
    csr6 &= ~(AllMultiBit | PromiscBit | HashFilterBit);
    if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
        tp->csr6 |= PromiscBit;
        csr6 |= PromiscBit;
        goto out;
    }

    if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
        /* Too many to filter well -- accept all multicasts. */
        tp->csr6 |= AllMultiBit;
        csr6 |= AllMultiBit;
        goto out;
    }

    tx_flags = Tx1WholePkt | Tx1SetupPkt | PKT_SETUP_SZ;

    /* Note that only the low-address shortword of setup_frame is valid! */
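    /* The 192-byte setup frame holds 16 perfect-filter entries of three
       longwords each; each longword carries one 16-bit slice of a MAC
       address in its low shortword, hence setup_frm advances by two u16
       slots per store below. */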
    setup_frm = tp->setup_frame;
    mclist = dev->mc_list;

    /* Fill the first entry with our physical address. */
    eaddrs = (u16 *)dev->dev_addr;
    *setup_frm = cpu_to_le16(eaddrs[0]); setup_frm += 2;
    *setup_frm = cpu_to_le16(eaddrs[1]); setup_frm += 2;
    *setup_frm = cpu_to_le16(eaddrs[2]); setup_frm += 2;

    if (dev->mc_count > 14) {	/* Must use a multicast hash table. */
        u32 *hash_table = (u32 *)(tp->setup_frame + 4 * 12);
        u32 hash, hash2;

        tx_flags |= Tx1HashSetup;
        tp->csr6 |= HashFilterBit;
        csr6 |= HashFilterBit;

        /* Fill the unused 3 entries with the broadcast address.
           At least one entry *must* contain the broadcast address!!!*/
        for (i = 0; i < 3; i++) {
            *setup_frm = 0xffff; setup_frm += 2;
            *setup_frm = 0xffff; setup_frm += 2;
            *setup_frm = 0xffff; setup_frm += 2;
        }

        /* Truly brain-damaged hash filter layout */
        /* XXX: not sure if I should take the last or the first 9 bits */
        for (i = 0; i < dev->mc_count; i++, mclist = mclist->next) {
            u32 *hptr;
            hash = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
            if (hash < 384) {
                hash2 = hash + ((hash >> 4) << 4) +
                    ((hash >> 5) << 5);
            } else {
                hash -= 384;
                hash2 = 64 + hash + (hash >> 4) * 80;
            }
            hptr = &hash_table[hash2 & ~0x1f];
            *hptr |= cpu_to_le32(1 << (hash2 & 0x1f));
        }
    } else {
        /* We have <= 14 mcast addresses so we can use Xircom's
           wonderful 16-address perfect filter. */
        for (i = 0; i < dev->mc_count; i++, mclist = mclist->next) {
            eaddrs = (u16 *)mclist->dmi_addr;
            *setup_frm = cpu_to_le16(eaddrs[0]); setup_frm += 2;
            *setup_frm = cpu_to_le16(eaddrs[1]); setup_frm += 2;
            *setup_frm = cpu_to_le16(eaddrs[2]); setup_frm += 2;
        }
        /* Fill the unused entries with the broadcast address.
           At least one entry *must* contain the broadcast address!!!*/
        for (; i < 15; i++) {
            *setup_frm = 0xffff; setup_frm += 2;
            *setup_frm = 0xffff; setup_frm += 2;
            *setup_frm = 0xffff; setup_frm += 2;
        }
    }

    /* Now add this frame to the Tx list. */
    if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
        /* Same setup recently queued, we need not add it. */
        /* XXX: Huh? All it means is that the Tx list is full...*/
    } else {
        unsigned long flags;
        unsigned int entry;
        int dummy = -1;

        save_flags(flags); cli();
        entry = tp->cur_tx++ % TX_RING_SIZE;

        if (entry != 0) {
            /* Avoid a chip errata by prefixing a dummy entry. */
            tp->tx_skbuff[entry] = NULL;
            tp->tx_ring[entry].length =
                (entry == TX_RING_SIZE - 1) ? Tx1RingWrap : 0;
            tp->tx_ring[entry].buffer1 = 0;
            /* race with chip, set Tx0DescOwned later */
            dummy = entry;
            entry = tp->cur_tx++ % TX_RING_SIZE;
        }

        tp->tx_skbuff[entry] = NULL;
        /* Put the setup frame on the Tx list. */
        if (entry == TX_RING_SIZE - 1)
            tx_flags |= Tx1RingWrap;	/* Wrap ring. */
        tp->tx_ring[entry].length = tx_flags;
        tp->tx_ring[entry].buffer1 = virt_to_bus(tp->setup_frame);
        tp->tx_ring[entry].status = Tx0DescOwned;
        if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2) {
            tp->tx_full = 1;
            netif_stop_queue(dev);
        }
        if (dummy >= 0)
            tp->tx_ring[dummy].status = Tx0DescOwned;
        restore_flags(flags);
        /* Trigger an immediate transmit demand. */
        outl(0, ioaddr + CSR1);
    }

out:
    outl_CSR6(csr6, ioaddr);
}


static struct pci_device_id xircom_pci_table[] = {
    { 0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, X3201_3 },
    {0},
};
MODULE_DEVICE_TABLE(pci, xircom_pci_table);


#ifdef CONFIG_PM
static int xircom_suspend(struct pci_dev *pdev, pm_message_t state)
{
    struct net_device *dev = pci_get_drvdata(pdev);
    struct xircom_private *tp = netdev_priv(dev);
    printk(KERN_INFO "xircom_suspend(%s)\n", dev->name);
    if (tp->open)
        xircom_down(dev);

    pci_save_state(pdev);
    pci_disable_device(pdev);
    pci_set_power_state(pdev, 3);

    return 0;
}


static int xircom_resume(struct pci_dev *pdev)
{
    struct net_device *dev = pci_get_drvdata(pdev);
    struct xircom_private *tp = netdev_priv(dev);
    printk(KERN_INFO "xircom_resume(%s)\n", dev->name);

    pci_set_power_state(pdev, 0);
    pci_enable_device(pdev);
    pci_restore_state(pdev);

    /* Bring the chip out of sleep mode.
       Caution: Snooze mode does not work with some boards! */
    if (xircom_tbl[tp->chip_id].flags & HAS_ACPI)
        pci_write_config_dword(tp->pdev, PCI_POWERMGMT, 0);

    transceiver_voodoo(dev);
    if (xircom_tbl[tp->chip_id].flags & HAS_MII)
        check_duplex(dev);

    if (tp->open)
        xircom_up(dev);
    return 0;
}
#endif /* CONFIG_PM */


static void __devexit xircom_remove_one(struct pci_dev *pdev)
{
    struct net_device *dev = pci_get_drvdata(pdev);

    printk(KERN_INFO "xircom_remove_one(%s)\n", dev->name);
    unregister_netdev(dev);
    pci_release_regions(pdev);
    free_netdev(dev);
    pci_set_drvdata(pdev, NULL);
}


static struct pci_driver xircom_driver = {
    .name		= DRV_NAME,
    .id_table	= xircom_pci_table,
    .probe		= xircom_init_one,
    .remove		= __devexit_p(xircom_remove_one),
#ifdef CONFIG_PM
    .suspend	= xircom_suspend,
    .resume		= xircom_resume
#endif /* CONFIG_PM */
};


static int __init xircom_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
    printk(version);
#endif
    return pci_module_init(&xircom_driver);
}


static void __exit xircom_exit(void)
{
    pci_unregister_driver(&xircom_driver);
}

module_init(xircom_init)
module_exit(xircom_exit)

/*
 * Local variables:
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 * End:
 */