]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/tulip/xircom_tulip_cb.c
[SK_BUFF]: Introduce skb_copy_from_linear_data{_offset}
[net-next-2.6.git] / drivers / net / tulip / xircom_tulip_cb.c
CommitLineData
1da177e4
LT
1/* xircom_tulip_cb.c: A Xircom CBE-100 ethernet driver for Linux. */
2/*
3 Written/copyright 1994-1999 by Donald Becker.
4
5 This software may be used and distributed according to the terms
6 of the GNU General Public License, incorporated herein by reference.
7
8 The author may be reached as becker@scyld.com, or C/O
9 Scyld Computing Corporation
10 410 Severn Ave., Suite 210
11 Annapolis MD 21403
12
1da177e4
LT
13*/
14
15#define DRV_NAME "xircom_tulip_cb"
03a8c661
JG
16#define DRV_VERSION "0.92"
17#define DRV_RELDATE "June 27, 2006"
1da177e4
LT
18
19/* A few user-configurable values. */
20
21#define xircom_debug debug
22#ifdef XIRCOM_DEBUG
23static int xircom_debug = XIRCOM_DEBUG;
24#else
25static int xircom_debug = 1;
26#endif
27
28/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
29static int max_interrupt_work = 25;
30
31#define MAX_UNITS 4
32/* Used to pass the full-duplex flag, etc. */
33static int full_duplex[MAX_UNITS];
34static int options[MAX_UNITS];
35static int mtu[MAX_UNITS]; /* Jumbo MTU for interfaces. */
36
37/* Keep the ring sizes a power of two for efficiency.
38 Making the Tx ring too large decreases the effectiveness of channel
39 bonding and packet priority.
40 There are no ill effects from too-large receive rings. */
41#define TX_RING_SIZE 16
42#define RX_RING_SIZE 32
43
44/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
45#ifdef __alpha__
46static int rx_copybreak = 1518;
47#else
48static int rx_copybreak = 100;
49#endif
50
51/*
52 Set the bus performance register.
53 Typical: Set 16 longword cache alignment, no burst limit.
54 Cache alignment bits 15:14 Burst length 13:8
55 0000 No alignment 0x00000000 unlimited 0800 8 longwords
56 4000 8 longwords 0100 1 longword 1000 16 longwords
57 8000 16 longwords 0200 2 longwords 2000 32 longwords
58 C000 32 longwords 0400 4 longwords
59 Warning: many older 486 systems are broken and require setting 0x00A04800
60 8 longword cache alignment, 8 longword burst.
61 ToDo: Non-Intel setting could be better.
62*/
63
64#if defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
65static int csr0 = 0x01A00000 | 0xE000;
66#elif defined(__powerpc__)
67static int csr0 = 0x01B00000 | 0x8000;
68#elif defined(__sparc__)
69static int csr0 = 0x01B00080 | 0x8000;
70#elif defined(__i386__)
71static int csr0 = 0x01A00000 | 0x8000;
72#else
73#warning Processor architecture undefined!
74static int csr0 = 0x00A00000 | 0x4800;
75#endif
76
77/* Operational parameters that usually are not changed. */
78/* Time in jiffies before concluding the transmitter is hung. */
79#define TX_TIMEOUT (4 * HZ)
80#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
81#define PKT_SETUP_SZ 192 /* Size of the setup frame */
82
83/* PCI registers */
84#define PCI_POWERMGMT 0x40
85
1da177e4
LT
86#include <linux/module.h>
87#include <linux/moduleparam.h>
88#include <linux/kernel.h>
89#include <linux/pci.h>
90#include <linux/netdevice.h>
91#include <linux/etherdevice.h>
92#include <linux/delay.h>
93#include <linux/init.h>
94#include <linux/mii.h>
95#include <linux/ethtool.h>
96#include <linux/crc32.h>
97
98#include <asm/io.h>
99#include <asm/processor.h> /* Processor type for cache alignment. */
100#include <asm/uaccess.h>
101
102
103/* These identify the driver base version and may not be removed. */
104static char version[] __devinitdata =
105KERN_INFO DRV_NAME ".c derived from tulip.c:v0.91 4/14/99 becker@scyld.com\n"
106KERN_INFO " unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE "\n";
107
108MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
109MODULE_DESCRIPTION("Xircom CBE-100 ethernet driver");
110MODULE_LICENSE("GPL v2");
111MODULE_VERSION(DRV_VERSION);
112
113module_param(debug, int, 0);
114module_param(max_interrupt_work, int, 0);
115module_param(rx_copybreak, int, 0);
116module_param(csr0, int, 0);
117
118module_param_array(options, int, NULL, 0);
119module_param_array(full_duplex, int, NULL, 0);
120
121#define RUN_AT(x) (jiffies + (x))
122
123/*
124 Theory of Operation
125
126I. Board Compatibility
127
128This device driver was forked from the driver for the DECchip "Tulip",
129Digital's single-chip ethernet controllers for PCI. It supports Xircom's
130almost-Tulip-compatible CBE-100 CardBus adapters.
131
132II. Board-specific settings
133
134PCI bus devices are configured by the system at boot time, so no jumpers
135need to be set on the board. The system BIOS preferably should assign the
136PCI INTA signal to an otherwise unused system IRQ line.
137
138III. Driver operation
139
140IIIa. Ring buffers
141
142The Xircom can use either ring buffers or lists of Tx and Rx descriptors.
143This driver uses statically allocated rings of Rx and Tx descriptors, set at
144compile time by RX/TX_RING_SIZE. This version of the driver allocates skbuffs
145for the Rx ring buffers at open() time and passes the skb->data field to the
146Xircom as receive data buffers. When an incoming frame is less than
147RX_COPYBREAK bytes long, a fresh skbuff is allocated and the frame is
148copied to the new skbuff. When the incoming frame is larger, the skbuff is
149passed directly up the protocol stack and replaced by a newly allocated
150skbuff.
151
152The RX_COPYBREAK value is chosen to trade-off the memory wasted by
153using a full-sized skbuff for small frames vs. the copying costs of larger
154frames. For small frames the copying cost is negligible (esp. considering
155that we are pre-loading the cache with immediately useful header
156information). For large frames the copying cost is non-trivial, and the
157larger copy might flush the cache of useful data. A subtle aspect of this
158choice is that the Xircom only receives into longword aligned buffers, thus
159the IP header at offset 14 isn't longword aligned for further processing.
160Copied frames are put into the new skbuff at an offset of "+2", thus copying
161has the beneficial effect of aligning the IP header and preloading the
162cache.
163
164IIIC. Synchronization
165The driver runs as two independent, single-threaded flows of control. One
166is the send-packet routine, which enforces single-threaded use by the
167dev->tbusy flag. The other thread is the interrupt handler, which is single
168threaded by the hardware and other software.
169
170The send packet thread has partial control over the Tx ring and 'dev->tbusy'
171flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
172queue slot is empty, it clears the tbusy flag when finished otherwise it sets
173the 'tp->tx_full' flag.
174
175The interrupt handler has exclusive control over the Rx ring and records stats
176from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
177we can't avoid the interrupt overhead by having the Tx routine reap the Tx
178stats.) After reaping the stats, it marks the queue entry as empty by setting
179the 'base' to zero. Iff the 'tp->tx_full' flag is set, it clears both the
180tx_full and tbusy flags.
181
182IV. Notes
183
184IVb. References
185
186http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
187http://www.digital.com (search for current 21*4* datasheets and "21X4 SROM")
188http://www.national.com/pf/DP/DP83840A.html
189
190IVc. Errata
191
192*/
193
194/* A full-duplex map for media types. */
195enum MediaIs {
196 MediaIsFD = 1, MediaAlwaysFD=2, MediaIsMII=4, MediaIsFx=8,
197 MediaIs100=16};
198static const char media_cap[] =
199{0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20 };
200
201/* Offsets to the Command and Status Registers, "CSRs". All accesses
202 must be longword instructions and quadword aligned. */
203enum xircom_offsets {
204 CSR0=0, CSR1=0x08, CSR2=0x10, CSR3=0x18, CSR4=0x20, CSR5=0x28,
205 CSR6=0x30, CSR7=0x38, CSR8=0x40, CSR9=0x48, CSR10=0x50, CSR11=0x58,
206 CSR12=0x60, CSR13=0x68, CSR14=0x70, CSR15=0x78, CSR16=0x04, };
207
208/* The bits in the CSR5 status registers, mostly interrupt sources. */
209enum status_bits {
210 LinkChange=0x08000000,
211 NormalIntr=0x10000, NormalIntrMask=0x00014045,
212 AbnormalIntr=0x8000, AbnormalIntrMask=0x0a00a5a2,
213 ReservedIntrMask=0xe0001a18,
214 EarlyRxIntr=0x4000, BusErrorIntr=0x2000,
215 EarlyTxIntr=0x400, RxDied=0x100, RxNoBuf=0x80, RxIntr=0x40,
216 TxFIFOUnderflow=0x20, TxNoBuf=0x04, TxDied=0x02, TxIntr=0x01,
217};
218
219enum csr0_control_bits {
220 EnableMWI=0x01000000, EnableMRL=0x00800000,
221 EnableMRM=0x00200000, EqualBusPrio=0x02,
222 SoftwareReset=0x01,
223};
224
225enum csr6_control_bits {
226 ReceiveAllBit=0x40000000, AllMultiBit=0x80, PromiscBit=0x40,
227 HashFilterBit=0x01, FullDuplexBit=0x0200,
228 TxThresh10=0x400000, TxStoreForw=0x200000,
229 TxThreshMask=0xc000, TxThreshShift=14,
230 EnableTx=0x2000, EnableRx=0x02,
231 ReservedZeroMask=0x8d930134, ReservedOneMask=0x320c0000,
232 EnableTxRx=(EnableTx | EnableRx),
233};
234
235
236enum tbl_flag {
237 HAS_MII=1, HAS_ACPI=2,
238};
239static struct xircom_chip_table {
240 char *chip_name;
241 int valid_intrs; /* CSR7 interrupt enable settings */
242 int flags;
243} xircom_tbl[] = {
244 { "Xircom Cardbus Adapter",
245 LinkChange | NormalIntr | AbnormalIntr | BusErrorIntr |
246 RxDied | RxNoBuf | RxIntr | TxFIFOUnderflow | TxNoBuf | TxDied | TxIntr,
247 HAS_MII | HAS_ACPI, },
248 { NULL, },
249};
250/* This matches the table above. */
251enum chips {
252 X3201_3,
253};
254
255
256/* The Xircom Rx and Tx buffer descriptors. */
257struct xircom_rx_desc {
258 s32 status;
259 s32 length;
260 u32 buffer1, buffer2;
261};
262
263struct xircom_tx_desc {
264 s32 status;
265 s32 length;
266 u32 buffer1, buffer2; /* We use only buffer 1. */
267};
268
269enum tx_desc0_status_bits {
270 Tx0DescOwned=0x80000000, Tx0DescError=0x8000, Tx0NoCarrier=0x0800,
271 Tx0LateColl=0x0200, Tx0ManyColl=0x0100, Tx0Underflow=0x02,
272};
273enum tx_desc1_status_bits {
274 Tx1ComplIntr=0x80000000, Tx1LastSeg=0x40000000, Tx1FirstSeg=0x20000000,
275 Tx1SetupPkt=0x08000000, Tx1DisableCRC=0x04000000, Tx1RingWrap=0x02000000,
276 Tx1ChainDesc=0x01000000, Tx1NoPad=0x800000, Tx1HashSetup=0x400000,
277 Tx1WholePkt=(Tx1FirstSeg | Tx1LastSeg),
278};
279enum rx_desc0_status_bits {
280 Rx0DescOwned=0x80000000, Rx0DescError=0x8000, Rx0NoSpace=0x4000,
281 Rx0Runt=0x0800, Rx0McastPkt=0x0400, Rx0FirstSeg=0x0200, Rx0LastSeg=0x0100,
282 Rx0HugeFrame=0x80, Rx0CRCError=0x02,
283 Rx0WholePkt=(Rx0FirstSeg | Rx0LastSeg),
284};
285enum rx_desc1_status_bits {
286 Rx1RingWrap=0x02000000, Rx1ChainDesc=0x01000000,
287};
288
289struct xircom_private {
290 struct xircom_rx_desc rx_ring[RX_RING_SIZE];
291 struct xircom_tx_desc tx_ring[TX_RING_SIZE];
292 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
293 struct sk_buff* tx_skbuff[TX_RING_SIZE];
03a8c661 294
1da177e4
LT
295 /* The X3201-3 requires 4-byte aligned tx bufs */
296 struct sk_buff* tx_aligned_skbuff[TX_RING_SIZE];
03a8c661 297
1da177e4
LT
298 /* The addresses of receive-in-place skbuffs. */
299 struct sk_buff* rx_skbuff[RX_RING_SIZE];
300 u16 setup_frame[PKT_SETUP_SZ / sizeof(u16)]; /* Pseudo-Tx frame to init address table. */
301 int chip_id;
302 struct net_device_stats stats;
303 unsigned int cur_rx, cur_tx; /* The next free ring entry */
304 unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
305 unsigned int tx_full:1; /* The Tx queue is full. */
306 unsigned int speed100:1;
307 unsigned int full_duplex:1; /* Full-duplex operation requested. */
308 unsigned int autoneg:1;
309 unsigned int default_port:4; /* Last dev->if_port value. */
310 unsigned int open:1;
311 unsigned int csr0; /* CSR0 setting. */
312 unsigned int csr6; /* Current CSR6 control settings. */
313 u16 to_advertise; /* NWay capabilities advertised. */
314 u16 advertising[4];
315 signed char phys[4], mii_cnt; /* MII device addresses. */
316 int saved_if_port;
317 struct pci_dev *pdev;
318 spinlock_t lock;
319};
320
321static int mdio_read(struct net_device *dev, int phy_id, int location);
322static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
323static void xircom_up(struct net_device *dev);
324static void xircom_down(struct net_device *dev);
325static int xircom_open(struct net_device *dev);
326static void xircom_tx_timeout(struct net_device *dev);
327static void xircom_init_ring(struct net_device *dev);
328static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev);
329static int xircom_rx(struct net_device *dev);
330static void xircom_media_change(struct net_device *dev);
7d12e780 331static irqreturn_t xircom_interrupt(int irq, void *dev_instance);
1da177e4
LT
332static int xircom_close(struct net_device *dev);
333static struct net_device_stats *xircom_get_stats(struct net_device *dev);
334static int xircom_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
335static void set_rx_mode(struct net_device *dev);
336static void check_duplex(struct net_device *dev);
7282d491 337static const struct ethtool_ops ops;
1da177e4
LT
338
339
340/* The Xircom cards are picky about when certain bits in CSR6 can be
341 manipulated. Keith Owens <kaos@ocs.com.au>. */
342static void outl_CSR6(u32 newcsr6, long ioaddr)
343{
344 const int strict_bits =
345 TxThresh10 | TxStoreForw | TxThreshMask | EnableTxRx | FullDuplexBit;
346 int csr5, csr5_22_20, csr5_19_17, currcsr6, attempts = 200;
347 unsigned long flags;
348 save_flags(flags);
349 cli();
350 /* mask out the reserved bits that always read 0 on the Xircom cards */
351 newcsr6 &= ~ReservedZeroMask;
352 /* or in the reserved bits that always read 1 */
353 newcsr6 |= ReservedOneMask;
354 currcsr6 = inl(ioaddr + CSR6);
355 if (((newcsr6 & strict_bits) == (currcsr6 & strict_bits)) ||
356 ((currcsr6 & ~EnableTxRx) == 0)) {
357 outl(newcsr6, ioaddr + CSR6); /* safe */
358 restore_flags(flags);
359 return;
360 }
361 /* make sure the transmitter and receiver are stopped first */
362 currcsr6 &= ~EnableTxRx;
363 while (1) {
364 csr5 = inl(ioaddr + CSR5);
365 if (csr5 == 0xffffffff)
366 break; /* cannot read csr5, card removed? */
367 csr5_22_20 = csr5 & 0x700000;
368 csr5_19_17 = csr5 & 0x0e0000;
369 if ((csr5_22_20 == 0 || csr5_22_20 == 0x600000) &&
370 (csr5_19_17 == 0 || csr5_19_17 == 0x80000 || csr5_19_17 == 0xc0000))
371 break; /* both are stopped or suspended */
372 if (!--attempts) {
373 printk(KERN_INFO DRV_NAME ": outl_CSR6 too many attempts,"
374 "csr5=0x%08x\n", csr5);
375 outl(newcsr6, ioaddr + CSR6); /* unsafe but do it anyway */
376 restore_flags(flags);
377 return;
378 }
379 outl(currcsr6, ioaddr + CSR6);
380 udelay(1);
381 }
382 /* now it is safe to change csr6 */
383 outl(newcsr6, ioaddr + CSR6);
384 restore_flags(flags);
385}
386
387
static void __devinit read_mac_address(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	int i, j;
	unsigned char tuple, link, data_id, data_count;

	/* Xircom has its address stored in the CIS;
	 * we access it through the boot rom interface for now
	 * this might not work, as the CIS is not parsed but I
	 * (danilo) use the offset I found on my card's CIS !!!
	 *
	 * Doug Ledford: I changed this routine around so that it
	 * walks the CIS memory space, parsing the config items, and
	 * finds the proper lan_node_id tuple and uses the data
	 * stored there.
	 */
	outl(1 << 12, ioaddr + CSR9); /* enable boot rom access */
	/* Walk the tuple chain: write a byte offset to CSR10, read the
	 * byte back through CSR9.  'link' is the tuple body length, so
	 * the next tuple header starts link+2 bytes further on. */
	for (i = 0x100; i < 0x1f7; i += link+2) {
		outl(i, ioaddr + CSR10);
		tuple = inl(ioaddr + CSR9) & 0xff;
		outl(i + 1, ioaddr + CSR10);
		link = inl(ioaddr + CSR9) & 0xff;
		outl(i + 2, ioaddr + CSR10);
		data_id = inl(ioaddr + CSR9) & 0xff;
		outl(i + 3, ioaddr + CSR10);
		data_count = inl(ioaddr + CSR9) & 0xff;
		/* 0x22/0x04/6 match the lan_node_id tuple described in the
		 * comment above: 6 data bytes = one Ethernet MAC address. */
		if ( (tuple == 0x22) &&
		     (data_id == 0x04) && (data_count == 0x06) ) {
			/*
			 * This is it. We have the data we want.
			 */
			for (j = 0; j < 6; j++) {
				outl(i + j + 4, ioaddr + CSR10);
				dev->dev_addr[j] = inl(ioaddr + CSR9) & 0xff;
			}
			break;
		} else if (link == 0) {
			/* zero link byte terminates the tuple chain */
			break;
		}
	}
}
429
430
431/*
432 * locate the MII interfaces and initialize them.
433 * we disable full-duplex modes here,
434 * because we don't know how to handle them.
435 */
static void find_mii_transceivers(struct net_device *dev)
{
	struct xircom_private *tp = netdev_priv(dev);
	int phy, phy_idx;

	/* Choose the capabilities to advertise.  A forced MII media type
	 * (ports 9..15, since media2advert[] has 7 entries indexed by
	 * default_port - 9) maps to a fixed mask; otherwise advertise
	 * half-duplex only -- the full-duplex bits are deliberately
	 * commented out (see function comment above this block). */
	if (media_cap[tp->default_port] & MediaIsMII) {
		u16 media2advert[] = { 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 };
		tp->to_advertise = media2advert[tp->default_port - 9];
	} else
		tp->to_advertise =
			/*ADVERTISE_100BASE4 | ADVERTISE_100FULL |*/ ADVERTISE_100HALF |
			/*ADVERTISE_10FULL |*/ ADVERTISE_10HALF | ADVERTISE_CSMA;

	/* Find the connected MII xcvrs.
	   Doing this in open() would allow detecting external xcvrs later,
	   but takes much time. */
	for (phy = 0, phy_idx = 0; phy < 32 && phy_idx < sizeof(tp->phys); phy++) {
		int mii_status = mdio_read(dev, phy, MII_BMSR);
		/* Accept the PHY if BMSR reports 100base4-only capability,
		 * or any 10/100 half/full capability without 100base4. */
		if ((mii_status & (BMSR_100BASE4 | BMSR_100HALF | BMSR_10HALF)) == BMSR_100BASE4 ||
		    ((mii_status & BMSR_100BASE4) == 0 &&
		     (mii_status & (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | BMSR_10HALF)) != 0)) {
			int mii_reg0 = mdio_read(dev, phy, MII_BMCR);
			int mii_advert = mdio_read(dev, phy, MII_ADVERTISE);
			/* BMSR capability bits shifted right by 6 line up with
			 * the ADVERTISE_* bit positions; intersect with what
			 * we are willing to offer. */
			int reg4 = ((mii_status >> 6) & tp->to_advertise) | ADVERTISE_CSMA;
			tp->phys[phy_idx] = phy;
			tp->advertising[phy_idx++] = reg4;
			printk(KERN_INFO "%s: MII transceiver #%d "
			       "config %4.4x status %4.4x advertising %4.4x.\n",
			       dev->name, phy, mii_reg0, mii_status, mii_advert);
		}
	}
	tp->mii_cnt = phy_idx;
	if (phy_idx == 0) {
		printk(KERN_INFO "%s: ***WARNING***: No MII transceiver found!\n",
		       dev->name);
		tp->phys[0] = 0;	/* fall back to PHY address 0 */
	}
}
474
475
476/*
477 * To quote Arjan van de Ven:
478 * transceiver_voodoo() enables the external UTP plug thingy.
479 * it's called voodoo as I stole this code and cannot cross-reference
480 * it with the specification.
481 * Actually it seems to go like this:
482 * - GPIO2 enables the MII itself so we can talk to it. The MII gets reset
483 * so any prior MII settings are lost.
484 * - GPIO0 enables the TP port so the MII can talk to the network.
485 * - a software reset will reset both GPIO pins.
486 * I also moved the software reset here, because doing it in xircom_up()
487 * required enabling the GPIO pins each time, which reset the MII each time.
488 * Thus we couldn't control the MII -- which sucks because we don't know
489 * how to handle full-duplex modes so we *must* disable them.
490 */
static void transceiver_voodoo(struct net_device *dev)
{
	struct xircom_private *tp = netdev_priv(dev);
	long ioaddr = dev->base_addr;

	/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
	outl(SoftwareReset, ioaddr + CSR0);
	udelay(2);

	/* Deassert reset. */
	outl(tp->csr0, ioaddr + CSR0);

	/* Reset the xcvr interface and turn on heartbeat. */
	outl(0x0008, ioaddr + CSR15);
	udelay(5);	/* The delays are Xircom-recommended to give the
			 * chipset time to reset the actual hardware
			 * on the PCMCIA card
			 */
	/* GPIO enable sequence (see the comment above this function):
	 * the values are taken verbatim from working code and are not
	 * cross-referenced with a datasheet -- do not reorder. */
	outl(0xa8050000, ioaddr + CSR15);
	udelay(5);
	outl(0xa00f0000, ioaddr + CSR15);
	udelay(5);

	/* Leave CSR6 cleared: full-duplex is deliberately not enabled. */
	outl_CSR6(0, ioaddr);
	//outl_CSR6(FullDuplexBit, ioaddr);
}
517
518
519static int __devinit xircom_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
520{
521 struct net_device *dev;
522 struct xircom_private *tp;
523 static int board_idx = -1;
524 int chip_idx = id->driver_data;
525 long ioaddr;
526 int i;
527 u8 chip_rev;
528
529/* when built into the kernel, we only print version if device is found */
530#ifndef MODULE
531 static int printed_version;
532 if (!printed_version++)
533 printk(version);
534#endif
535
536 //printk(KERN_INFO "xircom_init_one(%s)\n", pci_name(pdev));
537
538 board_idx++;
539
540 if (pci_enable_device(pdev))
541 return -ENODEV;
542
543 pci_set_master(pdev);
544
545 ioaddr = pci_resource_start(pdev, 0);
546 dev = alloc_etherdev(sizeof(*tp));
547 if (!dev) {
548 printk (KERN_ERR DRV_NAME "%d: cannot alloc etherdev, aborting\n", board_idx);
549 return -ENOMEM;
550 }
551 SET_MODULE_OWNER(dev);
552 SET_NETDEV_DEV(dev, &pdev->dev);
553
554 dev->base_addr = ioaddr;
555 dev->irq = pdev->irq;
556
557 if (pci_request_regions(pdev, dev->name)) {
558 printk (KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", board_idx);
559 goto err_out_free_netdev;
560 }
561
562 /* Bring the chip out of sleep mode.
563 Caution: Snooze mode does not work with some boards! */
564 if (xircom_tbl[chip_idx].flags & HAS_ACPI)
565 pci_write_config_dword(pdev, PCI_POWERMGMT, 0);
566
567 /* Stop the chip's Tx and Rx processes. */
568 outl_CSR6(inl(ioaddr + CSR6) & ~EnableTxRx, ioaddr);
569 /* Clear the missed-packet counter. */
570 (volatile int)inl(ioaddr + CSR8);
571
572 tp = netdev_priv(dev);
573
574 spin_lock_init(&tp->lock);
575 tp->pdev = pdev;
576 tp->chip_id = chip_idx;
577 /* BugFixes: The 21143-TD hangs with PCI Write-and-Invalidate cycles. */
578 /* XXX: is this necessary for Xircom? */
579 tp->csr0 = csr0 & ~EnableMWI;
580
581 pci_set_drvdata(pdev, dev);
582
583 /* The lower four bits are the media type. */
584 if (board_idx >= 0 && board_idx < MAX_UNITS) {
585 tp->default_port = options[board_idx] & 15;
586 if ((options[board_idx] & 0x90) || full_duplex[board_idx] > 0)
587 tp->full_duplex = 1;
588 if (mtu[board_idx] > 0)
589 dev->mtu = mtu[board_idx];
590 }
591 if (dev->mem_start)
592 tp->default_port = dev->mem_start;
593 if (tp->default_port) {
594 if (media_cap[tp->default_port] & MediaAlwaysFD)
595 tp->full_duplex = 1;
596 }
597 if (tp->full_duplex)
598 tp->autoneg = 0;
599 else
600 tp->autoneg = 1;
601 tp->speed100 = 1;
602
603 /* The Xircom-specific entries in the device structure. */
604 dev->open = &xircom_open;
605 dev->hard_start_xmit = &xircom_start_xmit;
606 dev->stop = &xircom_close;
607 dev->get_stats = &xircom_get_stats;
608 dev->do_ioctl = &xircom_ioctl;
609#ifdef HAVE_MULTICAST
610 dev->set_multicast_list = &set_rx_mode;
611#endif
612 dev->tx_timeout = xircom_tx_timeout;
613 dev->watchdog_timeo = TX_TIMEOUT;
614 SET_ETHTOOL_OPS(dev, &ops);
615
616 transceiver_voodoo(dev);
617
618 read_mac_address(dev);
619
620 if (register_netdev(dev))
621 goto err_out_cleardev;
622
623 pci_read_config_byte(pdev, PCI_REVISION_ID, &chip_rev);
624 printk(KERN_INFO "%s: %s rev %d at %#3lx,",
625 dev->name, xircom_tbl[chip_idx].chip_name, chip_rev, ioaddr);
626 for (i = 0; i < 6; i++)
627 printk("%c%2.2X", i ? ':' : ' ', dev->dev_addr[i]);
628 printk(", IRQ %d.\n", dev->irq);
629
630 if (xircom_tbl[chip_idx].flags & HAS_MII) {
631 find_mii_transceivers(dev);
632 check_duplex(dev);
633 }
634
635 return 0;
636
637err_out_cleardev:
638 pci_set_drvdata(pdev, NULL);
639 pci_release_regions(pdev);
640err_out_free_netdev:
641 free_netdev(dev);
642 return -ENODEV;
643}
644
645
646/* MII transceiver control section.
647 Read and write the MII registers using software-generated serial
648 MDIO protocol. See the MII specifications or DP83840A data sheet
649 for details. */
650
651/* The maximum data clock rate is 2.5 MHz. The minimum timing is usually
652 met by back-to-back PCI I/O cycles, but we insert a delay to avoid
653 "overclocking" issues or future 66Mhz PCI. */
654#define mdio_delay() inl(mdio_addr)
655
656/* Read and write the MII registers using software-generated serial
657 MDIO protocol. It is just different enough from the EEPROM protocol
658 to not share code. The maximum data clock rate is 2.5 MHz. */
659#define MDIO_SHIFT_CLK 0x10000
660#define MDIO_DATA_WRITE0 0x00000
661#define MDIO_DATA_WRITE1 0x20000
662#define MDIO_ENB 0x00000 /* Ignore the 0x02000 databook setting. */
663#define MDIO_ENB_IN 0x40000
664#define MDIO_DATA_READ 0x80000
665
666static int mdio_read(struct net_device *dev, int phy_id, int location)
667{
668 int i;
669 int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
670 int retval = 0;
671 long ioaddr = dev->base_addr;
672 long mdio_addr = ioaddr + CSR9;
673
674 /* Establish sync by sending at least 32 logic ones. */
675 for (i = 32; i >= 0; i--) {
676 outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
677 mdio_delay();
678 outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
679 mdio_delay();
680 }
681 /* Shift the read command bits out. */
682 for (i = 15; i >= 0; i--) {
683 int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
684
685 outl(MDIO_ENB | dataval, mdio_addr);
686 mdio_delay();
687 outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
688 mdio_delay();
689 }
690 /* Read the two transition, 16 data, and wire-idle bits. */
691 for (i = 19; i > 0; i--) {
692 outl(MDIO_ENB_IN, mdio_addr);
693 mdio_delay();
694 retval = (retval << 1) | ((inl(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
695 outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
696 mdio_delay();
697 }
698 return (retval>>1) & 0xffff;
699}
700
701
702static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
703{
704 int i;
705 int cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
706 long ioaddr = dev->base_addr;
707 long mdio_addr = ioaddr + CSR9;
708
709 /* Establish sync by sending 32 logic ones. */
710 for (i = 32; i >= 0; i--) {
711 outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
712 mdio_delay();
713 outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
714 mdio_delay();
715 }
716 /* Shift the command bits out. */
717 for (i = 31; i >= 0; i--) {
718 int dataval = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
719 outl(MDIO_ENB | dataval, mdio_addr);
720 mdio_delay();
721 outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
722 mdio_delay();
723 }
724 /* Clear out extra bits. */
725 for (i = 2; i > 0; i--) {
726 outl(MDIO_ENB_IN, mdio_addr);
727 mdio_delay();
728 outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
729 mdio_delay();
730 }
731 return;
732}
733
734
/* Bring the interface up: initialize the descriptor rings, program the
 * chip with their addresses, load the Rx filter, enable interrupts and
 * the Tx/Rx engines, and start the netif queue.  The CSR write order
 * below matters -- Tx is enabled first so the setup frame queued by
 * set_rx_mode() is processed before Rx starts. */
static void
xircom_up(struct net_device *dev)
{
	struct xircom_private *tp = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	int i;

	xircom_init_ring(dev);
	/* Clear the tx ring */
	for (i = 0; i < TX_RING_SIZE; i++) {
		tp->tx_skbuff[i] = NULL;
		tp->tx_ring[i].status = 0;
	}

	if (xircom_debug > 1)
		printk(KERN_DEBUG "%s: xircom_up() irq %d.\n", dev->name, dev->irq);

	/* Hand the chip the bus addresses of the Rx and Tx rings. */
	outl(virt_to_bus(tp->rx_ring), ioaddr + CSR3);
	outl(virt_to_bus(tp->tx_ring), ioaddr + CSR4);

	tp->saved_if_port = dev->if_port;
	if (dev->if_port == 0)
		dev->if_port = tp->default_port;

	tp->csr6 = TxThresh10 /*| FullDuplexBit*/; /* XXX: why 10 and not 100? */

	set_rx_mode(dev);

	/* Start the chip's Tx to process setup frame. */
	outl_CSR6(tp->csr6, ioaddr);
	outl_CSR6(tp->csr6 | EnableTx, ioaddr);

	/* Acknowledge all outstanding interrupts sources */
	outl(xircom_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
	/* Enable interrupts by setting the interrupt mask. */
	outl(xircom_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
	/* Enable Rx */
	outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
	/* Rx poll demand */
	outl(0, ioaddr + CSR2);

	/* Tell the net layer we're ready */
	netif_start_queue (dev);

	/* Check current media state */
	xircom_media_change(dev);

	if (xircom_debug > 2) {
		printk(KERN_DEBUG "%s: Done xircom_up(), CSR0 %8.8x, CSR5 %8.8x CSR6 %8.8x.\n",
		       dev->name, inl(ioaddr + CSR0), inl(ioaddr + CSR5),
		       inl(ioaddr + CSR6));
	}
}
788
789
790static int
791xircom_open(struct net_device *dev)
792{
793 struct xircom_private *tp = netdev_priv(dev);
794
1fb9df5d 795 if (request_irq(dev->irq, &xircom_interrupt, IRQF_SHARED, dev->name, dev))
1da177e4
LT
796 return -EAGAIN;
797
798 xircom_up(dev);
799 tp->open = 1;
800
801 return 0;
802}
803
804
/* Watchdog callback invoked by the net core when a Tx has been pending
 * longer than dev->watchdog_timeo: dump optional debug state, restart
 * the chip's DMA engines, kick a transmit demand, and count the error. */
static void xircom_tx_timeout(struct net_device *dev)
{
	struct xircom_private *tp = netdev_priv(dev);
	long ioaddr = dev->base_addr;

	if (media_cap[dev->if_port] & MediaIsMII) {
		/* Do nothing -- the media monitor should handle this. */
		if (xircom_debug > 1)
			printk(KERN_WARNING "%s: Transmit timeout using MII device.\n",
			       dev->name);
	}

#if defined(way_too_many_messages)
	if (xircom_debug > 3) {
		int i;
		for (i = 0; i < RX_RING_SIZE; i++) {
			u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
			int j;
			printk(KERN_DEBUG "%2d: %8.8x %8.8x %8.8x %8.8x "
			       "%2.2x %2.2x %2.2x.\n",
			       i, (unsigned int)tp->rx_ring[i].status,
			       (unsigned int)tp->rx_ring[i].length,
			       (unsigned int)tp->rx_ring[i].buffer1,
			       (unsigned int)tp->rx_ring[i].buffer2,
			       buf[0], buf[1], buf[2]);
			for (j = 0; buf[j] != 0xee && j < 1600; j++)
				if (j < 100) printk(" %2.2x", buf[j]);
			printk(" j=%d.\n", j);
		}
		printk(KERN_DEBUG "  Rx ring %8.8x: ", (int)tp->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(" %8.8x", (unsigned int)tp->rx_ring[i].status);
		printk("\n" KERN_DEBUG "  Tx ring %8.8x: ", (int)tp->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" %8.8x", (unsigned int)tp->tx_ring[i].status);
		printk("\n");
	}
#endif

	/* Stop and restart the chip's Tx/Rx processes . */
	/* NOTE(review): the first call enables Rx only, leaving Tx
	 * stopped; the second re-enables both.  Presumably this bounces
	 * the Tx engine -- confirm against the CSR6 documentation. */
	outl_CSR6(tp->csr6 | EnableRx, ioaddr);
	outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
	/* Trigger an immediate transmit demand. */
	outl(0, ioaddr + CSR1);

	dev->trans_start = jiffies;
	netif_wake_queue (dev);
	tp->stats.tx_errors++;
}
854
855
856/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void xircom_init_ring(struct net_device *dev)
{
	struct xircom_private *tp = netdev_priv(dev);
	int i;

	tp->tx_full = 0;
	tp->cur_rx = tp->cur_tx = 0;
	tp->dirty_rx = tp->dirty_tx = 0;

	/* Chain the Rx descriptors into a ring via their buffer2 links. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		tp->rx_ring[i].status = 0;
		tp->rx_ring[i].length = PKT_BUF_SZ;
		tp->rx_ring[i].buffer2 = virt_to_bus(&tp->rx_ring[i+1]);
		tp->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	tp->rx_ring[i-1].length = PKT_BUF_SZ | Rx1RingWrap;
	tp->rx_ring[i-1].buffer2 = virt_to_bus(&tp->rx_ring[0]);

	for (i = 0; i < RX_RING_SIZE; i++) {
		/* Note the receive buffer must be longword aligned.
		   dev_alloc_skb() provides 16 byte alignment. But do *not*
		   use skb_reserve() to align the IP header! */
		struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
		tp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;	/* remaining entries stay unowned by the chip */
		skb->dev = dev;	/* Mark as being used by this device. */
		tp->rx_ring[i].status = Rx0DescOwned;	/* Owned by Xircom chip */
		tp->rx_ring[i].buffer1 = virt_to_bus(skb->data);
	}
	/* Record how many entries got buffers; if some allocations failed,
	 * this wraps negative (as unsigned) so the refill path can tell --
	 * presumably consumed by xircom_rx(); not visible here. */
	tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* The Tx buffer descriptor is filled in as needed, but we
	   do need to clear the ownership bit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		tp->tx_skbuff[i] = NULL;
		tp->tx_ring[i].status = 0;
		tp->tx_ring[i].buffer2 = virt_to_bus(&tp->tx_ring[i+1]);
		/* X3201-3 needs 4-byte aligned Tx buffers (see the struct
		 * comment), so preallocate bounce skbs for that chip. */
		if (tp->chip_id == X3201_3)
			tp->tx_aligned_skbuff[i] = dev_alloc_skb(PKT_BUF_SZ);
	}
	/* Close the Tx ring. */
	tp->tx_ring[i-1].buffer2 = virt_to_bus(&tp->tx_ring[0]);
}
901
902
/* hard_start_xmit hook: queue one skb on the Tx ring and kick the chip.
 * Always returns 0 (packet accepted). */
static int
xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xircom_private *tp = netdev_priv(dev);
	int entry;
	u32 flag;

	/* Caution: the write order is important here, set the base address
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = tp->cur_tx % TX_RING_SIZE;

	tp->tx_skbuff[entry] = skb;
	if (tp->chip_id == X3201_3) {
		/* X3201-3 requires 4-byte aligned Tx buffers: bounce the
		 * packet through the preallocated aligned skb. */
		skb_copy_from_linear_data(skb,
					  tp->tx_aligned_skbuff[entry]->data,
					  skb->len);
		tp->tx_ring[entry].buffer1 = virt_to_bus(tp->tx_aligned_skbuff[entry]->data);
	} else
		tp->tx_ring[entry].buffer1 = virt_to_bus(skb->data);

	/* Request a Tx-done interrupt only at the ring's half-way point
	 * and when nearly full, to limit interrupt load. */
	if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
		flag = Tx1WholePkt; /* No interrupt */
	} else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
		flag = Tx1WholePkt | Tx1ComplIntr; /* Tx-done intr. */
	} else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
		flag = Tx1WholePkt; /* No Tx-done intr. */
	} else {
		/* Leave room for set_rx_mode() to fill entries. */
		flag = Tx1WholePkt | Tx1ComplIntr; /* Tx-done intr. */
		tp->tx_full = 1;
	}
	/* Last ring slot must carry the ring-wrap flag. */
	if (entry == TX_RING_SIZE - 1)
		flag |= Tx1WholePkt | Tx1ComplIntr | Tx1RingWrap;

	tp->tx_ring[entry].length = skb->len | flag;
	tp->tx_ring[entry].status = Tx0DescOwned;	/* Pass ownership to the chip. */
	tp->cur_tx++;
	if (tp->tx_full)
		netif_stop_queue (dev);
	else
		netif_wake_queue (dev);

	/* Trigger an immediate transmit demand. */
	outl(0, dev->base_addr + CSR1);

	dev->trans_start = jiffies;

	return 0;
}
954
955
956static void xircom_media_change(struct net_device *dev)
957{
958 struct xircom_private *tp = netdev_priv(dev);
959 long ioaddr = dev->base_addr;
960 u16 reg0, reg1, reg4, reg5;
961 u32 csr6 = inl(ioaddr + CSR6), newcsr6;
962
963 /* reset status first */
964 mdio_read(dev, tp->phys[0], MII_BMCR);
965 mdio_read(dev, tp->phys[0], MII_BMSR);
966
967 reg0 = mdio_read(dev, tp->phys[0], MII_BMCR);
968 reg1 = mdio_read(dev, tp->phys[0], MII_BMSR);
969
970 if (reg1 & BMSR_LSTATUS) {
971 /* link is up */
972 if (reg0 & BMCR_ANENABLE) {
973 /* autonegotiation is enabled */
974 reg4 = mdio_read(dev, tp->phys[0], MII_ADVERTISE);
975 reg5 = mdio_read(dev, tp->phys[0], MII_LPA);
976 if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
977 tp->speed100 = 1;
978 tp->full_duplex = 1;
979 } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
980 tp->speed100 = 1;
981 tp->full_duplex = 0;
982 } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
983 tp->speed100 = 0;
984 tp->full_duplex = 1;
985 } else {
986 tp->speed100 = 0;
987 tp->full_duplex = 0;
988 }
989 } else {
990 /* autonegotiation is disabled */
991 if (reg0 & BMCR_SPEED100)
992 tp->speed100 = 1;
993 else
994 tp->speed100 = 0;
995 if (reg0 & BMCR_FULLDPLX)
996 tp->full_duplex = 1;
997 else
998 tp->full_duplex = 0;
999 }
1000 printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
1001 dev->name,
1002 tp->speed100 ? "100" : "10",
1003 tp->full_duplex ? "full" : "half");
1004 netif_carrier_on(dev);
1005 newcsr6 = csr6 & ~FullDuplexBit;
1006 if (tp->full_duplex)
1007 newcsr6 |= FullDuplexBit;
1008 if (newcsr6 != csr6)
1009 outl_CSR6(newcsr6, ioaddr + CSR6);
1010 } else {
1011 printk(KERN_DEBUG "%s: Link is down\n", dev->name);
1012 netif_carrier_off(dev);
1013 }
1014}
1015
1016
/* Reset the PHY and reprogram it from the cached autoneg/speed/duplex
   settings in tp.  Called after ethtool/ioctl changes the link config. */
static void check_duplex(struct net_device *dev)
{
	struct xircom_private *tp = netdev_priv(dev);
	u16 reg0;

	mdio_write(dev, tp->phys[0], MII_BMCR, BMCR_RESET);
	udelay(500);
	/* NOTE(review): unbounded busy-wait; a PHY that never clears
	   BMCR_RESET would hang here -- consider adding a timeout. */
	while (mdio_read(dev, tp->phys[0], MII_BMCR) & BMCR_RESET);

	reg0 = mdio_read(dev, tp->phys[0], MII_BMCR);
	mdio_write(dev, tp->phys[0], MII_ADVERTISE, tp->advertising[0]);

	if (tp->autoneg) {
		/* Clear any forced-mode bits and (re)start autonegotiation. */
		reg0 &= ~(BMCR_SPEED100 | BMCR_FULLDPLX);
		reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
	} else {
		/* Force the cached speed and duplex. */
		reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
		if (tp->speed100)
			reg0 |= BMCR_SPEED100;
		if (tp->full_duplex)
			reg0 |= BMCR_FULLDPLX;
		printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
		       dev->name,
		       tp->speed100 ? "100" : "10",
		       tp->full_duplex ? "full" : "half");
	}
	mdio_write(dev, tp->phys[0], MII_BMCR, reg0);
}
1045
1046
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct xircom_private *tp = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	int csr5, work_budget = max_interrupt_work;
	int handled = 0;

	spin_lock (&tp->lock);

	/* Loop until all pending interrupt causes are serviced or the
	   work budget is exhausted. */
	do {
		csr5 = inl(ioaddr + CSR5);
		/* Acknowledge all of the current interrupt sources ASAP. */
		outl(csr5 & 0x0001ffff, ioaddr + CSR5);

		if (xircom_debug > 4)
			printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
			       dev->name, csr5, inl(dev->base_addr + CSR5));

		if (csr5 == 0xffffffff)
			break;	/* all bits set, assume PCMCIA card removed */

		if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
			break;	/* nothing for us: shared-IRQ case */

		handled = 1;

		if (csr5 & (RxIntr | RxNoBuf))
			work_budget -= xircom_rx(dev);

		if (csr5 & (TxNoBuf | TxDied | TxIntr)) {
			unsigned int dirty_tx;

			/* Reclaim descriptors the chip has finished with. */
			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
				 dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = tp->tx_ring[entry].status;

				if (status < 0)
					break;	/* It still hasn't been Txed */
				/* Check for Rx filter setup frames. */
				if (tp->tx_skbuff[entry] == NULL)
					continue;

				if (status & Tx0DescError) {
					/* There was an major error, log it. */
#ifndef final_version
					if (xircom_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
							   dev->name, status);
#endif
					tp->stats.tx_errors++;
					if (status & Tx0ManyColl) {
						tp->stats.tx_aborted_errors++;
					}
					if (status & Tx0NoCarrier) tp->stats.tx_carrier_errors++;
					if (status & Tx0LateColl) tp->stats.tx_window_errors++;
					if (status & Tx0Underflow) tp->stats.tx_fifo_errors++;
				} else {
					tp->stats.tx_bytes += tp->tx_ring[entry].length & 0x7ff;
					tp->stats.collisions += (status >> 3) & 15;
					tp->stats.tx_packets++;
				}

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_skbuff[entry]);
				tp->tx_skbuff[entry] = NULL;
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
					   dev->name, dirty_tx, tp->cur_tx, tp->tx_full);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (tp->tx_full &&
			    tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				/* The ring is no longer full */
				tp->tx_full = 0;

			if (tp->tx_full)
				netif_stop_queue (dev);
			else
				netif_wake_queue (dev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (xircom_debug > 2)
					printk(KERN_WARNING "%s: The transmitter stopped."
						   " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
						   dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
				/* Restart the transmitter: Rx-only, then Tx+Rx. */
				outl_CSR6(tp->csr6 | EnableRx, ioaddr);
				outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
			}
		}

		/* Log errors. */
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 & LinkChange)
				xircom_media_change(dev);
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & TxThreshMask) != TxThreshMask)
					tp->csr6 += (1 << TxThreshShift);	/* Bump up the Tx threshold */
				else
					tp->csr6 |= TxStoreForw;  /* Store-n-forward. */
				/* Restart the transmit process. */
				outl_CSR6(tp->csr6 | EnableRx, ioaddr);
				outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
			}
			if (csr5 & RxDied) {		/* Missed a Rx frame. */
				tp->stats.rx_errors++;
				tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
				outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
			}
			/* Clear all error sources, included undocumented ones! */
			outl(0x0800f7ba, ioaddr + CSR5);
		}
		if (--work_budget < 0) {
			if (xircom_debug > 1)
				printk(KERN_WARNING "%s: Too much work during an interrupt, "
					   "csr5=0x%8.8x.\n", dev->name, csr5);
			/* Acknowledge all interrupt sources. */
			outl(0x8001ffff, ioaddr + CSR5);
			break;
		}
	} while (1);

	if (xircom_debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
			   dev->name, inl(ioaddr + CSR5));

	spin_unlock (&tp->lock);
	return IRQ_RETVAL(handled);
}
1185
1186
/* Drain completed Rx descriptors: deliver good frames to the stack
   (copying small ones into fresh skbs per rx_copybreak), count errors,
   then refill the ring.  Returns the amount of work done. */
static int
xircom_rx(struct net_device *dev)
{
	struct xircom_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int work_done = 0;

	if (xircom_debug > 4)
		printk(KERN_DEBUG " In xircom_rx(), entry %d %8.8x.\n", entry,
			   tp->rx_ring[entry].status);
	/* If we own the next entry, it's a new packet. Send it up. */
	while (tp->rx_ring[entry].status >= 0) {
		s32 status = tp->rx_ring[entry].status;

		if (xircom_debug > 5)
			printk(KERN_DEBUG " In xircom_rx(), entry %d %8.8x.\n", entry,
				   tp->rx_ring[entry].status);
		if (--rx_work_limit < 0)
			break;
		/* Status mask checks for a complete, error-free frame. */
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (xircom_debug > 1)
						printk(KERN_WARNING "%s: Oversized Ethernet frame "
							   "spanned multiple buffers, status %8.8x!\n",
							   dev->name, status);
					tp->stats.rx_length_errors++;
				}
			} else if (status & Rx0DescError) {
				/* There was a fatal error. */
				if (xircom_debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						   dev->name, status);
				tp->stats.rx_errors++; /* end of a packet.*/
				if (status & (Rx0Runt | Rx0HugeFrame)) tp->stats.rx_length_errors++;
				if (status & Rx0CRCError) tp->stats.rx_crc_errors++;
			}
		} else {
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((status >> 16) & 0x7ff) - 4;
			struct sk_buff *skb;

#ifndef final_version
			if (pkt_len > 1518) {
				printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
					   dev->name, pkt_len, pkt_len);
				pkt_len = 1518;
				tp->stats.rx_length_errors++;
			}
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
#if ! defined(__alpha__)
				eth_copy_and_sum(skb, bus_to_virt(tp->rx_ring[entry].buffer1),
								 pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
					   bus_to_virt(tp->rx_ring[entry].buffer1), pkt_len);
#endif
				work_done++;
			} else { /* Pass up the skb already on the Rx ring. */
				/* NOTE(review): assumes tp->rx_skbuff[entry] is
				   non-NULL here; if an earlier refill allocation
				   failed this would oops -- confirm. */
				skb_put(skb = tp->rx_skbuff[entry], pkt_len);
				tp->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring buffers. */
	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
		entry = tp->dirty_rx % RX_RING_SIZE;
		if (tp->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb;
			skb = tp->rx_skbuff[entry] = dev_alloc_skb(PKT_BUF_SZ);
			if (skb == NULL)
				break;
			skb->dev = dev;			/* Mark as being used by this device. */
			tp->rx_ring[entry].buffer1 = virt_to_bus(skb->data);
			work_done++;
		}
		/* Hand the descriptor back to the chip. */
		tp->rx_ring[entry].status = Rx0DescOwned;
	}

	return work_done;
}
1283
1284
/* Quiesce the hardware: mask all interrupts, stop the Tx/Rx engines,
   and harvest the final missed-frame count from CSR8. */
static void
xircom_down(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct xircom_private *tp = netdev_priv(dev);

	/* Disable interrupts by clearing the interrupt mask. */
	outl(0, ioaddr + CSR7);
	/* Stop the chip's Tx and Rx processes. */
	outl_CSR6(inl(ioaddr + CSR6) & ~EnableTxRx, ioaddr);

	/* An all-ones read means the card has been removed. */
	if (inl(ioaddr + CSR6) != 0xffffffff)
		tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;

	dev->if_port = tp->saved_if_port;
}
1301
1302
/* Close the interface: stop the queue, quiesce the hardware (if still
   present), release the IRQ, and free all Rx/Tx ring buffers.
   NOTE(review): tp->tx_aligned_skbuff entries allocated for X3201_3 in
   the ring setup are not freed here -- confirm they are released
   elsewhere, otherwise they leak across open/close cycles. */
static int
xircom_close(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct xircom_private *tp = netdev_priv(dev);
	int i;

	if (xircom_debug > 1)
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
			   dev->name, inl(ioaddr + CSR5));

	netif_stop_queue(dev);

	/* Skip the register writes if the CardBus card is already gone. */
	if (netif_device_present(dev))
		xircom_down(dev);

	free_irq(dev->irq, dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = tp->rx_skbuff[i];
		tp->rx_skbuff[i] = NULL;
		tp->rx_ring[i].status = 0;		/* Not owned by Xircom chip. */
		tp->rx_ring[i].length = 0;
		tp->rx_ring[i].buffer1 = 0xBADF00D0;	/* An invalid address. */
		if (skb) {
			dev_kfree_skb(skb);
		}
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (tp->tx_skbuff[i])
			dev_kfree_skb(tp->tx_skbuff[i]);
		tp->tx_skbuff[i] = NULL;
	}

	tp->open = 0;
	return 0;
}
1341
1342
1343static struct net_device_stats *xircom_get_stats(struct net_device *dev)
1344{
1345 struct xircom_private *tp = netdev_priv(dev);
1346 long ioaddr = dev->base_addr;
1347
1348 if (netif_device_present(dev))
1349 tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
1350
1351 return &tp->stats;
1352}
1353
1354static int xircom_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1355{
1356 struct xircom_private *tp = netdev_priv(dev);
1357 ecmd->supported =
1358 SUPPORTED_10baseT_Half |
1359 SUPPORTED_10baseT_Full |
1360 SUPPORTED_100baseT_Half |
1361 SUPPORTED_100baseT_Full |
1362 SUPPORTED_Autoneg |
1363 SUPPORTED_MII;
1364
1365 ecmd->advertising = ADVERTISED_MII;
1366 if (tp->advertising[0] & ADVERTISE_10HALF)
1367 ecmd->advertising |= ADVERTISED_10baseT_Half;
1368 if (tp->advertising[0] & ADVERTISE_10FULL)
1369 ecmd->advertising |= ADVERTISED_10baseT_Full;
1370 if (tp->advertising[0] & ADVERTISE_100HALF)
1371 ecmd->advertising |= ADVERTISED_100baseT_Half;
1372 if (tp->advertising[0] & ADVERTISE_100FULL)
1373 ecmd->advertising |= ADVERTISED_100baseT_Full;
1374 if (tp->autoneg) {
1375 ecmd->advertising |= ADVERTISED_Autoneg;
1376 ecmd->autoneg = AUTONEG_ENABLE;
1377 } else
1378 ecmd->autoneg = AUTONEG_DISABLE;
1379
1380 ecmd->port = PORT_MII;
1381 ecmd->transceiver = XCVR_INTERNAL;
1382 ecmd->phy_address = tp->phys[0];
1383 ecmd->speed = tp->speed100 ? SPEED_100 : SPEED_10;
1384 ecmd->duplex = tp->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
1385 ecmd->maxtxpkt = TX_RING_SIZE / 2;
1386 ecmd->maxrxpkt = 0;
1387 return 0;
1388}
1389
1390static int xircom_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1391{
1392 struct xircom_private *tp = netdev_priv(dev);
1393 u16 autoneg, speed100, full_duplex;
1394
1395 autoneg = (ecmd->autoneg == AUTONEG_ENABLE);
1396 speed100 = (ecmd->speed == SPEED_100);
1397 full_duplex = (ecmd->duplex == DUPLEX_FULL);
1398
1399 tp->autoneg = autoneg;
1400 if (speed100 != tp->speed100 ||
1401 full_duplex != tp->full_duplex) {
1402 tp->speed100 = speed100;
1403 tp->full_duplex = full_duplex;
1404 /* change advertising bits */
1405 tp->advertising[0] &= ~(ADVERTISE_10HALF |
1406 ADVERTISE_10FULL |
1407 ADVERTISE_100HALF |
1408 ADVERTISE_100FULL |
1409 ADVERTISE_100BASE4);
1410 if (speed100) {
1411 if (full_duplex)
1412 tp->advertising[0] |= ADVERTISE_100FULL;
1413 else
1414 tp->advertising[0] |= ADVERTISE_100HALF;
1415 } else {
1416 if (full_duplex)
1417 tp->advertising[0] |= ADVERTISE_10FULL;
1418 else
1419 tp->advertising[0] |= ADVERTISE_10HALF;
1420 }
1421 }
1422 check_duplex(dev);
1423 return 0;
1424}
1425
/* ethtool get_drvinfo: report driver name, version and PCI bus location.
   NOTE(review): strcpy into the fixed-size ethtool_drvinfo fields is safe
   only while DRV_NAME/DRV_VERSION stay short -- confirm if they change. */
static void xircom_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct xircom_private *tp = netdev_priv(dev);
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(tp->pdev));
}
1433
/* ethtool support: link settings get/set and driver identification. */
static const struct ethtool_ops ops = {
	.get_settings = xircom_get_settings,
	.set_settings = xircom_set_settings,
	.get_drvinfo = xircom_get_drvinfo,
};
1439
/* Provide ioctl() calls to examine the MII xcvr state. */
/* NOTE(review): save_flags()/cli()/restore_flags() are the ancient
   UP-only interrupt-disable primitives; a modern driver would use
   spin_lock_irqsave() on a lock not held by paths this code can
   re-enter -- confirm before converting, since xircom_media_change()
   runs under tp->lock in the ISR and also touches the MII bus. */
static int xircom_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct xircom_private *tp = netdev_priv(dev);
	u16 *data = (u16 *)&rq->ifr_ifru;
	int phy = tp->phys[0] & 0x1f;
	unsigned long flags;

	switch(cmd) {
	/* Legacy mii-diag interface */
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		if (tp->mii_cnt)
			data[0] = phy;
		else
			return -ENODEV;
		return 0;
	case SIOCGMIIREG:		/* Read MII PHY register. */
		save_flags(flags);
		cli();
		data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
		restore_flags(flags);
		return 0;
	case SIOCSMIIREG:		/* Write MII PHY register. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		save_flags(flags);
		cli();
		if (data[0] == tp->phys[0]) {
			u16 value = data[2];
			/* Mirror writes to our PHY into the cached state so
			   the driver's view stays consistent. */
			switch (data[1]) {
			case 0:
				if (value & (BMCR_RESET | BMCR_ANENABLE))
					/* Autonegotiation. */
					tp->autoneg = 1;
				else {
					tp->full_duplex = (value & BMCR_FULLDPLX) ? 1 : 0;
					tp->autoneg = 0;
				}
				break;
			case 4:
				tp->advertising[0] = value;
				break;
			}
			check_duplex(dev);
		}
		mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
		restore_flags(flags);
		return 0;
	default:
		return -EOPNOTSUPP;
	}

	return -EOPNOTSUPP;
}
1494
/* Set or clear the multicast filter for this adaptor.
   Note that we only use exclusion around actually queueing the
   new frame, not around filling tp->setup_frame. This is non-deterministic
   when re-entered but still correct. */
static void set_rx_mode(struct net_device *dev)
{
	struct xircom_private *tp = netdev_priv(dev);
	struct dev_mc_list *mclist;
	long ioaddr = dev->base_addr;
	int csr6 = inl(ioaddr + CSR6);
	u16 *eaddrs, *setup_frm;
	u32 tx_flags;
	int i;

	/* Start from a clean filter mode, then pick promiscuous,
	   all-multicast, hash filter, or perfect filter below. */
	tp->csr6 &= ~(AllMultiBit | PromiscBit | HashFilterBit);
	csr6 &= ~(AllMultiBit | PromiscBit | HashFilterBit);
	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		tp->csr6 |= PromiscBit;
		csr6 |= PromiscBit;
		goto out;
	}

	if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter well -- accept all multicasts. */
		tp->csr6 |= AllMultiBit;
		csr6 |= AllMultiBit;
		goto out;
	}

	tx_flags = Tx1WholePkt | Tx1SetupPkt | PKT_SETUP_SZ;

	/* Note that only the low-address shortword of setup_frame is valid! */
	setup_frm = tp->setup_frame;
	mclist = dev->mc_list;

	/* Fill the first entry with our physical address. */
	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm = cpu_to_le16(eaddrs[0]); setup_frm += 2;
	*setup_frm = cpu_to_le16(eaddrs[1]); setup_frm += 2;
	*setup_frm = cpu_to_le16(eaddrs[2]); setup_frm += 2;

	if (dev->mc_count > 14) { /* Must use a multicast hash table. */
		u32 *hash_table = (u32 *)(tp->setup_frame + 4 * 12);
		u32 hash, hash2;

		tx_flags |= Tx1HashSetup;
		tp->csr6 |= HashFilterBit;
		csr6 |= HashFilterBit;

		/* Fill the unused 3 entries with the broadcast address.
		   At least one entry *must* contain the broadcast address!!!*/
		for (i = 0; i < 3; i++) {
			*setup_frm = 0xffff; setup_frm += 2;
			*setup_frm = 0xffff; setup_frm += 2;
			*setup_frm = 0xffff; setup_frm += 2;
		}

		/* Truly brain-damaged hash filter layout */
		/* XXX: not sure if I should take the last or the first 9 bits */
		for (i = 0; i < dev->mc_count; i++, mclist = mclist->next) {
			u32 *hptr;
			hash = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
			if (hash < 384) {
				hash2 = hash + ((hash >> 4) << 4) +
					((hash >> 5) << 5);
			} else {
				hash -= 384;
				hash2 = 64 + hash + (hash >> 4) * 80;
			}
			hptr = &hash_table[hash2 & ~0x1f];
			*hptr |= cpu_to_le32(1 << (hash2 & 0x1f));
		}
	} else {
		/* We have <= 14 mcast addresses so we can use Xircom's
		   wonderful 16-address perfect filter. */
		for (i = 0; i < dev->mc_count; i++, mclist = mclist->next) {
			eaddrs = (u16 *)mclist->dmi_addr;
			*setup_frm = cpu_to_le16(eaddrs[0]); setup_frm += 2;
			*setup_frm = cpu_to_le16(eaddrs[1]); setup_frm += 2;
			*setup_frm = cpu_to_le16(eaddrs[2]); setup_frm += 2;
		}
		/* Fill the unused entries with the broadcast address.
		   At least one entry *must* contain the broadcast address!!!*/
		for (; i < 15; i++) {
			*setup_frm = 0xffff; setup_frm += 2;
			*setup_frm = 0xffff; setup_frm += 2;
			*setup_frm = 0xffff; setup_frm += 2;
		}
	}

	/* Now add this frame to the Tx list. */
	if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
		/* Same setup recently queued, we need not add it. */
		/* XXX: Huh? All it means is that the Tx list is full...*/
	} else {
		unsigned long flags;
		unsigned int entry;
		int dummy = -1;

		save_flags(flags); cli();
		entry = tp->cur_tx++ % TX_RING_SIZE;

		if (entry != 0) {
			/* Avoid a chip errata by prefixing a dummy entry. */
			tp->tx_skbuff[entry] = NULL;
			tp->tx_ring[entry].length =
				(entry == TX_RING_SIZE - 1) ? Tx1RingWrap : 0;
			tp->tx_ring[entry].buffer1 = 0;
			/* race with chip, set Tx0DescOwned later */
			dummy = entry;
			entry = tp->cur_tx++ % TX_RING_SIZE;
		}

		tp->tx_skbuff[entry] = NULL;
		/* Put the setup frame on the Tx list. */
		if (entry == TX_RING_SIZE - 1)
			tx_flags |= Tx1RingWrap;		/* Wrap ring. */
		tp->tx_ring[entry].length = tx_flags;
		tp->tx_ring[entry].buffer1 = virt_to_bus(tp->setup_frame);
		tp->tx_ring[entry].status = Tx0DescOwned;
		if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2) {
			tp->tx_full = 1;
			netif_stop_queue (dev);
		}
		/* Hand the dummy descriptor over only after the real setup
		   frame is in place, so the chip never sees a half-built chain. */
		if (dummy >= 0)
			tp->tx_ring[dummy].status = Tx0DescOwned;
		restore_flags(flags);
		/* Trigger an immediate transmit demand. */
		outl(0, ioaddr + CSR1);
	}

out:
	outl_CSR6(csr6, ioaddr);
}
1629
1630
/* PCI IDs this driver binds to: vendor 0x115D (Xircom), device 0x0003,
   mapped to the X3201_3 chip table entry. */
static struct pci_device_id xircom_pci_table[] = {
  { 0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, X3201_3 },
  {0},
};
MODULE_DEVICE_TABLE(pci, xircom_pci_table);
1636
1637
1638#ifdef CONFIG_PM
05adc3b7 1639static int xircom_suspend(struct pci_dev *pdev, pm_message_t state)
1da177e4
LT
1640{
1641 struct net_device *dev = pci_get_drvdata(pdev);
1642 struct xircom_private *tp = netdev_priv(dev);
1643 printk(KERN_INFO "xircom_suspend(%s)\n", dev->name);
1644 if (tp->open)
1645 xircom_down(dev);
1646
1647 pci_save_state(pdev);
1648 pci_disable_device(pdev);
1649 pci_set_power_state(pdev, 3);
1650
1651 return 0;
1652}
1653
1654
1655static int xircom_resume(struct pci_dev *pdev)
1656{
1657 struct net_device *dev = pci_get_drvdata(pdev);
1658 struct xircom_private *tp = netdev_priv(dev);
1659 printk(KERN_INFO "xircom_resume(%s)\n", dev->name);
1660
1661 pci_set_power_state(pdev,0);
1662 pci_enable_device(pdev);
1663 pci_restore_state(pdev);
1664
1665 /* Bring the chip out of sleep mode.
1666 Caution: Snooze mode does not work with some boards! */
1667 if (xircom_tbl[tp->chip_id].flags & HAS_ACPI)
1668 pci_write_config_dword(tp->pdev, PCI_POWERMGMT, 0);
1669
1670 transceiver_voodoo(dev);
1671 if (xircom_tbl[tp->chip_id].flags & HAS_MII)
1672 check_duplex(dev);
1673
1674 if (tp->open)
1675 xircom_up(dev);
1676 return 0;
1677}
1678#endif /* CONFIG_PM */
1679
1680
/* PCI removal callback: unregister the netdevice first (stops all use),
   then release the I/O regions and free the device structure. */
static void __devexit xircom_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	printk(KERN_INFO "xircom_remove_one(%s)\n", dev->name);
	unregister_netdev(dev);
	pci_release_regions(pdev);
	free_netdev(dev);
	pci_set_drvdata(pdev, NULL);
}
1691
1692
/* PCI driver glue: probe/remove plus optional suspend/resume hooks. */
static struct pci_driver xircom_driver = {
	.name		= DRV_NAME,
	.id_table	= xircom_pci_table,
	.probe		= xircom_init_one,
	.remove		= __devexit_p(xircom_remove_one),
#ifdef CONFIG_PM
	.suspend	= xircom_suspend,
	.resume		= xircom_resume
#endif /* CONFIG_PM */
};
1703
1704
/* Module entry point: print the version banner (module builds only) and
   register the PCI driver; device setup happens in the probe callback. */
static int __init xircom_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_register_driver(&xircom_driver);
}
1713
1714
/* Module exit point: unregister the PCI driver, which detaches and
   frees any bound devices via xircom_remove_one(). */
static void __exit xircom_exit(void)
{
	pci_unregister_driver(&xircom_driver);
}
1719
1720module_init(xircom_init)
1721module_exit(xircom_exit)
1722
1723/*
1724 * Local variables:
1725 * c-indent-level: 4
1726 * c-basic-offset: 4
1727 * tab-width: 4
1728 * End:
1729 */