/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
/*
	Written 1998-2001 by Donald Becker.

	Current Maintainer: Roger Luethi <rl@hellgate.ch>

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice. This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This driver is designed for the VIA VT86C100A Rhine-I.
	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
	and management NIC 6105M).

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403


	This driver contains some changes from the original Donald Becker
	version. He may or may not be interested in bug reports on this
	code. You can find his versions at:
	http://www.scyld.com/network/via-rhine.html
	[link no longer provides useful info -jgarzik]

*/

#define DRV_NAME	"via-rhine"
#define DRV_VERSION	"1.4.3"
#define DRV_RELDATE	"2007-03-06"


/* A few user-configurable values.
   These may be modified when a driver module is loaded. */

static int debug = 1;	/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
	|| defined(CONFIG_SPARC) || defined(__ia64__) \
	|| defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak;
#endif

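/*
 * Illustrative sketch (informational, not executed): how rx_copybreak is
 * applied in rhine_rx() below. A frame shorter than the breakpoint is
 * copied into a fresh, minimally sized skb so the full-sized ring buffer
 * stays in place; a longer frame hands the ring skb itself up the stack:
 *
 *	if (pkt_len < rx_copybreak)
 *		copy frame into a new skb, keep the ring buffer;
 *	else
 *		pass the ring skb upstream, refill the slot later.
 */
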
/* Work-around for broken BIOSes: they are unable to get the chip back out of
   power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
static int avoid_D3;

/*
 * In case you are looking for 'options[]' or 'full_duplex[]', they
 * are gone. Use ethtool(8) instead.
 */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64 element 8390-like hash table. */
static const int multicast_filter_limit = 32;


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
#define RX_RING_SIZE	64

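/*
 * Example (informational, not driver logic): because the ring sizes are
 * powers of two, the compiler can turn the ring-index modulo used in the
 * hot paths below into a simple mask:
 *
 *	entry = rp->cur_tx % TX_RING_SIZE;	equivalent to cur_tx & (16 - 1)
 *	entry = rp->cur_rx % RX_RING_SIZE;	equivalent to cur_rx & (64 - 1)
 */
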
/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/dmi.h>

/* These identify the driver base version and may not be removed. */
static const char version[] __devinitconst =
	KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE
	" Written by Donald Becker\n";

/* This driver was written to use PCI memory space. Some early versions
   of the Rhine may only work correctly with I/O space accesses. */
#ifdef CONFIG_VIA_RHINE_MMIO
#define USE_MMIO
#else
#endif

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(avoid_D3, bool, 0);
MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");

/*
		Theory of Operation

I. Board Compatibility

This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
controller; it also supports the compatible Rhine-II and Rhine-III chips.

II. Board-specific settings

Boards with this chip are functional only in a bus-master PCI slot.

Many operational settings are loaded from the EEPROM to the Config word at
offset 0x78. For most of these settings, this driver assumes that they are
correct.
If this driver is compiled to use PCI memory space operations the EEPROM
must be configured to enable memory ops.

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.

IIIb/c. Transmit/Receive Structure

This driver attempts to use a zero-copy receive and transmit scheme.

Alas, all data buffers are required to start on a 32 bit boundary, so
the driver must often copy transmit packets into bounce buffers.

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack. Buffers consumed this way are replaced by newly allocated
skbuffs in the last phase of rhine_rx().

The RX_COPYBREAK value is chosen to trade off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames. New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets. When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine. Copying also preloads the cache, which is
most useful with small frames.

Since the VIA chips are only able to transfer data to buffers on 32 bit
boundaries, the IP header at offset 14 in an ethernet frame isn't
longword aligned for further processing. Copying these unaligned buffers
has the beneficial effect of 16-byte aligning the IP header.

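For illustration (informational), the copy path in rhine_rx() below restores
alignment by reserving NET_IP_ALIGN bytes before the copy:

	skb_reserve(skb, NET_IP_ALIGN);
	skb_copy_to_linear_data(skb, rp->rx_skbuff[entry]->data, pkt_len);
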
IIId. Synchronization

The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
which is single threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring. It locks the
netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
the ring is not available it stops the transmit queue by
calling netif_stop_queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. If at least half of the entries in
the Tx ring are available the transmit queue is woken up if it was stopped.

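As a sketch (informational, mirroring the code below), both flows serialize
on the same spinlock:

	rhine_start_tx():
		spin_lock_irq(&rp->lock);
		... fill one Tx descriptor, advance cur_tx ...
		spin_unlock_irq(&rp->lock);

	rhine_tx(), run from the interrupt handler:
		spin_lock(&rp->lock);
		... reap finished descriptors, advance dirty_tx ...
		spin_unlock(&rp->lock);
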
IV. Notes

IVb. References

Preliminary VT86C100A manual from http://www.via.com.tw/
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF


IVc. Errata

The VT86C100A manual is not reliable information.
The 3043 chip does not handle unaligned transmit or receive buffers, resulting
in significant performance degradation for bounce buffer copies on transmit
and unaligned IP headers on receive.
The chip does not pad to minimum transmit length.

*/


/* This table drives the PCI probe routines. It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- the first table entry matches all 56** cards but
   the second only the 1234 card.
*/

enum rhine_revs {
	VT86C100A	= 0x00,
	VTunknown0	= 0x20,
	VT6102		= 0x40,
	VT8231		= 0x50,	/* Integrated MAC */
	VT8233		= 0x60,	/* Integrated MAC */
	VT8235		= 0x74,	/* Integrated MAC */
	VT8237		= 0x78,	/* Integrated MAC */
	VTunknown1	= 0x7C,
	VT6105		= 0x80,
	VT6105_B0	= 0x83,
	VT6105L		= 0x8A,
	VT6107		= 0x8C,
	VTunknown2	= 0x8E,
	VT6105M		= 0x90,	/* Management adapter */
};

enum rhine_quirks {
	rqWOL		= 0x0001,	/* Wake-On-LAN support */
	rqForceReset	= 0x0002,
	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
	rqRhineI	= 0x0100,	/* See comment below */
};
/*
 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
 */

/* Beware of PCI posted writes */
#define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
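
/*
 * Example (informational): reading any chip register flushes posted PCI
 * writes to the device, so a sequence such as the one in rhine_chip_reset()
 * below
 *
 *	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
 *	IOSYNC;
 *
 * guarantees the reset command has reached the chip before it is polled.
 */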

static const struct pci_device_id rhine_pci_tbl[] = {
	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);

/* Offsets to the device registers. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	ChipCmd1=0x09,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
	WOLcrClr1=0xA6, WOLcgClr=0xA7,
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};

/* Bits in ConfigD */
enum backoff_bits {
	BackOptional=0x01, BackModify=0x02,
	BackCaptureEffect=0x04, BackRandom=0x08
};

#ifdef USE_MMIO
/* Registers we check that mmio and reg are the same. */
static const int mmio_verify_registers[] = {
	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
	0
};
#endif

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
	IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
	IntrPCIErr=0x0040,
	IntrStatsMax=0x0080, IntrRxEarly=0x0100,
	IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
	IntrTxAborted=0x2000, IntrLinkChange=0x4000,
	IntrRxWakeUp=0x8000,
	IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
	IntrTxDescRace=0x080000,	/* mapped from IntrStatus2 */
	IntrTxErrSummary=0x082218,
};

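/*
 * Example (informational): get_intr_status() below folds the 8-bit
 * IntrStatus2 register into bits 16-23 of the combined status word,
 * which is why IntrTxDescRace (0x080000) sits above the 16-bit bits:
 *
 *	intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
 */
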
/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
enum wol_bits {
	WOLucast	= 0x10,
	WOLmagic	= 0x20,
	WOLbmcast	= 0x30,
	WOLlnkon	= 0x40,
	WOLlnkoff	= 0x80,
};

/* The Rx and Tx buffer descriptors. */
struct rx_desc {
	__le32 rx_status;
	__le32 desc_length;	/* Chain flag, Buffer/frame length */
	__le32 addr;
	__le32 next_desc;
};
struct tx_desc {
	__le32 tx_status;
	__le32 desc_length;	/* Chain flag, Tx Config, Frame length */
	__le32 addr;
	__le32 next_desc;
};

/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
#define TXDESC		0x00e08000

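/*
 * Example (informational): rhine_start_tx() below arms a descriptor with
 * the length and TXDESC bits first and hands it to the chip last, so the
 * hardware never sees a half-initialized entry:
 *
 *	rp->tx_ring[entry].desc_length = cpu_to_le32(TXDESC | skb->len);
 *	wmb();
 *	rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
 */
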
enum rx_status_bits {
	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
};

/* Bits in *_desc.*_status */
enum desc_status_bits {
	DescOwn=0x80000000
};

/* Bits in ChipCmd. */
enum chip_cmd_bits {
	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};

struct rhine_private {
	/* Descriptor rings */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers (Rhine-I only) */
	unsigned char *tx_buf[TX_RING_SIZE];
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	struct pci_dev *pdev;
	long pioaddr;
	struct net_device *dev;
	struct napi_struct napi;
	spinlock_t lock;

	/* Frequently used values: keep some adjacent for cache effect. */
	u32 quirks;
	struct rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	u8 wolopts;

	u8 tx_thresh, rx_thresh;

	struct mii_if_info mii_if;
	void __iomem *base;
};

static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int rhine_open(struct net_device *dev);
static void rhine_tx_timeout(struct net_device *dev);
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
				  struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
static void rhine_tx(struct net_device *dev);
static int rhine_rx(struct net_device *dev, int limit);
static void rhine_error(struct net_device *dev, int intr_status);
static void rhine_set_rx_mode(struct net_device *dev);
static struct net_device_stats *rhine_get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int rhine_close(struct net_device *dev);
static void rhine_shutdown(struct pci_dev *pdev);

#define RHINE_WAIT_FOR(condition) do {					\
	int i = 1024;							\
	while (!(condition) && --i)					\
		;							\
	if (debug > 1 && i < 512)					\
		printk(KERN_INFO "%s: %4d cycles used @ %s:%d\n",	\
		       DRV_NAME, 1024 - i, __func__, __LINE__);		\
} while (0)

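/*
 * Usage example (informational): busy-wait until the MII unit signals
 * completion, as in rhine_enable_linkmon() below:
 *
 *	RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);
 */
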
static inline u32 get_intr_status(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 intr_status;

	intr_status = ioread16(ioaddr + IntrStatus);
	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
	if (rp->quirks & rqStatusWBRace)
		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
	return intr_status;
}

/*
 * Get power related registers into sane state.
 * Notify user about past WOL event.
 */
static void rhine_power_init(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 wolstat;

	if (rp->quirks & rqWOL) {
		/* Make sure chip is in power state D0 */
		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);

		/* Disable "force PME-enable" */
		iowrite8(0x80, ioaddr + WOLcgClr);

		/* Clear power-event config bits (WOL) */
		iowrite8(0xFF, ioaddr + WOLcrClr);
		/* More recent cards can manage two additional patterns */
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + WOLcrClr1);

		/* Save power-event status bits */
		wolstat = ioread8(ioaddr + PwrcsrSet);
		if (rp->quirks & rq6patterns)
			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;

		/* Clear power-event status bits */
		iowrite8(0xFF, ioaddr + PwrcsrClr);
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + PwrcsrClr1);

		if (wolstat) {
			char *reason;
			switch (wolstat) {
			case WOLmagic:
				reason = "Magic packet";
				break;
			case WOLlnkon:
				reason = "Link went up";
				break;
			case WOLlnkoff:
				reason = "Link went down";
				break;
			case WOLucast:
				reason = "Unicast packet";
				break;
			case WOLbmcast:
				reason = "Multicast/broadcast packet";
				break;
			default:
				reason = "Unknown";
			}
			printk(KERN_INFO "%s: Woke system up. Reason: %s.\n",
			       DRV_NAME, reason);
		}
	}
}

static void rhine_chip_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
	IOSYNC;

	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
		printk(KERN_INFO "%s: Reset not complete yet. "
		       "Trying harder.\n", DRV_NAME);

		/* Force reset */
		if (rp->quirks & rqForceReset)
			iowrite8(0x40, ioaddr + MiscCmd);

		/* Reset can take somewhat longer (rare) */
		RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
	}

	if (debug > 1)
		printk(KERN_INFO "%s: Reset %s.\n", dev->name,
		       (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
		       "failed" : "succeeded");
}

#ifdef USE_MMIO
static void enable_mmio(long pioaddr, u32 quirks)
{
	int n;
	if (quirks & rqRhineI) {
		/* More recent docs say that this bit is reserved ... */
		n = inb(pioaddr + ConfigA) | 0x20;
		outb(n, pioaddr + ConfigA);
	} else {
		n = inb(pioaddr + ConfigD) | 0x80;
		outb(n, pioaddr + ConfigD);
	}
}
#endif

/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II)
 */
static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	outb(0x20, pioaddr + MACRegEEcsr);
	RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));

#ifdef USE_MMIO
	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO. If reloading EEPROM was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	enable_mmio(pioaddr, rp->quirks);
#endif

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);

}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void rhine_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	rhine_interrupt(dev->irq, (void *)dev);
	enable_irq(dev->irq);
}
#endif

static int rhine_napipoll(struct napi_struct *napi, int budget)
{
	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
	struct net_device *dev = rp->dev;
	void __iomem *ioaddr = rp->base;
	int work_done;

	work_done = rhine_rx(dev, budget);

	if (work_done < budget) {
		napi_complete(napi);

		iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty | IntrRxOverflow |
			  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
			  IntrTxDone | IntrTxError | IntrTxUnderrun |
			  IntrPCIErr | IntrStatsMax | IntrLinkChange,
			  ioaddr + IntrEnable);
	}
	return work_done;
}

static void __devinit rhine_hw_init(struct net_device *dev, long pioaddr)
{
	struct rhine_private *rp = netdev_priv(dev);

	/* Reset the chip to erase previous misconfiguration. */
	rhine_chip_reset(dev);

	/* Rhine-I needs extra time to recuperate before EEPROM reload */
	if (rp->quirks & rqRhineI)
		msleep(5);

	/* Reload EEPROM controlled bytes cleared by soft reset */
	rhine_reload_eeprom(pioaddr, dev);
}

static const struct net_device_ops rhine_netdev_ops = {
	.ndo_open		= rhine_open,
	.ndo_stop		= rhine_close,
	.ndo_start_xmit		= rhine_start_tx,
	.ndo_get_stats		= rhine_get_stats,
	.ndo_set_multicast_list	= rhine_set_rx_mode,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_tx_timeout		= rhine_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= rhine_poll,
#endif
};

static int __devinit rhine_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct rhine_private *rp;
	int i, rc;
	u32 quirks;
	long pioaddr;
	long memaddr;
	void __iomem *ioaddr;
	int io_size, phy_id;
	const char *name;
#ifdef USE_MMIO
	int bar = 1;
#else
	int bar = 0;
#endif

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	io_size = 256;
	phy_id = 0;
	quirks = 0;
	name = "Rhine";
	if (pdev->revision < VTunknown0) {
		quirks = rqRhineI;
		io_size = 128;
	}
	else if (pdev->revision >= VT6102) {
		quirks = rqWOL | rqForceReset;
		if (pdev->revision < VT6105) {
			name = "Rhine II";
			quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
		}
		else {
			phy_id = 1;	/* Integrated PHY, phy_id fixed to 1 */
			if (pdev->revision >= VT6105_B0)
				quirks |= rq6patterns;
			if (pdev->revision < VT6105M)
				name = "Rhine III";
			else
				name = "Rhine III (Management Adapter)";
		}
	}

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	/* this should always be supported */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc) {
		printk(KERN_ERR "32-bit PCI DMA addresses not supported by "
		       "the card!?\n");
		goto err_out;
	}

	/* sanity check */
	if ((pci_resource_len(pdev, 0) < io_size) ||
	    (pci_resource_len(pdev, 1) < io_size)) {
		rc = -EIO;
		printk(KERN_ERR "Insufficient PCI resources, aborting\n");
		goto err_out;
	}

	pioaddr = pci_resource_start(pdev, 0);
	memaddr = pci_resource_start(pdev, 1);

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof(struct rhine_private));
	if (!dev) {
		rc = -ENOMEM;
		printk(KERN_ERR "alloc_etherdev failed\n");
		goto err_out;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	rp = netdev_priv(dev);
	rp->dev = dev;
	rp->quirks = quirks;
	rp->pioaddr = pioaddr;
	rp->pdev = pdev;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_free_netdev;

	ioaddr = pci_iomap(pdev, bar, io_size);
	if (!ioaddr) {
		rc = -EIO;
		printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
		       "@ 0x%lX\n", pci_name(pdev), io_size, memaddr);
		goto err_out_free_res;
	}

#ifdef USE_MMIO
	enable_mmio(pioaddr, quirks);

	/* Check that selected MMIO registers match the PIO ones */
	i = 0;
	while (mmio_verify_registers[i]) {
		int reg = mmio_verify_registers[i++];
		unsigned char a = inb(pioaddr+reg);
		unsigned char b = readb(ioaddr+reg);
		if (a != b) {
			rc = -EIO;
			printk(KERN_ERR "MMIO do not match PIO [%02x] "
			       "(%02x != %02x)\n", reg, a, b);
			goto err_out_unmap;
		}
	}
#endif /* USE_MMIO */

	dev->base_addr = (unsigned long)ioaddr;
	rp->base = ioaddr;

	/* Get chip registers into a sane state */
	rhine_power_init(dev);
	rhine_hw_init(dev, pioaddr);

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	if (!is_valid_ether_addr(dev->perm_addr)) {
		rc = -EIO;
		printk(KERN_ERR "Invalid MAC address\n");
		goto err_out_unmap;
	}

	/* For Rhine-I/II, phy_id is loaded from EEPROM */
	if (!phy_id)
		phy_id = ioread8(ioaddr + 0x6C);

	dev->irq = pdev->irq;

	spin_lock_init(&rp->lock);
	rp->mii_if.dev = dev;
	rp->mii_if.mdio_read = mdio_read;
	rp->mii_if.mdio_write = mdio_write;
	rp->mii_if.phy_id_mask = 0x1f;
	rp->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &rhine_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);

	if (rp->quirks & rqRhineI)
		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

	/* dev->name not defined before register_netdev()! */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_unmap;

	printk(KERN_INFO "%s: VIA %s at 0x%lx, %pM, IRQ %d.\n",
	       dev->name, name,
#ifdef USE_MMIO
	       memaddr,
#else
	       (long)ioaddr,
#endif
	       dev->dev_addr, pdev->irq);

	pci_set_drvdata(pdev, dev);

	{
		u16 mii_cmd;
		int mii_status = mdio_read(dev, phy_id, 1);
		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
			printk(KERN_INFO "%s: MII PHY found at address "
			       "%d, status 0x%4.4x advertising %4.4x "
			       "Link %4.4x.\n", dev->name, phy_id,
			       mii_status, rp->mii_if.advertising,
			       mdio_read(dev, phy_id, 5));

			/* set IFF_RUNNING */
			if (mii_status & BMSR_LSTATUS)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);

		}
	}
	rp->mii_if.phy_id = phy_id;
	if (debug > 1 && avoid_D3)
		printk(KERN_INFO "%s: No D3 power state at shutdown.\n",
		       dev->name);

	return 0;

err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_free_netdev:
	free_netdev(dev);
err_out:
	return rc;
}

static int alloc_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void *ring;
	dma_addr_t ring_dma;

	ring = pci_alloc_consistent(rp->pdev,
				    RX_RING_SIZE * sizeof(struct rx_desc) +
				    TX_RING_SIZE * sizeof(struct tx_desc),
				    &ring_dma);
	if (!ring) {
		printk(KERN_ERR "Could not allocate DMA memory.\n");
		return -ENOMEM;
	}
	if (rp->quirks & rqRhineI) {
		rp->tx_bufs = pci_alloc_consistent(rp->pdev,
						   PKT_BUF_SZ * TX_RING_SIZE,
						   &rp->tx_bufs_dma);
		if (rp->tx_bufs == NULL) {
			pci_free_consistent(rp->pdev,
					    RX_RING_SIZE * sizeof(struct rx_desc) +
					    TX_RING_SIZE * sizeof(struct tx_desc),
					    ring, ring_dma);
			return -ENOMEM;
		}
	}

	rp->rx_ring = ring;
	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
	rp->rx_ring_dma = ring_dma;
	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);

	return 0;
}

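/*
 * Memory layout produced by alloc_ring() above (informational):
 *
 *	ring_dma    -> RX_RING_SIZE * sizeof(struct rx_desc)  (Rx ring)
 *	tx_ring_dma -> TX_RING_SIZE * sizeof(struct tx_desc)  (Tx ring)
 *
 * i.e. one DMA-coherent block with the Tx ring placed directly after the
 * Rx ring; Rhine-I additionally gets a block of PKT_BUF_SZ bounce buffers.
 */
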
static void free_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	pci_free_consistent(rp->pdev,
			    RX_RING_SIZE * sizeof(struct rx_desc) +
			    TX_RING_SIZE * sizeof(struct tx_desc),
			    rp->rx_ring, rp->rx_ring_dma);
	rp->tx_ring = NULL;

	if (rp->tx_bufs)
		pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
				    rp->tx_bufs, rp->tx_bufs_dma);

	rp->tx_bufs = NULL;

}

static void alloc_rbufs(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_rx = rp->cur_rx = 0;

	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	rp->rx_head_desc = &rp->rx_ring[0];
	next = rp->rx_ring_dma;

	/* Init the ring entries */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
		next += sizeof(struct rx_desc);
		rp->rx_ring[i].next_desc = cpu_to_le32(next);
		rp->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);

	/* Fill in the Rx buffers. Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
		rp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;	/* Mark as being used by this device. */

		rp->rx_skbuff_dma[i] =
			pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
				       PCI_DMA_FROMDEVICE);

		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
	}
	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
}

static void free_rbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
		if (rp->rx_skbuff[i]) {
			pci_unmap_single(rp->pdev,
					 rp->rx_skbuff_dma[i],
					 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(rp->rx_skbuff[i]);
		}
		rp->rx_skbuff[i] = NULL;
	}
}

static void alloc_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_tx = rp->cur_tx = 0;
	next = rp->tx_ring_dma;
	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_skbuff[i] = NULL;
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		next += sizeof(struct tx_desc);
		rp->tx_ring[i].next_desc = cpu_to_le32(next);
		if (rp->quirks & rqRhineI)
			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
	}
	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);

}

static void free_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int i;

	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
		if (rp->tx_skbuff[i]) {
			if (rp->tx_skbuff_dma[i]) {
				pci_unmap_single(rp->pdev,
						 rp->tx_skbuff_dma[i],
						 rp->tx_skbuff[i]->len,
						 PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(rp->tx_skbuff[i]);
		}
		rp->tx_skbuff[i] = NULL;
		rp->tx_buf[i] = NULL;
	}
}

static void rhine_check_media(struct net_device *dev, unsigned int init_media)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	mii_check_media(&rp->mii_if, debug, init_media);

	if (rp->mii_if.full_duplex)
		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
			 ioaddr + ChipCmd1);
	else
		iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
			 ioaddr + ChipCmd1);
	if (debug > 1)
		printk(KERN_INFO "%s: force_media %d, carrier %d\n", dev->name,
		       rp->mii_if.force_media, netif_carrier_ok(dev));
}

/* Called after status of force_media possibly changed */
static void rhine_set_carrier(struct mii_if_info *mii)
{
	if (mii->force_media) {
		/* autoneg is off: Link is always assumed to be up */
		if (!netif_carrier_ok(mii->dev))
			netif_carrier_on(mii->dev);
	}
	else	/* Let MII library update carrier status */
		rhine_check_media(mii->dev, 0);
	if (debug > 1)
		printk(KERN_INFO "%s: force_media %d, carrier %d\n",
		       mii->dev->name, mii->force_media,
		       netif_carrier_ok(mii->dev));
}

static void init_registers(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
	/* Configure initial FIFO thresholds. */
	iowrite8(0x20, ioaddr + TxConfig);
	rp->tx_thresh = 0x20;
	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */

	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);

	rhine_set_rx_mode(dev);

	napi_enable(&rp->napi);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty | IntrRxOverflow |
		  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
		  IntrTxDone | IntrTxError | IntrTxUnderrun |
		  IntrPCIErr | IntrStatsMax | IntrLinkChange,
		  ioaddr + IntrEnable);

	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
		  ioaddr + ChipCmd);
	rhine_check_media(dev, 1);
}

/* Enable MII link status auto-polling (required for IntrLinkChange) */
static void rhine_enable_linkmon(void __iomem *ioaddr)
{
	iowrite8(0, ioaddr + MIICmd);
	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
	iowrite8(0x80, ioaddr + MIICmd);

	RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));

	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}

/* Disable MII link status auto-polling (required for MDIO access) */
static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
{
	iowrite8(0, ioaddr + MIICmd);

	if (quirks & rqRhineI) {
		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR

		/* Can be called from ISR. Evil. */
		mdelay(1);

		/* 0x80 must be set immediately before turning it off */
		iowrite8(0x80, ioaddr + MIICmd);

		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);

		/* Heh. Now clear 0x80 again. */
		iowrite8(0, ioaddr + MIICmd);
	}
	else
		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
}

/* Read and write over the MII Management Data I/O (MDIO) interface. */

static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int result;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite8(0x40, ioaddr + MIICmd);	/* Trigger read */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
	result = ioread16(ioaddr + MIIData);

	rhine_enable_linkmon(ioaddr);
	return result;
}

static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite16(value, ioaddr + MIIData);
	iowrite8(0x20, ioaddr + MIICmd);	/* Trigger write */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));

	rhine_enable_linkmon(ioaddr);
}

static int rhine_open(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int rc;

	rc = request_irq(rp->pdev->irq, &rhine_interrupt, IRQF_SHARED, dev->name,
			 dev);
	if (rc)
		return rc;

	if (debug > 1)
		printk(KERN_DEBUG "%s: rhine_open() irq %d.\n",
		       dev->name, rp->pdev->irq);

	rc = alloc_ring(dev);
	if (rc) {
		free_irq(rp->pdev->irq, dev);
		return rc;
	}
	alloc_rbufs(dev);
	alloc_tbufs(dev);
	rhine_chip_reset(dev);
	init_registers(dev);
	if (debug > 2)
		printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
		       "MII status: %4.4x.\n",
		       dev->name, ioread16(ioaddr + ChipCmd),
		       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	netif_start_queue(dev);

	return 0;
}

static void rhine_tx_timeout(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
	       "%4.4x, resetting...\n",
	       dev->name, ioread16(ioaddr + IntrStatus),
	       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	/* protect against concurrent rx interrupts */
	disable_irq(rp->pdev->irq);

	napi_disable(&rp->napi);

	spin_lock(&rp->lock);

	/* clear all descriptors */
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);

	/* Reinitialize the hardware. */
	rhine_chip_reset(dev);
	init_registers(dev);

	spin_unlock(&rp->lock);
	enable_irq(rp->pdev->irq);

	dev->trans_start = jiffies;
	dev->stats.tx_errors++;
	netif_wake_queue(dev);
}

static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = rp->cur_tx % TX_RING_SIZE;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	rp->tx_skbuff[entry] = skb;

	if ((rp->quirks & rqRhineI) &&
	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
		/* Must use alignment buffer. */
		if (skb->len > PKT_BUF_SZ) {
			/* packet too long, drop it */
			dev_kfree_skb(skb);
			rp->tx_skbuff[entry] = NULL;
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}

		/* Padding is not copied and so must be redone. */
		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
		if (skb->len < ETH_ZLEN)
			memset(rp->tx_buf[entry] + skb->len, 0,
			       ETH_ZLEN - skb->len);
		rp->tx_skbuff_dma[entry] = 0;
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
						      (rp->tx_buf[entry] -
						       rp->tx_bufs));
	} else {
		rp->tx_skbuff_dma[entry] =
			pci_map_single(rp->pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
	}

	rp->tx_ring[entry].desc_length =
		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));

	/* lock eth irq */
	spin_lock_irq(&rp->lock);
	wmb();
	rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
	wmb();

	rp->cur_tx++;

	/* Non-x86 Todo: explicitly flush cache lines here. */

	/* Wake the potentially-idle transmit channel */
	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
		 ioaddr + ChipCmd1);
	IOSYNC;

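	/*
	 * Informational: TX_QUEUE_LEN (10) of the 16 ring entries may be
	 * outstanding at once; when cur_tx runs that far ahead of dirty_tx
	 * the queue is stopped until rhine_tx() reaps completed descriptors
	 * and wakes it again.
	 */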
	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;

	spin_unlock_irq(&rp->lock);

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
		       dev->name, rp->cur_tx-1, entry);
	}
	return NETDEV_TX_OK;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 intr_status;
	int boguscnt = max_interrupt_work;
	int handled = 0;

	while ((intr_status = get_intr_status(dev))) {
		handled = 1;

		/* Acknowledge all of the current interrupt sources ASAP. */
		if (intr_status & IntrTxDescRace)
			iowrite8(0x08, ioaddr + IntrStatus2);
		iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
		IOSYNC;

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
			       dev->name, intr_status);

		if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
				   IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
			iowrite16(IntrTxAborted |
				  IntrTxDone | IntrTxError | IntrTxUnderrun |
				  IntrPCIErr | IntrStatsMax | IntrLinkChange,
				  ioaddr + IntrEnable);

			napi_schedule(&rp->napi);
		}

		if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
			if (intr_status & IntrTxErrSummary) {
				/* Avoid scavenging before Tx engine turned off */
				RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
				if (debug > 2 &&
				    ioread8(ioaddr+ChipCmd) & CmdTxOn)
					printk(KERN_WARNING "%s: "
					       "rhine_interrupt() Tx engine "
					       "still on.\n", dev->name);
			}
			rhine_tx(dev);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | IntrLinkChange |
				   IntrStatsMax | IntrTxError | IntrTxAborted |
				   IntrTxUnderrun | IntrTxDescRace))
			rhine_error(dev, intr_status);

		if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
			       "status=%#8.8x.\n",
			       dev->name, intr_status);
			break;
		}
	}

	if (debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
		       dev->name, ioread16(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}

/* This routine is logically part of the interrupt handler, but isolated
   for clarity. */
static void rhine_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;

	spin_lock(&rp->lock);

	/* find and cleanup dirty tx descriptors */
	while (rp->dirty_tx != rp->cur_tx) {
		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
		if (debug > 6)
			printk(KERN_DEBUG "Tx scavenge %d status %8.8x.\n",
			       entry, txstatus);
		if (txstatus & DescOwn)
			break;
		if (txstatus & 0x8000) {
			if (debug > 1)
				printk(KERN_DEBUG "%s: Transmit error, "
				       "Tx status %8.8x.\n",
				       dev->name, txstatus);
			dev->stats.tx_errors++;
			if (txstatus & 0x0400)
				dev->stats.tx_carrier_errors++;
			if (txstatus & 0x0200)
				dev->stats.tx_window_errors++;
			if (txstatus & 0x0100)
				dev->stats.tx_aborted_errors++;
			if (txstatus & 0x0080)
				dev->stats.tx_heartbeat_errors++;
			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
				dev->stats.tx_fifo_errors++;
				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
				break; /* Keep the skb - we try again */
			}
			/* Transmitter restarted in 'abnormal' handler. */
		} else {
			if (rp->quirks & rqRhineI)
				dev->stats.collisions += (txstatus >> 3) & 0x0F;
			else
				dev->stats.collisions += txstatus & 0x0F;
			if (debug > 6)
				printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
				       (txstatus >> 3) & 0xF,
				       txstatus & 0xF);
			dev->stats.tx_bytes += rp->tx_skbuff[entry]->len;
			dev->stats.tx_packets++;
		}
		/* Free the original skb. */
		if (rp->tx_skbuff_dma[entry]) {
			pci_unmap_single(rp->pdev,
					 rp->tx_skbuff_dma[entry],
					 rp->tx_skbuff[entry]->len,
					 PCI_DMA_TODEVICE);
		}
		dev_kfree_skb_irq(rp->tx_skbuff[entry]);
		rp->tx_skbuff[entry] = NULL;
		entry = (++rp->dirty_tx) % TX_RING_SIZE;
	}
	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);

	spin_unlock(&rp->lock);
}

/* Process up to limit frames from receive ring */
static int rhine_rx(struct net_device *dev, int limit)
{
	struct rhine_private *rp = netdev_priv(dev);
	int count;
	int entry = rp->cur_rx % RX_RING_SIZE;

	if (debug > 4) {
		printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
		       dev->name, entry,
		       le32_to_cpu(rp->rx_head_desc->rx_status));
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	for (count = 0; count < limit; ++count) {
		struct rx_desc *desc = rp->rx_head_desc;
		u32 desc_status = le32_to_cpu(desc->rx_status);
		int data_size = desc_status >> 16;

		if (desc_status & DescOwn)
			break;

		if (debug > 4)
			printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n",
			       desc_status);

		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
			if ((desc_status & RxWholePkt) != RxWholePkt) {
				printk(KERN_WARNING "%s: Oversized Ethernet "
				       "frame spanned multiple buffers, entry "
				       "%#x length %d status %8.8x!\n",
				       dev->name, entry, data_size,
				       desc_status);
				printk(KERN_WARNING "%s: Oversized Ethernet "
				       "frame %p vs %p.\n", dev->name,
				       rp->rx_head_desc, &rp->rx_ring[entry]);
				dev->stats.rx_length_errors++;
			} else if (desc_status & RxErr) {
				/* There was an error. */
				if (debug > 2)
					printk(KERN_DEBUG "rhine_rx() Rx "
					       "error was %8.8x.\n",
					       desc_status);
				dev->stats.rx_errors++;
				if (desc_status & 0x0030)
					dev->stats.rx_length_errors++;
				if (desc_status & 0x0048)
					dev->stats.rx_fifo_errors++;
				if (desc_status & 0x0004)
					dev->stats.rx_frame_errors++;
				if (desc_status & 0x0002) {
					/* this can also be updated outside the interrupt handler */
					spin_lock(&rp->lock);
					dev->stats.rx_crc_errors++;
					spin_unlock(&rp->lock);
				}
			}
		} else {
			struct sk_buff *skb;
			/* Length should omit the CRC */
			int pkt_len = data_size - 4;

			/* Check if the packet is long enough to accept without
			   copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN)) != NULL) {
				skb_reserve(skb, NET_IP_ALIGN);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(rp->pdev,
							    rp->rx_skbuff_dma[entry],
							    rp->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);

				skb_copy_to_linear_data(skb,
							rp->rx_skbuff[entry]->data,
							pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(rp->pdev,
							       rp->rx_skbuff_dma[entry],
							       rp->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				skb = rp->rx_skbuff[entry];
				if (skb == NULL) {
					printk(KERN_ERR "%s: Inconsistent Rx "
					       "descriptor chain.\n",
					       dev->name);
					break;
				}
				rp->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				pci_unmap_single(rp->pdev,
						 rp->rx_skbuff_dma[entry],
						 rp->rx_buf_sz,
						 PCI_DMA_FROMDEVICE);
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			dev->stats.rx_bytes += pkt_len;
			dev->stats.rx_packets++;
		}
		entry = (++rp->cur_rx) % RX_RING_SIZE;
		rp->rx_head_desc = &rp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
		struct sk_buff *skb;
		entry = rp->dirty_rx % RX_RING_SIZE;
		if (rp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
			rp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			skb->dev = dev;	/* Mark as being used by this device. */
			rp->rx_skbuff_dma[entry] =
				pci_map_single(rp->pdev, skb->data,
					       rp->rx_buf_sz,
					       PCI_DMA_FROMDEVICE);
			rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
		}
		rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
	}

	return count;
}

/*
 * Clears the "tally counters" for CRC errors and missed frames(?).
 * It has been reported that some chips need a write of 0 to clear
 * these, for others the counters are set to 1 when written to and
 * instead cleared when read. So we clear them both ways ...
 */
static inline void clear_tally_counters(void __iomem *ioaddr)
{
	iowrite32(0, ioaddr + RxMissed);
	ioread16(ioaddr + RxCRCErrs);
	ioread16(ioaddr + RxMissed);
}

static void rhine_restart_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int entry = rp->dirty_tx % TX_RING_SIZE;
	u32 intr_status;

	/*
	 * If new errors occurred, we need to sort them out before doing Tx.
	 * In that case the ISR will be back here RSN anyway.
	 */
	intr_status = get_intr_status(dev);

	if ((intr_status & IntrTxErrSummary) == 0) {

		/* We know better than the chip where it should continue. */
		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
			  ioaddr + TxRingPtr);

		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
			 ioaddr + ChipCmd);
		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
			 ioaddr + ChipCmd1);
		IOSYNC;
	}
	else {
		/* This should never happen */
		if (debug > 1)
			printk(KERN_WARNING "%s: rhine_restart_tx() "
			       "Another error occurred %8.8x.\n",
			       dev->name, intr_status);
	}
}

static void rhine_error(struct net_device *dev, int intr_status)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	spin_lock(&rp->lock);

	if (intr_status & IntrLinkChange)
		rhine_check_media(dev, 0);
	if (intr_status & IntrStatsMax) {
		dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
		dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
		clear_tally_counters(ioaddr);
	}
	if (intr_status & IntrTxAborted) {
		if (debug > 1)
			printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n",
			       dev->name, intr_status);
	}
	if (intr_status & IntrTxUnderrun) {
		if (rp->tx_thresh < 0xE0)
			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
		if (debug > 1)
			printk(KERN_INFO "%s: Transmitter underrun, Tx "
			       "threshold now %2.2x.\n",
			       dev->name, rp->tx_thresh);
	}
	if (intr_status & IntrTxDescRace) {
		if (debug > 2)
			printk(KERN_INFO "%s: Tx descriptor write-back race.\n",
			       dev->name);
	}
	if ((intr_status & IntrTxError) &&
	    (intr_status & (IntrTxAborted |
	     IntrTxUnderrun | IntrTxDescRace)) == 0) {
		if (rp->tx_thresh < 0xE0) {
			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
		}
		if (debug > 1)
			printk(KERN_INFO "%s: Unspecified error. Tx "
			       "threshold now %2.2x.\n",
			       dev->name, rp->tx_thresh);
	}
	if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
			   IntrTxError))
		rhine_restart_tx(dev);

	if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
			    IntrTxError | IntrTxAborted | IntrNormalSummary |
			    IntrTxDescRace)) {
		if (debug > 1)
			printk(KERN_ERR "%s: Something Wicked happened! "
			       "%8.8x.\n", dev->name, intr_status);
	}

	spin_unlock(&rp->lock);
}

static struct net_device_stats *rhine_get_stats(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned long flags;

	spin_lock_irqsave(&rp->lock, flags);
	dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
	dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
	clear_tally_counters(ioaddr);
	spin_unlock_irqrestore(&rp->lock, flags);

	return &dev->stats;
}

static void rhine_set_rx_mode(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 mc_filter[2];	/* Multicast hash filter */
	u8 rx_mode;		/* Note: 0x02=accept runt, 0x01=accept errs */

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		rx_mode = 0x1C;
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
		rx_mode = 0x0C;
	} else {
		struct dev_mc_list *mclist;
		int i;
		memset(mc_filter, 0, sizeof(mc_filter));
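		/*
		 * Informational: each address is hashed with ether_crc();
		 * the top six CRC bits (crc >> 26) select one of the 64
		 * filter bits -- bit_nr >> 5 picks MulticastFilter0/1 and
		 * bit_nr & 31 the bit within that register.
		 */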
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
		rx_mode = 0x0C;
	}
	iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
}

static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct rhine_private *rp = netdev_priv(dev);

        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, pci_name(rp->pdev));
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct rhine_private *rp = netdev_priv(dev);
        int rc;

        spin_lock_irq(&rp->lock);
        rc = mii_ethtool_gset(&rp->mii_if, cmd);
        spin_unlock_irq(&rp->lock);

        return rc;
}

static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct rhine_private *rp = netdev_priv(dev);
        int rc;

        spin_lock_irq(&rp->lock);
        rc = mii_ethtool_sset(&rp->mii_if, cmd);
        spin_unlock_irq(&rp->lock);
        rhine_set_carrier(&rp->mii_if);

        return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
        struct rhine_private *rp = netdev_priv(dev);

        return mii_nway_restart(&rp->mii_if);
}

static u32 netdev_get_link(struct net_device *dev)
{
        struct rhine_private *rp = netdev_priv(dev);

        return mii_link_ok(&rp->mii_if);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
        return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
        debug = value;
}

static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct rhine_private *rp = netdev_priv(dev);

        if (!(rp->quirks & rqWOL))
                return;

        spin_lock_irq(&rp->lock);
        wol->supported = WAKE_PHY | WAKE_MAGIC |
                         WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;  /* Untested */
        wol->wolopts = rp->wolopts;
        spin_unlock_irq(&rp->lock);
}

static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct rhine_private *rp = netdev_priv(dev);
        u32 support = WAKE_PHY | WAKE_MAGIC |
                      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;     /* Untested */

        if (!(rp->quirks & rqWOL))
                return -EINVAL;

        if (wol->wolopts & ~support)
                return -EINVAL;

        spin_lock_irq(&rp->lock);
        rp->wolopts = wol->wolopts;
        spin_unlock_irq(&rp->lock);

        return 0;
}

static const struct ethtool_ops netdev_ethtool_ops = {
        .get_drvinfo            = netdev_get_drvinfo,
        .get_settings           = netdev_get_settings,
        .set_settings           = netdev_set_settings,
        .nway_reset             = netdev_nway_reset,
        .get_link               = netdev_get_link,
        .get_msglevel           = netdev_get_msglevel,
        .set_msglevel           = netdev_set_msglevel,
        .get_wol                = rhine_get_wol,
        .set_wol                = rhine_set_wol,
};
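/*
 * These ops back the usual ethtool plumbing; roughly (interface name
 * eth0 assumed for illustration):
 *
 *      ethtool eth0               -> netdev_get_settings()
 *      ethtool -s eth0 autoneg on -> netdev_set_settings()
 *      ethtool -s eth0 wol g      -> rhine_set_wol() (magic packet)
 */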

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct rhine_private *rp = netdev_priv(dev);
        int rc;

        if (!netif_running(dev))
                return -EINVAL;

        spin_lock_irq(&rp->lock);
        rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
        spin_unlock_irq(&rp->lock);
        rhine_set_carrier(&rp->mii_if);

        return rc;
}

static int rhine_close(struct net_device *dev)
{
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;

        /* napi_disable() may sleep, so call it before taking the lock. */
        napi_disable(&rp->napi);

        spin_lock_irq(&rp->lock);

        netif_stop_queue(dev);

        if (debug > 1)
                printk(KERN_DEBUG "%s: Shutting down ethercard, "
                       "status was %4.4x.\n",
                       dev->name, ioread16(ioaddr + ChipCmd));

        /* Switch to loopback mode to avoid hardware races. */
        iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);

        /* Disable interrupts by clearing the interrupt mask. */
        iowrite16(0x0000, ioaddr + IntrEnable);

        /* Stop the chip's Tx and Rx processes. */
        iowrite16(CmdStop, ioaddr + ChipCmd);

        spin_unlock_irq(&rp->lock);

        free_irq(rp->pdev->irq, dev);
        free_rbufs(dev);
        free_tbufs(dev);
        free_ring(dev);

        return 0;
}
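/*
 * Teardown order matters here: the queue and NAPI are stopped first,
 * the chip is switched to loopback with interrupts masked and DMA
 * stopped, and only then are the IRQ and ring buffers released, so
 * neither DMA nor an interrupt can touch memory being freed.
 */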


static void __devexit rhine_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct rhine_private *rp = netdev_priv(dev);

        unregister_netdev(dev);

        pci_iounmap(pdev, rp->base);
        pci_release_regions(pdev);

        free_netdev(dev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}

static void rhine_shutdown(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;

        if (!(rp->quirks & rqWOL))
                return; /* Nothing to do for non-WOL adapters */

        rhine_power_init(dev);

        /* Make sure we use pattern 0, 1 and not 4, 5 */
        if (rp->quirks & rq6patterns)
                iowrite8(0x04, ioaddr + WOLcgClr);

        if (rp->wolopts & WAKE_MAGIC) {
                iowrite8(WOLmagic, ioaddr + WOLcrSet);
                /*
                 * Turn EEPROM-controlled wake-up back on -- some hardware may
                 * not cooperate otherwise.
                 */
                iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
        }

        if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
                iowrite8(WOLbmcast, ioaddr + WOLcgSet);

        if (rp->wolopts & WAKE_PHY)
                iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

        if (rp->wolopts & WAKE_UCAST)
                iowrite8(WOLucast, ioaddr + WOLcrSet);

        if (rp->wolopts) {
                /* Enable legacy WOL (for old motherboards) */
                iowrite8(0x01, ioaddr + PwcfgSet);
                iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
        }

        /* Hit power state D3 (sleep) */
        if (!avoid_D3)
                iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);

        /* TODO: Check use of pci_enable_wake() */
}
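/*
 * Shutdown summary: on WOL-capable chips the requested wake sources
 * are armed via WOLcrSet/WOLcgSet, legacy WOL is enabled for old
 * motherboards, and the chip is finally put into D3 through StickyHW
 * -- unless avoid_D3 is set for BIOSes that cannot PXE boot a chip
 * left in D3 (see rhine_init() below).
 */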

#ifdef CONFIG_PM
static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct rhine_private *rp = netdev_priv(dev);
        unsigned long flags;

        if (!netif_running(dev))
                return 0;

        napi_disable(&rp->napi);

        netif_device_detach(dev);
        pci_save_state(pdev);

        spin_lock_irqsave(&rp->lock, flags);
        rhine_shutdown(pdev);
        spin_unlock_irqrestore(&rp->lock, flags);

        free_irq(dev->irq, dev);
        return 0;
}

static int rhine_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct rhine_private *rp = netdev_priv(dev);
        unsigned long flags;
        int ret;

        if (!netif_running(dev))
                return 0;

        if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
                printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);

        ret = pci_set_power_state(pdev, PCI_D0);
        if (debug > 1)
                printk(KERN_INFO "%s: Entering power state D0 %s (%d).\n",
                       dev->name, ret ? "failed" : "succeeded", ret);

        pci_restore_state(pdev);

        spin_lock_irqsave(&rp->lock, flags);
#ifdef USE_MMIO
        enable_mmio(rp->pioaddr, rp->quirks);
#endif
        rhine_power_init(dev);
        free_tbufs(dev);
        free_rbufs(dev);
        alloc_tbufs(dev);
        alloc_rbufs(dev);
        init_registers(dev);
        spin_unlock_irqrestore(&rp->lock, flags);

        netif_device_attach(dev);

        return 0;
}
#endif /* CONFIG_PM */
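/*
 * Note that resume rebuilds the descriptor rings from scratch
 * (free_tbufs/free_rbufs, then alloc_tbufs/alloc_rbufs) rather than
 * trusting whatever state survived the power transition;
 * init_registers() then reprograms the chip before the device is
 * reattached.
 */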

static struct pci_driver rhine_driver = {
        .name           = DRV_NAME,
        .id_table       = rhine_pci_tbl,
        .probe          = rhine_init_one,
        .remove         = __devexit_p(rhine_remove_one),
#ifdef CONFIG_PM
        .suspend        = rhine_suspend,
        .resume         = rhine_resume,
#endif /* CONFIG_PM */
        .shutdown       = rhine_shutdown,
};

static struct dmi_system_id __initdata rhine_dmi_table[] = {
        {
                .ident = "EPIA-M",
                .matches = {
                        DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
                        DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
                },
        },
        {
                .ident = "KV7",
                .matches = {
                        DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
                        DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
                },
        },
        { NULL }
};
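/*
 * Each .matches[] block is a conjunction: a system is flagged only if
 * both its BIOS vendor and version strings match, so an Award BIOS
 * with a version other than "6.00 PG" does not trigger the
 * workaround.
 */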

static int __init rhine_init(void)
{
/* When built as a module, this is printed whether or not devices are found in probe. */
#ifdef MODULE
        printk(version);
#endif
        if (dmi_check_system(rhine_dmi_table)) {
                /* these BIOSes fail at PXE boot if chip is in D3 */
                avoid_D3 = 1;
                printk(KERN_WARNING "%s: Broken BIOS detected, avoid_D3 "
                       "enabled.\n", DRV_NAME);
        } else if (avoid_D3)
                printk(KERN_INFO "%s: avoid_D3 set.\n", DRV_NAME);

        return pci_register_driver(&rhine_driver);
}


static void __exit rhine_cleanup(void)
{
        pci_unregister_driver(&rhine_driver);
}


module_init(rhine_init);
module_exit(rhine_cleanup);
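/*
 * Typical usage, assuming the module_param() declarations earlier in
 * this file expose "debug" and "avoid_D3":
 *
 *      modprobe via-rhine debug=2 avoid_D3=1
 *
 * or, when built in, on the kernel command line: via-rhine.avoid_D3=1
 */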