]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/epic100.c
[PATCH] irq-flags: drivers/net: Use the new IRQF_ constants
[net-next-2.6.git] / drivers / net / epic100.c
CommitLineData
1da177e4
LT
1/* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
2/*
3 Written/copyright 1997-2001 by Donald Becker.
4
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
11
12 This driver is for the SMC83c170/175 "EPIC" series, as used on the
13 SMC EtherPower II 9432 PCI adapter, and several CardBus cards.
14
15 The author may be reached as becker@scyld.com, or C/O
16 Scyld Computing Corporation
17 410 Severn Ave., Suite 210
18 Annapolis MD 21403
19
20 Information and updates available at
21 http://www.scyld.com/network/epic100.html
22
23 ---------------------------------------------------------------------
f3b197ac 24
1da177e4 25 Linux kernel-specific changes:
f3b197ac 26
1da177e4
LT
27 LK1.1.2 (jgarzik):
28 * Merge becker version 1.09 (4/08/2000)
29
30 LK1.1.3:
31 * Major bugfix to 1.09 driver (Francis Romieu)
f3b197ac 32
1da177e4
LT
33 LK1.1.4 (jgarzik):
34 * Merge becker test version 1.09 (5/29/2000)
35
36 LK1.1.5:
37 * Fix locking (jgarzik)
38 * Limit 83c175 probe to ethernet-class PCI devices (rgooch)
39
40 LK1.1.6:
41 * Merge becker version 1.11
42 * Move pci_enable_device before any PCI BAR len checks
43
44 LK1.1.7:
45 * { fill me in }
46
47 LK1.1.8:
48 * ethtool driver info support (jgarzik)
49
50 LK1.1.9:
51 * ethtool media get/set support (jgarzik)
52
53 LK1.1.10:
54 * revert MII transceiver init change (jgarzik)
55
56 LK1.1.11:
57 * implement ETHTOOL_[GS]SET, _NWAY_RST, _[GS]MSGLVL, _GLINK (jgarzik)
58 * replace some MII-related magic numbers with constants
59
60 LK1.1.12:
61 * fix power-up sequence
62
63 LK1.1.13:
64 * revert version 1.1.12, power-up sequence "fix"
65
66 LK1.1.14 (Kryzsztof Halasa):
67 * fix spurious bad initializations
68 * pound phy a la SMSC's app note on the subject
f3b197ac 69
1da177e4
LT
70 AC1.1.14ac
71 * fix power up/down for ethtool that broke in 1.11
72
73*/
74
75#define DRV_NAME "epic100"
76#define DRV_VERSION "1.11+LK1.1.14+AC1.1.14"
77#define DRV_RELDATE "June 2, 2004"
78
79/* The user-configurable values.
80 These may be modified when a driver module is loaded.*/
81
82static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
83
84/* Used to pass the full-duplex flag, etc. */
85#define MAX_UNITS 8 /* More are supported, limit only on options */
86static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
87static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
88
89/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
90 Setting to > 1518 effectively disables this feature. */
91static int rx_copybreak;
92
93/* Operational parameters that are set at compile time. */
94
95/* Keep the ring sizes a power of two for operational efficiency.
96 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
97 Making the Tx ring too large decreases the effectiveness of channel
98 bonding and packet priority.
99 There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	256
#define TX_QUEUE_LEN	240		/* Limit ring entries actually used.  */
#define RX_RING_SIZE	256
/* Total byte size of each descriptor ring.  Parenthesized so the macros
   expand safely inside larger expressions (they are passed as the size
   argument to pci_alloc_consistent()/pci_free_consistent()); the original
   unparenthesized `N*sizeof(...)` form is a precedence hazard. */
#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct epic_tx_desc))
#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct epic_rx_desc))
105
106/* Operational parameters that usually are not changed. */
107/* Time in jiffies before concluding the transmitter is hung. */
108#define TX_TIMEOUT (2*HZ)
109
110#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
111
112/* Bytes transferred to chip before transmission starts. */
113/* Initial threshold, increased on underflow, rounded down to 4 byte units. */
114#define TX_FIFO_THRESH 256
115#define RX_FIFO_THRESH 1 /* 0-3, 0==32, 64,96, or 3==128 bytes */
116
1da177e4
LT
117#include <linux/module.h>
118#include <linux/kernel.h>
119#include <linux/string.h>
120#include <linux/timer.h>
121#include <linux/errno.h>
122#include <linux/ioport.h>
123#include <linux/slab.h>
124#include <linux/interrupt.h>
125#include <linux/pci.h>
126#include <linux/delay.h>
127#include <linux/netdevice.h>
128#include <linux/etherdevice.h>
129#include <linux/skbuff.h>
130#include <linux/init.h>
131#include <linux/spinlock.h>
132#include <linux/ethtool.h>
133#include <linux/mii.h>
134#include <linux/crc32.h>
135#include <linux/bitops.h>
136#include <asm/io.h>
137#include <asm/uaccess.h>
138
139/* These identify the driver base version and may not be removed. */
140static char version[] __devinitdata =
141DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
142static char version2[] __devinitdata =
143" http://www.scyld.com/network/epic100.html\n";
144static char version3[] __devinitdata =
145" (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
146
147MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
148MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
149MODULE_LICENSE("GPL");
150
151module_param(debug, int, 0);
152module_param(rx_copybreak, int, 0);
153module_param_array(options, int, NULL, 0);
154module_param_array(full_duplex, int, NULL, 0);
155MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
156MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
157MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
158MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
159
160/*
161 Theory of Operation
162
163I. Board Compatibility
164
165This device driver is designed for the SMC "EPIC/100", the SMC
166single-chip Ethernet controllers for PCI. This chip is used on
167the SMC EtherPower II boards.
168
169II. Board-specific settings
170
171PCI bus devices are configured by the system at boot time, so no jumpers
172need to be set on the board. The system BIOS will assign the
173PCI INTA signal to a (preferably otherwise unused) system IRQ line.
174Note: Kernel versions earlier than 1.3.73 do not support shared PCI
175interrupt lines.
176
177III. Driver operation
178
179IIIa. Ring buffers
180
181IVb. References
182
183http://www.smsc.com/main/datasheets/83c171.pdf
184http://www.smsc.com/main/datasheets/83c175.pdf
185http://scyld.com/expert/NWay.html
186http://www.national.com/pf/DP/DP83840A.html
187
188IVc. Errata
189
190*/
191
192
1da177e4
LT
/* Per-chip capability bits kept in epic_chip_info.drv_flags. */
enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };

#define EPIC_TOTAL_SIZE 0x100
#define USE_IO_OPS 1

/* Index into pci_id_tbl[]; order must match the driver_data values in
   epic_pci_tbl[] below. */
typedef enum {
	SMSC_83C170_0,
	SMSC_83C170,
	SMSC_83C175,
} chip_t;


struct epic_chip_info {
	const char *name;
	int io_size;			/* Needed for I/O region check or ioremap(). */
	int drv_flags;			/* Driver use, intended as capability flags. */
};


/* indexed by chip_t */
static const struct epic_chip_info pci_id_tbl[] = {
	{ "SMSC EPIC/100 83c170",
	  EPIC_TOTAL_SIZE, TYPE2_INTR | NO_MII | MII_PWRDWN },
	{ "SMSC EPIC/100 83c170",
	  EPIC_TOTAL_SIZE, TYPE2_INTR },
	{ "SMSC EPIC/C 83c175",
	  EPIC_TOTAL_SIZE, TYPE2_INTR | MII_PWRDWN },
};


/* PCI match table.  Vendor 0x10B8 is SMC; the first entry matches a
   specific subsystem ID, the 83c175 entry is additionally restricted to
   ethernet-class devices via the class/class_mask fields. */
static struct pci_device_id epic_pci_tbl[] = {
	{ 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
	{ 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
	{ 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
	{ 0,}
};
MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
231
f3b197ac 232
1da177e4
LT
233#ifndef USE_IO_OPS
234#undef inb
235#undef inw
236#undef inl
237#undef outb
238#undef outw
239#undef outl
240#define inb readb
241#define inw readw
242#define inl readl
243#define outb writeb
244#define outw writew
245#define outl writel
246#endif
247
248/* Offsets to registers, using the (ugh) SMC names. */
249enum epic_registers {
250 COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
251 PCIBurstCnt=0x18,
252 TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28, /* Rx error counters. */
253 MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
254 LAN0=64, /* MAC address. */
255 MC0=80, /* Multicast filter table. */
256 RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
257 PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
258};
259
260/* Interrupt register bits, using my own meaningful names. */
261enum IntrStatus {
262 TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
263 PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
264 RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
265 TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
266 RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
267};
268enum CommandBits {
269 StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
270 StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
271};
272
273#define EpicRemoved 0xffffffff /* Chip failed or removed (CardBus) */
274
275#define EpicNapiEvent (TxEmpty | TxDone | \
276 RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
277#define EpicNormalEvent (0x0000ffff & ~EpicNapiEvent)
278
f71e1309 279static const u16 media2miictl[16] = {
1da177e4
LT
280 0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0,
281 0, 0, 0, 0, 0, 0, 0, 0 };
282
283/* The EPIC100 Rx and Tx buffer descriptors. */
284
285struct epic_tx_desc {
286 u32 txstatus;
287 u32 bufaddr;
288 u32 buflength;
289 u32 next;
290};
291
292struct epic_rx_desc {
293 u32 rxstatus;
294 u32 bufaddr;
295 u32 buflength;
296 u32 next;
297};
298
299enum desc_status_bits {
300 DescOwn=0x8000,
301};
302
#define PRIV_ALIGN	15 	/* Required alignment mask */
/* Per-device driver state, hung off net_device->priv. */
struct epic_private {
	struct epic_rx_desc *rx_ring;	/* Rx descriptor ring (DMA-coherent). */
	struct epic_tx_desc *tx_ring;	/* Tx descriptor ring (DMA-coherent). */
	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];

	dma_addr_t tx_ring_dma;		/* Bus address of tx_ring. */
	dma_addr_t rx_ring_dma;		/* Bus address of rx_ring. */

	/* Ring pointers. */
	spinlock_t lock;				/* Group with Tx control cache line. */
	spinlock_t napi_lock;
	unsigned int reschedule_in_poll;	/* NOTE(review): appears to flag a
						   deferred NAPI reschedule; set/used
						   outside this chunk — confirm. */
	unsigned int cur_tx, dirty_tx;		/* Producer/consumer Tx indices. */

	unsigned int cur_rx, dirty_rx;		/* Producer/consumer Rx indices. */
	u32 irq_mask;				/* Cached INTMASK value. */
	unsigned int rx_buf_sz;			/* Based on MTU+slack. */

	struct pci_dev *pci_dev;		/* PCI bus location. */
	int chip_id, chip_flags;		/* chip_t index and drv_flags copy. */

	struct net_device_stats stats;
	struct timer_list timer;		/* Media selection timer. */
	int tx_threshold;			/* Tx FIFO start threshold (bytes). */
	unsigned char mc_filter[8];		/* Multicast hash filter shadow. */
	signed char phys[4];			/* MII device addresses. */
	u16 advertising;			/* NWay media advertisement */
	int mii_phy_cnt;			/* Number of PHYs found at probe. */
	struct mii_if_info mii;
	unsigned int tx_full:1;			/* The Tx queue is full. */
	unsigned int default_port:4;		/* Last dev->if_port value. */
};
339
/* Forward declarations; definitions appear below in roughly this order. */
static int epic_open(struct net_device *dev);
static int read_eeprom(long ioaddr, int location);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
static void epic_restart(struct net_device *dev);
static void epic_timer(unsigned long data);
static void epic_tx_timeout(struct net_device *dev);
static void epic_init_ring(struct net_device *dev);
static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev);
static int epic_rx(struct net_device *dev, int budget);
static int epic_poll(struct net_device *dev, int *budget);
static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static struct ethtool_ops netdev_ethtool_ops;
static int epic_close(struct net_device *dev);
static struct net_device_stats *epic_get_stats(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
357
f3b197ac 358
1da177e4
LT
359
/* PCI probe: enable and map the device, allocate the net_device and both
   DMA descriptor rings, read the station address, scan the MII bus for
   transceivers, then leave the chip in low-power mode and register the
   netdev.  Returns 0 on success or a negative errno; all resources are
   unwound through the goto chain at the bottom on failure. */
static int __devinit epic_init_one (struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int card_idx = -1;
	long ioaddr;
	int chip_idx = (int) ent->driver_data;
	int irq;
	struct net_device *dev;
	struct epic_private *ep;
	int i, ret, option = 0, duplex = 0;
	void *ring_space;
	dma_addr_t ring_dma;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
			version, version2, version3);
#endif

	card_idx++;

	/* Must enable the device before touching any BAR (length checks
	   included), per the PCI driver model. */
	ret = pci_enable_device(pdev);
	if (ret)
		goto out;
	irq = pdev->irq;

	if (pci_resource_len(pdev, 0) < pci_id_tbl[chip_idx].io_size) {
		printk (KERN_ERR "card %d: no PCI region space\n", card_idx);
		ret = -ENODEV;
		goto err_out_disable;
	}

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, DRV_NAME);
	if (ret < 0)
		goto err_out_disable;

	ret = -ENOMEM;

	dev = alloc_etherdev(sizeof (*ep));
	if (!dev) {
		printk (KERN_ERR "card %d: no memory for eth device\n", card_idx);
		goto err_out_free_res;
	}
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* BAR 0 is I/O space, BAR 1 is the MMIO alias of the same registers. */
#ifdef USE_IO_OPS
	ioaddr = pci_resource_start (pdev, 0);
#else
	ioaddr = pci_resource_start (pdev, 1);
	ioaddr = (long) ioremap (ioaddr, pci_resource_len (pdev, 1));
	if (!ioaddr) {
		printk (KERN_ERR DRV_NAME " %d: ioremap failed\n", card_idx);
		goto err_out_free_netdev;
	}
#endif

	pci_set_drvdata(pdev, dev);
	ep = dev->priv;
	ep->mii.dev = dev;
	ep->mii.mdio_read = mdio_read;
	ep->mii.mdio_write = mdio_write;
	ep->mii.phy_id_mask = 0x1f;
	ep->mii.reg_num_mask = 0x1f;

	/* DMA-coherent descriptor rings; freed in reverse order on error. */
	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_iounmap;
	ep->tx_ring = (struct epic_tx_desc *)ring_space;
	ep->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	ep->rx_ring = (struct epic_rx_desc *)ring_space;
	ep->rx_ring_dma = ring_dma;

	/* Media/duplex forcing: dev->mem_start (boot-time "ether=" style)
	   takes precedence over the module parameter arrays. */
	if (dev->mem_start) {
		option = dev->mem_start;
		duplex = (dev->mem_start & 16) ? 1 : 0;
	} else if (card_idx >= 0  &&  card_idx < MAX_UNITS) {
		if (options[card_idx] >= 0)
			option = options[card_idx];
		if (full_duplex[card_idx] >= 0)
			duplex = full_duplex[card_idx];
	}

	dev->base_addr = ioaddr;
	dev->irq = irq;

	spin_lock_init(&ep->lock);
	spin_lock_init(&ep->napi_lock);
	ep->reschedule_in_poll = 0;

	/* Bring the chip out of low-power mode. */
	outl(0x4200, ioaddr + GENCTL);
	/* Magic?!  If we don't set this bit the MII interface won't work. */
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		outl(0x0008, ioaddr + TEST1);

	/* Turn on the MII transceiver. */
	outl(0x12, ioaddr + MIICfg);
	if (chip_idx == 1)
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
	outl(0x0200, ioaddr + GENCTL);

	/* Note: the '175 does not have a serial EEPROM. */
	/* Station address lives in the LAN0 registers, 16 bits per slot. */
	for (i = 0; i < 3; i++)
		((u16 *)dev->dev_addr)[i] = le16_to_cpu(inw(ioaddr + LAN0 + i*4));

	if (debug > 2) {
		printk(KERN_DEBUG DRV_NAME "(%s): EEPROM contents\n",
		       pci_name(pdev));
		for (i = 0; i < 64; i++)
			printk(" %4.4x%s", read_eeprom(ioaddr, i),
			       i % 16 == 15 ? "\n" : "");
	}

	ep->pci_dev = pdev;
	ep->chip_id = chip_idx;
	ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
	ep->irq_mask =
		(ep->chip_flags & TYPE2_INTR ?  PCIBusErr175 : PCIBusErr170)
		 | CntFull | TxUnderrun | EpicNapiEvent;

	/* Find the connected MII xcvrs.
	   Doing this in open() would allow detecting external xcvrs later, but
	   takes much time and no cards have external MII. */
	{
		int phy, phy_idx = 0;
		for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);
			/* 0x0000 and 0xffff both mean "no PHY at this address". */
			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
				ep->phys[phy_idx++] = phy;
				printk(KERN_INFO DRV_NAME "(%s): MII transceiver #%d control "
				       "%4.4x status %4.4x.\n",
				       pci_name(pdev), phy, mdio_read(dev, phy, 0), mii_status);
			}
		}
		ep->mii_phy_cnt = phy_idx;
		if (phy_idx != 0) {
			phy = ep->phys[0];
			ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
			printk(KERN_INFO DRV_NAME "(%s): Autonegotiation advertising %4.4x link "
			       "partner %4.4x.\n",
			       pci_name(pdev), ep->mii.advertising, mdio_read(dev, phy, 5));
		} else if ( ! (ep->chip_flags & NO_MII)) {
			printk(KERN_WARNING DRV_NAME "(%s): ***WARNING***: No MII transceiver found!\n",
			       pci_name(pdev));
			/* Use the known PHY address of the EPII. */
			ep->phys[0] = 3;
		}
		ep->mii.phy_id = ep->phys[0];
	}

	/* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
	if (ep->chip_flags & MII_PWRDWN)
		outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL);
	outl(0x0008, ioaddr + GENCTL);

	/* The lower four bits are the media type. */
	if (duplex) {
		ep->mii.force_media = ep->mii.full_duplex = 1;
		printk(KERN_INFO DRV_NAME "(%s): Forced full duplex operation requested.\n",
		       pci_name(pdev));
	}
	dev->if_port = ep->default_port = option;

	/* The Epic-specific entries in the device structure. */
	dev->open = &epic_open;
	dev->hard_start_xmit = &epic_start_xmit;
	dev->stop = &epic_close;
	dev->get_stats = &epic_get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &netdev_ioctl;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->tx_timeout = &epic_tx_timeout;
	dev->poll = epic_poll;
	dev->weight = 64;

	ret = register_netdev(dev);
	if (ret < 0)
		goto err_out_unmap_rx;

	printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ",
	       dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq);
	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x.\n", dev->dev_addr[i]);

out:
	return ret;

/* Error unwind: release resources in reverse order of acquisition. */
err_out_unmap_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
err_out_iounmap:
#ifndef USE_IO_OPS
	iounmap(ioaddr);
err_out_free_netdev:
#endif
	free_netdev(dev);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
	goto out;
}
f3b197ac 575
1da177e4
LT
576/* Serial EEPROM section. */
577
578/* EEPROM_Ctrl bits. */
579#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
580#define EE_CS 0x02 /* EEPROM chip select. */
581#define EE_DATA_WRITE 0x08 /* EEPROM chip data in. */
582#define EE_WRITE_0 0x01
583#define EE_WRITE_1 0x09
584#define EE_DATA_READ 0x10 /* EEPROM chip data out. */
585#define EE_ENB (0x0001 | EE_CS)
586
587/* Delay between EEPROM clock transitions.
588 This serves to flush the operation to the PCI bus.
589 */
590
591#define eeprom_delay() inl(ee_addr)
592
593/* The EEPROM commands include the alway-set leading bit. */
594#define EE_WRITE_CMD (5 << 6)
595#define EE_READ64_CMD (6 << 6)
596#define EE_READ256_CMD (6 << 8)
597#define EE_ERASE_CMD (7 << 6)
598
/* Mask all chip interrupt sources by clearing INTMASK. */
static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
{
	long ioaddr = dev->base_addr;

	outl(0x00000000, ioaddr + INTMASK);
}
605
/* Flush a posted MMIO write by reading back INTMASK.  A no-op when using
   port I/O (USE_IO_OPS), which is not posted. */
static inline void __epic_pci_commit(long ioaddr)
{
#ifndef USE_IO_OPS
	inl(ioaddr + INTMASK);
#endif
}
612
/* Mask the NAPI-handled interrupt sources (Rx/Tx events) while keeping the
   error interrupts in ep->irq_mask enabled; flushed so the mask takes
   effect before the caller proceeds. */
static inline void epic_napi_irq_off(struct net_device *dev,
				     struct epic_private *ep)
{
	long ioaddr = dev->base_addr;

	outl(ep->irq_mask & ~EpicNapiEvent, ioaddr + INTMASK);
	__epic_pci_commit(ioaddr);
}
621
/* Re-enable the NAPI-handled interrupt sources on top of ep->irq_mask. */
static inline void epic_napi_irq_on(struct net_device *dev,
				    struct epic_private *ep)
{
	long ioaddr = dev->base_addr;

	/* No need to commit possible posted write */
	outl(ep->irq_mask | EpicNapiEvent, ioaddr + INTMASK);
}
630
/* Bit-bang one 16-bit word out of the serial EEPROM at @location.
   The EEPROM size bit in EECTL selects the 64- vs 256-word read command.
   Probe-time only (__devinit); returns the word read. */
static int __devinit read_eeprom(long ioaddr, int location)
{
	int i;
	int retval = 0;
	long ee_addr = ioaddr + EECTL;
	int read_cmd = location |
		(inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);

	/* Pulse chip select to start a new transaction. */
	outl(EE_ENB & ~EE_CS, ee_addr);
	outl(EE_ENB, ee_addr);

	/* Shift the read command bits out. */
	for (i = 12; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
		outl(EE_ENB | dataval, ee_addr);
		eeprom_delay();
		outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
		eeprom_delay();
	}
	outl(EE_ENB, ee_addr);

	/* Clock the 16 data bits back in, MSB first. */
	for (i = 16; i > 0; i--) {
		outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
		eeprom_delay();
		retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
		outl(EE_ENB, ee_addr);
		eeprom_delay();
	}

	/* Terminate the EEPROM access. */
	outl(EE_ENB & ~EE_CS, ee_addr);
	return retval;
}
664
665#define MII_READOP 1
666#define MII_WRITEOP 2
/* Read MII management register @location of PHY @phy_id.  Polls the
   controller until the read-op bit clears, then returns the data word;
   returns 0xffff if the operation never completes. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	long ioaddr = dev->base_addr;
	int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
	int i;

	outl(read_cmd, ioaddr + MIICtrl);
	/* Typical operation takes 25 loops. */
	for (i = 400; i > 0; i--) {
		barrier();
		if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) {
			/* Work around read failure bug: low registers of PHY 1
			   can spuriously read as all-ones; reissue the read. */
			if (phy_id == 1 && location < 6
			    && inw(ioaddr + MIIData) == 0xffff) {
				outl(read_cmd, ioaddr + MIICtrl);
				continue;
			}
			return inw(ioaddr + MIIData);
		}
	}
	return 0xffff;
}
689
690static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
691{
692 long ioaddr = dev->base_addr;
693 int i;
694
695 outw(value, ioaddr + MIIData);
696 outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl);
f3b197ac 697 for (i = 10000; i > 0; i--) {
1da177e4
LT
698 barrier();
699 if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
700 break;
701 }
702 return;
703}
704
f3b197ac 705
1da177e4
LT
/* net_device open(): reset the chip, grab the (shared) IRQ, set up the
   descriptor rings, program media/duplex from the MII, start Rx, enable
   interrupts and kick off the media-watch timer.  Returns 0 or the
   request_irq() error. */
static int epic_open(struct net_device *dev)
{
	struct epic_private *ep = dev->priv;
	long ioaddr = dev->base_addr;
	int i;
	int retval;

	/* Soft reset the chip. */
	outl(0x4001, ioaddr + GENCTL);

	if ((retval = request_irq(dev->irq, &epic_interrupt, IRQF_SHARED, dev->name, dev)))
		return retval;

	epic_init_ring(dev);

	outl(0x4000, ioaddr + GENCTL);
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		outl(0x0008, ioaddr + TEST1);

	/* Pull the chip out of low-power mode, enable interrupts, and set for
	   PCI read multiple.  The MIIcfg setting and strange write order are
	   required by the details of which bits are reset and the transceiver
	   wiring on the Ositech CardBus card.
	*/
#if 0
	outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
#endif
	if (ep->chip_flags & MII_PWRDWN)
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);

	/* GENCTL byte-order bits differ for big-endian hosts. */
#if defined(__powerpc__) || defined(__sparc__)		/* Big endian */
	outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
	inl(ioaddr + GENCTL);
	outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#else
	outl(0x4412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
	inl(ioaddr + GENCTL);
	outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#endif

	udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */

	/* Reload the station address into the LAN0 registers. */
	for (i = 0; i < 3; i++)
		outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);

	ep->tx_threshold = TX_FIFO_THRESH;
	outl(ep->tx_threshold, ioaddr + TxThresh);

	if (media2miictl[dev->if_port & 15]) {
		/* Forced media: program BMCR directly from the lookup table. */
		if (ep->mii_phy_cnt)
			mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
		if (dev->if_port == 1) {
			if (debug > 1)
				printk(KERN_INFO "%s: Using the 10base2 transceiver, MII "
				       "status %4.4x.\n",
				       dev->name, mdio_read(dev, ep->phys[0], MII_BMSR));
		}
	} else {
		/* Autonegotiated media: derive duplex from the link partner
		   ability word, restarting NWay if no partner responded. */
		int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
		if (mii_lpa != 0xffff) {
			if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
				ep->mii.full_duplex = 1;
			else if (! (mii_lpa & LPA_LPACK))
				mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
			if (debug > 1)
				printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d"
				       " register read of %4.4x.\n", dev->name,
				       ep->mii.full_duplex ? "full" : "half",
				       ep->phys[0], mii_lpa);
		}
	}

	outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
	outl(ep->rx_ring_dma, ioaddr + PRxCDAR);
	outl(ep->tx_ring_dma, ioaddr + PTxCDAR);

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	outl(StartRx | RxQueued, ioaddr + COMMAND);

	netif_start_queue(dev);

	/* Enable interrupts by setting the interrupt mask. */
	outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
	     | CntFull | TxUnderrun
	     | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);

	if (debug > 1)
		printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x "
		       "%s-duplex.\n",
		       dev->name, ioaddr, dev->irq, (int)inl(ioaddr + GENCTL),
		       ep->mii.full_duplex ? "full" : "half");

	/* Set the timer to switch to check for link beat and perhaps switch
	   to an alternate media type. */
	init_timer(&ep->timer);
	ep->timer.expires = jiffies + 3*HZ;
	ep->timer.data = (unsigned long)dev;
	ep->timer.function = &epic_timer;				/* timer handler */
	add_timer(&ep->timer);

	return 0;
}
810
811/* Reset the chip to recover from a PCI transaction error.
812 This may occur at interrupt time. */
/* Reset the chip to recover from a PCI transaction error.
   This may occur at interrupt time. */
/* Quiesce the device: stop the queue, mask interrupts, halt DMA, fold the
   hardware error counters into the stats, and drain pending Rx packets. */
static void epic_pause(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct epic_private *ep = dev->priv;

	netif_stop_queue (dev);

	/* Disable interrupts by clearing the interrupt mask. */
	outl(0x00000000, ioaddr + INTMASK);
	/* Stop the chip's Tx and Rx DMA processes. */
	outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND);

	/* Update the error counts. */
	/* 0xffff means the chip is gone (e.g. CardBus eject) — skip reads. */
	if (inw(ioaddr + COMMAND) != 0xffff) {
		ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
		ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
		ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
	}

	/* Remove the packets on the Rx queue. */
	epic_rx(dev, RX_RING_SIZE);
}
835
/* Re-initialize the chip after a soft reset (e.g. from the Tx-timeout or
   PCI-error paths), resuming the rings at the current cur_rx/dirty_tx
   positions rather than restarting from entry 0. */
static void epic_restart(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct epic_private *ep = dev->priv;
	int i;

	/* Soft reset the chip. */
	outl(0x4001, ioaddr + GENCTL);

	printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
	       dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
	udelay(1);

	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		outl(0x0008, ioaddr + TEST1);

	/* GENCTL byte-order bits differ for big-endian hosts. */
#if defined(__powerpc__) || defined(__sparc__)		/* Big endian */
	outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#else
	outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#endif
	outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
	if (ep->chip_flags & MII_PWRDWN)
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);

	/* Reload the station address, lost across the reset. */
	for (i = 0; i < 3; i++)
		outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);

	ep->tx_threshold = TX_FIFO_THRESH;
	outl(ep->tx_threshold, ioaddr + TxThresh);
	outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
	/* Point the chip at the descriptor entries where processing left off. */
	outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)*
	     sizeof(struct epic_rx_desc), ioaddr + PRxCDAR);
	outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)*
	     sizeof(struct epic_tx_desc), ioaddr + PTxCDAR);

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	outl(StartRx | RxQueued, ioaddr + COMMAND);

	/* Enable interrupts by setting the interrupt mask. */
	outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
	     | CntFull | TxUnderrun
	     | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);

	printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
	       " interrupt %4.4x.\n",
	       dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
	       (int)inl(ioaddr + INTSTAT));
	return;
}
888
/* Re-read the negotiated link-partner ability and, if the resolved duplex
   changed, reprogram TxCtrl to match.  No-op when media is forced or the
   MII read is bogus (0xffff). */
static void check_media(struct net_device *dev)
{
	struct epic_private *ep = dev->priv;
	long ioaddr = dev->base_addr;
	int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
	int negotiated = mii_lpa & ep->mii.advertising;
	/* Full duplex if 100FULL negotiated, or only 10FULL of the 10Mb bits. */
	int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;

	if (ep->mii.force_media)
		return;
	if (mii_lpa == 0xffff)		/* Bogus read */
		return;
	if (ep->mii.full_duplex != duplex) {
		ep->mii.full_duplex = duplex;
		printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
		       " partner capability of %4.4x.\n", dev->name,
		       ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
		outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
	}
}
909
/* Periodic (5 s) media-watch timer: dump debug state, re-check link
   duplex, and rearm itself. */
static void epic_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct epic_private *ep = dev->priv;
	long ioaddr = dev->base_addr;
	int next_tick = 5*HZ;

	if (debug > 3) {
		printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
		       dev->name, (int)inl(ioaddr + TxSTAT));
		printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
		       "IntStatus %4.4x RxStatus %4.4x.\n",
		       dev->name, (int)inl(ioaddr + INTMASK),
		       (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT));
	}

	check_media(dev);

	ep->timer.expires = jiffies + next_tick;
	add_timer(&ep->timer);
}
931
/* Watchdog callback for a hung transmitter.  A Tx FIFO underflow only
   needs a RestartTx kick; anything else gets a full chip restart. */
static void epic_tx_timeout(struct net_device *dev)
{
	struct epic_private *ep = dev->priv;
	long ioaddr = dev->base_addr;

	if (debug > 0) {
		printk(KERN_WARNING "%s: Transmit timeout using MII device, "
		       "Tx status %4.4x.\n",
		       dev->name, (int)inw(ioaddr + TxSTAT));
		if (debug > 1) {
			printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
			       dev->name, ep->dirty_tx, ep->cur_tx);
		}
	}
	if (inw(ioaddr + TxSTAT) & 0x10) {		/* Tx FIFO underflow. */
		ep->stats.tx_fifo_errors++;
		outl(RestartTx, ioaddr + COMMAND);
	} else {
		epic_restart(dev);
		outl(TxQueued, dev->base_addr + COMMAND);
	}

	dev->trans_start = jiffies;		/* Reset the watchdog clock. */
	ep->stats.tx_errors++;
	if (!ep->tx_full)
		netif_wake_queue(dev);
}
959
960/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
961static void epic_init_ring(struct net_device *dev)
962{
963 struct epic_private *ep = dev->priv;
964 int i;
965
966 ep->tx_full = 0;
967 ep->dirty_tx = ep->cur_tx = 0;
968 ep->cur_rx = ep->dirty_rx = 0;
969 ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
970
971 /* Initialize all Rx descriptors. */
972 for (i = 0; i < RX_RING_SIZE; i++) {
973 ep->rx_ring[i].rxstatus = 0;
974 ep->rx_ring[i].buflength = cpu_to_le32(ep->rx_buf_sz);
f3b197ac 975 ep->rx_ring[i].next = ep->rx_ring_dma +
1da177e4
LT
976 (i+1)*sizeof(struct epic_rx_desc);
977 ep->rx_skbuff[i] = NULL;
978 }
979 /* Mark the last entry as wrapping the ring. */
980 ep->rx_ring[i-1].next = ep->rx_ring_dma;
981
982 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
983 for (i = 0; i < RX_RING_SIZE; i++) {
984 struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz);
985 ep->rx_skbuff[i] = skb;
986 if (skb == NULL)
987 break;
988 skb->dev = dev; /* Mark as being used by this device. */
989 skb_reserve(skb, 2); /* 16 byte align the IP header. */
f3b197ac 990 ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
689be439 991 skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
1da177e4
LT
992 ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn);
993 }
994 ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
995
996 /* The Tx buffer descriptor is filled in as needed, but we
997 do need to clear the ownership bit. */
998 for (i = 0; i < TX_RING_SIZE; i++) {
999 ep->tx_skbuff[i] = NULL;
1000 ep->tx_ring[i].txstatus = 0x0000;
f3b197ac 1001 ep->tx_ring[i].next = ep->tx_ring_dma +
1da177e4
LT
1002 (i+1)*sizeof(struct epic_tx_desc);
1003 }
1004 ep->tx_ring[i-1].next = ep->tx_ring_dma;
1005 return;
1006}
1007
/* Queue one skb for transmission (hard_start_xmit hook).
 * Returns 0 in all cases; on padding failure the skb is presumably
 * consumed by skb_padto() -- TODO confirm against this kernel's
 * skb_padto() semantics.  The descriptor write order below is critical:
 * the DescOwn bit in txstatus must be written last so the chip never
 * sees a half-built descriptor.
 */
static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct epic_private *ep = dev->priv;
	int entry, free_count;
	u32 ctrl_word;
	unsigned long flags;

	if (skb_padto(skb, ETH_ZLEN))
		return 0;

	/* Caution: the write order is important here, set the field with the
	   "ownership" bit last. */

	/* Calculate the next Tx descriptor entry. */
	spin_lock_irqsave(&ep->lock, flags);
	free_count = ep->cur_tx - ep->dirty_tx;
	entry = ep->cur_tx % TX_RING_SIZE;

	ep->tx_skbuff[entry] = skb;
	ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
						    skb->len, PCI_DMA_TODEVICE);
	/* Only request a Tx-done interrupt at the queue's midpoint and when
	 * the ring is about to fill, to limit interrupt load. */
	if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
		ctrl_word = cpu_to_le32(0x100000); /* No interrupt */
	} else if (free_count == TX_QUEUE_LEN/2) {
		ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
	} else if (free_count < TX_QUEUE_LEN - 1) {
		ctrl_word = cpu_to_le32(0x100000); /* No Tx-done intr. */
	} else {
		/* Leave room for an additional entry. */
		ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
		ep->tx_full = 1;
	}
	ep->tx_ring[entry].buflength = ctrl_word | cpu_to_le32(skb->len);
	/* Frame length in the high half, ownership handed to the chip. */
	ep->tx_ring[entry].txstatus =
		((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
			    | cpu_to_le32(DescOwn);

	ep->cur_tx++;
	if (ep->tx_full)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&ep->lock, flags);
	/* Trigger an immediate transmit demand. */
	outl(TxQueued, dev->base_addr + COMMAND);

	dev->trans_start = jiffies;
	if (debug > 4)
		printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
		       "flag %2.2x Tx status %8.8x.\n",
		       dev->name, (int)skb->len, entry, ctrl_word,
		       (int)inl(dev->base_addr + TxSTAT));

	return 0;
}
1062
1063static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
1064 int status)
1065{
1066 struct net_device_stats *stats = &ep->stats;
1067
1068#ifndef final_version
1069 /* There was an major error, log it. */
1070 if (debug > 1)
1071 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
1072 dev->name, status);
1073#endif
1074 stats->tx_errors++;
1075 if (status & 0x1050)
1076 stats->tx_aborted_errors++;
1077 if (status & 0x0008)
1078 stats->tx_carrier_errors++;
1079 if (status & 0x0040)
1080 stats->tx_window_errors++;
1081 if (status & 0x0010)
1082 stats->tx_fifo_errors++;
1083}
1084
/* Reclaim Tx descriptors the chip has finished with.  Called from
 * epic_poll(); frees each completed skb with dev_kfree_skb_irq() and
 * reopens the queue once enough slots have drained.  A descriptor still
 * owned by the chip (DescOwn set) ends the scan.
 */
static void epic_tx(struct net_device *dev, struct epic_private *ep)
{
	unsigned int dirty_tx, cur_tx;

	/*
	 * Note: if this lock becomes a problem we can narrow the locked
	 * region at the cost of occasionally grabbing the lock more times.
	 */
	cur_tx = ep->cur_tx;
	for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
		struct sk_buff *skb;
		int entry = dirty_tx % TX_RING_SIZE;
		int txstatus = le32_to_cpu(ep->tx_ring[entry].txstatus);

		if (txstatus & DescOwn)
			break;			/* It still hasn't been Txed */

		if (likely(txstatus & 0x0001)) {
			/* Bit 0 set: frame transmitted successfully.
			 * Bits 8-11 carry the collision count. */
			ep->stats.collisions += (txstatus >> 8) & 15;
			ep->stats.tx_packets++;
			ep->stats.tx_bytes += ep->tx_skbuff[entry]->len;
		} else
			epic_tx_error(dev, ep, txstatus);

		/* Free the original skb. */
		skb = ep->tx_skbuff[entry];
		pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
				 skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(skb);
		ep->tx_skbuff[entry] = NULL;
	}

#ifndef final_version
	if (cur_tx - dirty_tx > TX_RING_SIZE) {
		printk(KERN_WARNING
		       "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
		       dev->name, dirty_tx, cur_tx, ep->tx_full);
		dirty_tx += TX_RING_SIZE;
	}
#endif
	ep->dirty_tx = dirty_tx;
	if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
		/* The ring is no longer full, allow new TX entries. */
		ep->tx_full = 0;
		netif_wake_queue(dev);
	}
}
1132
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct epic_private *ep = dev->priv;
	long ioaddr = dev->base_addr;
	unsigned int handled = 0;
	int status;

	status = inl(ioaddr + INTSTAT);
	/* Acknowledge all of the current interrupt sources ASAP. */
	outl(status & EpicNormalEvent, ioaddr + INTSTAT);

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
		       "intstat=%#8.8x.\n", dev->name, status,
		       (int)inl(ioaddr + INTSTAT));
	}

	if ((status & IntrSummary) == 0)
		goto out;

	handled = 1;

	/* Rx/Tx work is deferred to epic_poll() via NAPI.  If the poll
	 * routine could not be scheduled (already queued), note the missed
	 * event so epic_poll() loops instead of completing. */
	if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
		spin_lock(&ep->napi_lock);
		if (netif_rx_schedule_prep(dev)) {
			epic_napi_irq_off(dev, ep);
			__netif_rx_schedule(dev);
		} else
			ep->reschedule_in_poll++;
		spin_unlock(&ep->napi_lock);
	}
	status &= ~EpicNapiEvent;

	/* Check uncommon events all at once. */
	if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
		if (status == EpicRemoved)
			goto out;

		/* Always update the error counts to avoid overhead later. */
		ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
		ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
		ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);

		if (status & TxUnderrun) { /* Tx FIFO underflow. */
			ep->stats.tx_fifo_errors++;
			/* Raise the Tx start threshold before restarting. */
			outl(ep->tx_threshold += 128, ioaddr + TxThresh);
			/* Restart the transmit process. */
			outl(RestartTx, ioaddr + COMMAND);
		}
		if (status & PCIBusErr170) {
			printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n",
			       dev->name, status);
			epic_pause(dev);
			epic_restart(dev);
		}
		/* Clear all error sources. */
		outl(status & 0x7f18, ioaddr + INTSTAT);
	}

out:
	if (debug > 3) {
		printk(KERN_DEBUG "%s: exit interrupt, intr_status=%#4.4x.\n",
		       dev->name, status);
	}

	return IRQ_RETVAL(handled);
}
1203
1204static int epic_rx(struct net_device *dev, int budget)
1205{
1206 struct epic_private *ep = dev->priv;
1207 int entry = ep->cur_rx % RX_RING_SIZE;
1208 int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
1209 int work_done = 0;
1210
1211 if (debug > 4)
1212 printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
1213 ep->rx_ring[entry].rxstatus);
1214
1215 if (rx_work_limit > budget)
1216 rx_work_limit = budget;
1217
1218 /* If we own the next entry, it's a new packet. Send it up. */
1219 while ((ep->rx_ring[entry].rxstatus & cpu_to_le32(DescOwn)) == 0) {
1220 int status = le32_to_cpu(ep->rx_ring[entry].rxstatus);
1221
1222 if (debug > 4)
1223 printk(KERN_DEBUG " epic_rx() status was %8.8x.\n", status);
1224 if (--rx_work_limit < 0)
1225 break;
1226 if (status & 0x2006) {
1227 if (debug > 2)
1228 printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",
1229 dev->name, status);
1230 if (status & 0x2000) {
1231 printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
1232 "multiple buffers, status %4.4x!\n", dev->name, status);
1233 ep->stats.rx_length_errors++;
1234 } else if (status & 0x0006)
1235 /* Rx Frame errors are counted in hardware. */
1236 ep->stats.rx_errors++;
1237 } else {
1238 /* Malloc up new buffer, compatible with net-2e. */
1239 /* Omit the four octet CRC from the length. */
1240 short pkt_len = (status >> 16) - 4;
1241 struct sk_buff *skb;
1242
1243 if (pkt_len > PKT_BUF_SZ - 4) {
1244 printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
1245 "%d bytes.\n",
1246 dev->name, status, pkt_len);
1247 pkt_len = 1514;
1248 }
1249 /* Check if the packet is long enough to accept without copying
1250 to a minimally-sized skbuff. */
1251 if (pkt_len < rx_copybreak
1252 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1253 skb->dev = dev;
1254 skb_reserve(skb, 2); /* 16 byte align the IP header */
1255 pci_dma_sync_single_for_cpu(ep->pci_dev,
1256 ep->rx_ring[entry].bufaddr,
1257 ep->rx_buf_sz,
1258 PCI_DMA_FROMDEVICE);
689be439 1259 eth_copy_and_sum(skb, ep->rx_skbuff[entry]->data, pkt_len, 0);
1da177e4
LT
1260 skb_put(skb, pkt_len);
1261 pci_dma_sync_single_for_device(ep->pci_dev,
1262 ep->rx_ring[entry].bufaddr,
1263 ep->rx_buf_sz,
1264 PCI_DMA_FROMDEVICE);
1265 } else {
f3b197ac
JG
1266 pci_unmap_single(ep->pci_dev,
1267 ep->rx_ring[entry].bufaddr,
1da177e4
LT
1268 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
1269 skb_put(skb = ep->rx_skbuff[entry], pkt_len);
1270 ep->rx_skbuff[entry] = NULL;
1271 }
1272 skb->protocol = eth_type_trans(skb, dev);
1273 netif_receive_skb(skb);
1274 dev->last_rx = jiffies;
1275 ep->stats.rx_packets++;
1276 ep->stats.rx_bytes += pkt_len;
1277 }
1278 work_done++;
1279 entry = (++ep->cur_rx) % RX_RING_SIZE;
1280 }
1281
1282 /* Refill the Rx ring buffers. */
1283 for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
1284 entry = ep->dirty_rx % RX_RING_SIZE;
1285 if (ep->rx_skbuff[entry] == NULL) {
1286 struct sk_buff *skb;
1287 skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz);
1288 if (skb == NULL)
1289 break;
1290 skb->dev = dev; /* Mark as being used by this device. */
1291 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
f3b197ac 1292 ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
689be439 1293 skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
1da177e4
LT
1294 work_done++;
1295 }
1296 ep->rx_ring[entry].rxstatus = cpu_to_le32(DescOwn);
1297 }
1298 return work_done;
1299}
1300
/* Service receive-side error conditions reported in INTSTAT: count a
 * missed frame on overflow, and restart the receiver if it stalled
 * because the ring overflowed or filled.
 */
static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
{
	long ioaddr = dev->base_addr;
	int status;

	status = inl(ioaddr + INTSTAT);

	if (status == EpicRemoved)
		return;		/* Card presumably gone -- nothing to service. */
	if (status & RxOverflow)	/* Missed a Rx frame. */
		ep->stats.rx_errors++;
	if (status & (RxOverflow | RxFull))
		outw(RxQueued, ioaddr + COMMAND);
}
1315
/* NAPI poll callback: reap completed Tx, receive up to the quota, handle
 * Rx errors, then complete and re-enable interrupts once the ring is
 * drained.  If epic_interrupt() recorded a missed schedule while we ran
 * (reschedule_in_poll), loop back instead of completing.  Returns nonzero
 * while more work remains.
 */
static int epic_poll(struct net_device *dev, int *budget)
{
	struct epic_private *ep = dev->priv;
	int work_done = 0, orig_budget;
	long ioaddr = dev->base_addr;

	orig_budget = (*budget > dev->quota) ? dev->quota : *budget;

rx_action:

	epic_tx(dev, ep);

	work_done += epic_rx(dev, *budget);

	epic_rx_err(dev, ep);

	*budget -= work_done;
	dev->quota -= work_done;

	if (netif_running(dev) && (work_done < orig_budget)) {
		unsigned long flags;
		int more;

		/* A bit baroque but it avoids a (space hungry) spin_unlock */

		spin_lock_irqsave(&ep->napi_lock, flags);

		more = ep->reschedule_in_poll;
		if (!more) {
			__netif_rx_complete(dev);
			/* Ack any stale NAPI events before unmasking. */
			outl(EpicNapiEvent, ioaddr + INTSTAT);
			epic_napi_irq_on(dev, ep);
		} else
			ep->reschedule_in_poll--;

		spin_unlock_irqrestore(&ep->napi_lock, flags);

		if (more)
			goto rx_action;
	}

	return (work_done >= orig_budget);
}
1359
/* Bring the interface down: stop the queue and timer, mask and release
 * the IRQ, quiesce the chip, unmap and free every ring buffer, and leave
 * the chip in low-power mode.  Teardown order matters -- the IRQ must be
 * gone before the rings are dismantled.
 */
static int epic_close(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct epic_private *ep = dev->priv;
	struct sk_buff *skb;
	int i;

	netif_stop_queue(dev);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
		       dev->name, (int)inl(ioaddr + INTSTAT));

	del_timer_sync(&ep->timer);

	epic_disable_int(dev, ep);

	free_irq(dev->irq, dev);

	epic_pause(dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = ep->rx_skbuff[i];
		ep->rx_skbuff[i] = NULL;
		ep->rx_ring[i].rxstatus = 0;		/* Not owned by Epic chip. */
		ep->rx_ring[i].buflength = 0;
		if (skb) {
			pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr,
					 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
		}
		ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = ep->tx_skbuff[i];
		ep->tx_skbuff[i] = NULL;
		if (!skb)
			continue;
		pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
				 skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb(skb);
	}

	/* Green! Leave the chip in low-power mode. */
	outl(0x0008, ioaddr + GENCTL);

	return 0;
}
1409
/* Return the interface statistics.  While the interface is up the chip's
 * on-board error counters are folded into the running totals first; the
 * same pattern in epic_interrupt() suggests these registers clear on
 * read -- NOTE(review): confirm against the 83c170 datasheet.
 */
static struct net_device_stats *epic_get_stats(struct net_device *dev)
{
	struct epic_private *ep = dev->priv;
	long ioaddr = dev->base_addr;

	if (netif_running(dev)) {
		/* Update the error counts. */
		ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
		ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
		ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
	}

	return &ep->stats;
}
1424
1425/* Set or clear the multicast filter for this adaptor.
1426 Note that we only use exclusion around actually queueing the
1427 new frame, not around filling ep->setup_frame. This is non-deterministic
1428 when re-entered but still correct. */
1429
1430static void set_rx_mode(struct net_device *dev)
1431{
1432 long ioaddr = dev->base_addr;
1433 struct epic_private *ep = dev->priv;
1434 unsigned char mc_filter[8]; /* Multicast hash filter */
1435 int i;
1436
1437 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1438 outl(0x002C, ioaddr + RxCtrl);
1439 /* Unconditionally log net taps. */
1440 printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
1441 memset(mc_filter, 0xff, sizeof(mc_filter));
1442 } else if ((dev->mc_count > 0) || (dev->flags & IFF_ALLMULTI)) {
1443 /* There is apparently a chip bug, so the multicast filter
1444 is never enabled. */
1445 /* Too many to filter perfectly -- accept all multicasts. */
1446 memset(mc_filter, 0xff, sizeof(mc_filter));
1447 outl(0x000C, ioaddr + RxCtrl);
1448 } else if (dev->mc_count == 0) {
1449 outl(0x0004, ioaddr + RxCtrl);
1450 return;
1451 } else { /* Never executed, for now. */
1452 struct dev_mc_list *mclist;
1453
1454 memset(mc_filter, 0, sizeof(mc_filter));
1455 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1456 i++, mclist = mclist->next) {
1457 unsigned int bit_nr =
1458 ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
1459 mc_filter[bit_nr >> 3] |= (1 << bit_nr);
1460 }
1461 }
1462 /* ToDo: perhaps we need to stop the Tx and Rx process here? */
1463 if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
1464 for (i = 0; i < 4; i++)
1465 outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4);
1466 memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
1467 }
1468 return;
1469}
1470
1471static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1472{
1473 struct epic_private *np = dev->priv;
1474
1475 strcpy (info->driver, DRV_NAME);
1476 strcpy (info->version, DRV_VERSION);
1477 strcpy (info->bus_info, pci_name(np->pci_dev));
1478}
1479
1480static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1481{
1482 struct epic_private *np = dev->priv;
1483 int rc;
1484
1485 spin_lock_irq(&np->lock);
1486 rc = mii_ethtool_gset(&np->mii, cmd);
1487 spin_unlock_irq(&np->lock);
1488
1489 return rc;
1490}
1491
1492static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1493{
1494 struct epic_private *np = dev->priv;
1495 int rc;
1496
1497 spin_lock_irq(&np->lock);
1498 rc = mii_ethtool_sset(&np->mii, cmd);
1499 spin_unlock_irq(&np->lock);
1500
1501 return rc;
1502}
1503
1504static int netdev_nway_reset(struct net_device *dev)
1505{
1506 struct epic_private *np = dev->priv;
1507 return mii_nway_restart(&np->mii);
1508}
1509
1510static u32 netdev_get_link(struct net_device *dev)
1511{
1512 struct epic_private *np = dev->priv;
1513 return mii_link_ok(&np->mii);
1514}
1515
/* ethtool: report the driver-wide debug message level. */
static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}
1520
/* ethtool: set the driver-wide debug message level (shared by all
 * interfaces driven by this module). */
static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}
1525
/* ethtool pre-hook: if the interface is down, power the chip up so
 * register access works (same GENCTL/NVCTL sequence as netdev_ioctl();
 * the exact bit meanings are per the 83c170 -- not documented here). */
static int ethtool_begin(struct net_device *dev)
{
	unsigned long ioaddr = dev->base_addr;
	/* power-up, if interface is down */
	if (! netif_running(dev)) {
		outl(0x0200, ioaddr + GENCTL);
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
	}
	return 0;
}
1536
/* ethtool post-hook: undo ethtool_begin() -- return the chip to
 * low-power mode if the interface is down. */
static void ethtool_complete(struct net_device *dev)
{
	unsigned long ioaddr = dev->base_addr;
	/* power-down, if interface is down */
	if (! netif_running(dev)) {
		outl(0x0008, ioaddr + GENCTL);
		outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
	}
}
1546
/* ethtool operations table; scatter/gather and Tx-checksum state are
 * reported through the generic ethtool_op_* helpers. */
static struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_sg			= ethtool_op_get_sg,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.begin			= ethtool_begin,
	.complete		= ethtool_complete
};
1560
/* MII ioctl handler (SIOC[GS]MII*).  When the interface is down, the
 * chip is temporarily powered up for the register access and powered
 * back down afterwards (same sequence as ethtool_begin/complete). */
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct epic_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	struct mii_ioctl_data *data = if_mii(rq);
	int rc;

	/* power-up, if interface is down */
	if (! netif_running(dev)) {
		outl(0x0200, ioaddr + GENCTL);
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
	}

	/* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
	spin_unlock_irq(&np->lock);

	/* power-down, if interface is down */
	if (! netif_running(dev)) {
		outl(0x0008, ioaddr + GENCTL);
		outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
	}
	return rc;
}
1586
1587
1588static void __devexit epic_remove_one (struct pci_dev *pdev)
1589{
1590 struct net_device *dev = pci_get_drvdata(pdev);
1591 struct epic_private *ep = dev->priv;
f3b197ac 1592
1da177e4
LT
1593 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
1594 pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
1595 unregister_netdev(dev);
1596#ifndef USE_IO_OPS
1597 iounmap((void*) dev->base_addr);
1598#endif
1599 pci_release_regions(pdev);
1600 free_netdev(dev);
1601 pci_disable_device(pdev);
1602 pci_set_drvdata(pdev, NULL);
1603 /* pci_power_off(pdev, -1); */
1604}
1605
1606
1607#ifdef CONFIG_PM
1608
/* PM suspend hook: quiesce a running interface and drop the chip into
 * low-power mode.  A closed interface needs no action. */
static int epic_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	long ioaddr = dev->base_addr;

	if (!netif_running(dev))
		return 0;
	epic_pause(dev);
	/* Put the chip into low-power mode. */
	outl(0x0008, ioaddr + GENCTL);
	/* pci_power_off(pdev, -1); */
	return 0;
}
1622
1623
/* PM resume hook: reinitialize the chip if the interface was running at
 * suspend time; otherwise nothing to do. */
static int epic_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (!netif_running(dev))
		return 0;
	epic_restart(dev);
	/* pci_power_on(pdev); */
	return 0;
}
1634
1635#endif /* CONFIG_PM */
1636
1637
/* PCI driver glue; suspend/resume hooks are built only with CONFIG_PM. */
static struct pci_driver epic_driver = {
	.name		= DRV_NAME,
	.id_table	= epic_pci_tbl,
	.probe		= epic_init_one,
	.remove		= __devexit_p(epic_remove_one),
#ifdef CONFIG_PM
	.suspend	= epic_suspend,
	.resume		= epic_resume,
#endif /* CONFIG_PM */
};
1648
1649
/* Module entry point: print the version banner (module builds only) and
 * register the PCI driver. */
static int __init epic_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
		version, version2, version3);
#endif

	return pci_module_init (&epic_driver);
}
1660
1661
/* Module exit point: unregister the PCI driver (detaches all devices). */
static void __exit epic_cleanup (void)
{
	pci_unregister_driver (&epic_driver);
}
1666
1667
/* Module entry/exit registration. */
module_init(epic_init);
module_exit(epic_cleanup);