/*
	Written 1998-2000 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support information and updates available at
	http://www.scyld.com/network/pci-skeleton.html

	Linux kernel updates:

	Version 2.51, Nov 17, 2001 (jgarzik):
	- Add ethtool support
	- Replace some MII-related magic numbers with constants

*/

#define DRV_NAME	"fealnx"
#define DRV_VERSION	"2.51"
#define DRV_RELDATE	"Nov-17-2001"

static int debug;		/* 1-> print debug message */
static int max_interrupt_work = 20;

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme. */
/* Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

/* Used to pass the media type, etc. */
/* Both 'options[]' and 'full_duplex[]' should exist for driver */
/* interoperability. */
/* The media type is usually passed in 'options[]'. */
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };

/* Operational parameters that are set at compile time. */
/* Keep the ring sizes a power of two for compile efficiency. */
/* The compiler will convert <unsigned>'%'<2^N> into a bit mask. */
/* Making the Tx ring too large decreases the effectiveness of channel */
/* bonding and packet priority. */
/* There are no ill effects from too-large receive rings. */
// 88-12-9 modify,
// #define TX_RING_SIZE 16
// #define RX_RING_SIZE 32
#define TX_RING_SIZE 6
#define RX_RING_SIZE 12
#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct fealnx_desc))
#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct fealnx_desc))
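/* A quick sanity check of the ring math above (assuming a 32-bit build,
   where struct fealnx_desc below is 32 bytes): TX_TOTAL_SIZE works out to
   6 * 32 = 192 bytes and RX_TOTAL_SIZE to 12 * 32 = 384 bytes -- one
   DMA-coherent allocation per ring in fealnx_init_one(). */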

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (2*HZ)

#define PKT_BUF_SZ 1536		/* Size of each temporary Rx buffer. */


/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/bitops.h>

#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/uaccess.h>

/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n";


/* This driver was written to use PCI memory space, however some x86 systems
   work only with I/O space accesses. */
#ifndef __alpha__
#define USE_IO_OPS
#endif

/* Kernel compatibility defines, some common to David Hinds' PCMCIA package. */
/* This is only in the support-all-kernels source code. */

#define RUN_AT(x) (jiffies + (x))

MODULE_AUTHOR("Myson or whoever");
MODULE_DESCRIPTION("Myson MTD-8xx 100/10M Ethernet PCI Adapter Driver");
MODULE_LICENSE("GPL");
module_param(max_interrupt_work, int, 0);
//MODULE_PARM(min_pci_latency, "i");
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(multicast_filter_limit, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(max_interrupt_work, "fealnx maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "fealnx enable debugging (0-1)");
MODULE_PARM_DESC(rx_copybreak, "fealnx copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(multicast_filter_limit, "fealnx maximum number of filtered multicast addresses");
MODULE_PARM_DESC(options, "fealnx: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)");
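/* A hypothetical loading example (not from the original sources): the
   per-card arrays above are filled positionally, so something like

	modprobe fealnx full_duplex=1,1 debug=1

   would force full duplex on the first two adapters and enable debug
   output.  Entries left at -1 keep the autonegotiated defaults. */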

#define MIN_REGION_SIZE 136

enum pci_flags_bit {
	PCI_USES_IO = 1,
	PCI_USES_MEM = 2,
	PCI_USES_MASTER = 4,
	PCI_ADDR0 = 0x10 << 0,
	PCI_ADDR1 = 0x10 << 1,
	PCI_ADDR2 = 0x10 << 2,
	PCI_ADDR3 = 0x10 << 3,
};

/* A chip capabilities table, matching the entries in fealnx_pci_tbl[] below. */
enum chip_capability_flags {
	HAS_MII_XCVR,
	HAS_CHIP_XCVR,
};

/* 89/6/13 add, */
/* for different PHY */
enum phy_type_flags {
	MysonPHY = 1,
	AhdocPHY = 2,
	SeeqPHY = 3,
	MarvellPHY = 4,
	Myson981 = 5,
	LevelOnePHY = 6,
	OtherPHY = 10,
};

struct chip_info {
	char *chip_name;
	int io_size;
	int flags;
};

static const struct chip_info skel_netdrv_tbl[] = {
	{"100/10M Ethernet PCI Adapter", 136, HAS_MII_XCVR},
	{"100/10M Ethernet PCI Adapter", 136, HAS_CHIP_XCVR},
	{"1000/100/10M Ethernet PCI Adapter", 136, HAS_MII_XCVR},
};

/* Offsets to the Command and Status Registers. */
enum fealnx_offsets {
	PAR0 = 0x0,		/* physical address 0-3 */
	PAR1 = 0x04,		/* physical address 4-5 */
	MAR0 = 0x08,		/* multicast address 0-3 */
	MAR1 = 0x0C,		/* multicast address 4-7 */
	FAR0 = 0x10,		/* flow-control address 0-3 */
	FAR1 = 0x14,		/* flow-control address 4-5 */
	TCRRCR = 0x18,		/* receive & transmit configuration */
	BCR = 0x1C,		/* bus command */
	TXPDR = 0x20,		/* transmit polling demand */
	RXPDR = 0x24,		/* receive polling demand */
	RXCWP = 0x28,		/* receive current word pointer */
	TXLBA = 0x2C,		/* transmit list base address */
	RXLBA = 0x30,		/* receive list base address */
	ISR = 0x34,		/* interrupt status */
	IMR = 0x38,		/* interrupt mask */
	FTH = 0x3C,		/* flow control high/low threshold */
	MANAGEMENT = 0x40,	/* bootrom/eeprom and mii management */
	TALLY = 0x44,		/* tally counters for crc and mpa */
	TSR = 0x48,		/* tally counter for transmit status */
	BMCRSR = 0x4c,		/* basic mode control and status */
	PHYIDENTIFIER = 0x50,	/* phy identifier */
	ANARANLPAR = 0x54,	/* auto-negotiation advertisement and link
				   partner ability */
	ANEROCR = 0x58,		/* auto-negotiation expansion and pci conf. */
	BPREMRPSR = 0x5c,	/* bypass & receive error mask and phy status */
};

/* Bits in the interrupt status/enable registers. */
/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
enum intr_status_bits {
	RFCON = 0x00020000,	/* receive flow control xon packet */
	RFCOFF = 0x00010000,	/* receive flow control xoff packet */
	LSCStatus = 0x00008000,	/* link status change */
	ANCStatus = 0x00004000,	/* autonegotiation completed */
	FBE = 0x00002000,	/* fatal bus error */
	FBEMask = 0x00001800,	/* mask bit12-11 */
	ParityErr = 0x00000000,	/* parity error */
	TargetErr = 0x00001000,	/* target abort */
	MasterErr = 0x00000800,	/* master error */
	TUNF = 0x00000400,	/* transmit underflow */
	ROVF = 0x00000200,	/* receive overflow */
	ETI = 0x00000100,	/* transmit early int */
	ERI = 0x00000080,	/* receive early int */
	CNTOVF = 0x00000040,	/* counter overflow */
	RBU = 0x00000020,	/* receive buffer unavailable */
	TBU = 0x00000010,	/* transmit buffer unavailable */
	TI = 0x00000008,	/* transmit interrupt */
	RI = 0x00000004,	/* receive interrupt */
	RxErr = 0x00000002,	/* receive error */
};

/* Bits in the NetworkConfig register, W for writing, R for reading */
/* FIXME: some names are invented by me. Marked with (name?) */
/* If you have docs and know bit names, please fix 'em */
enum rx_mode_bits {
	CR_W_ENH = 0x02000000,		/* enhanced mode (name?) */
	CR_W_FD = 0x00100000,		/* full duplex */
	CR_W_PS10 = 0x00080000,		/* 10 mbit */
	CR_W_TXEN = 0x00040000,		/* tx enable (name?) */
	CR_W_PS1000 = 0x00010000,	/* 1000 mbit */
/*	CR_W_RXBURSTMASK = 0x00000e00,	   I'm unsure about this */
	CR_W_RXMODEMASK = 0x000000e0,
	CR_W_PROM = 0x00000080,		/* promiscuous mode */
	CR_W_AB = 0x00000040,		/* accept broadcast */
	CR_W_AM = 0x00000020,		/* accept multicast */
	CR_W_ARP = 0x00000008,		/* receive runt pkt */
	CR_W_ALP = 0x00000004,		/* receive long pkt */
	CR_W_SEP = 0x00000002,		/* receive error pkt */
	CR_W_RXEN = 0x00000001,		/* rx enable (unicast?) (name?) */

	CR_R_TXSTOP = 0x04000000,	/* tx stopped (name?) */
	CR_R_FD = 0x00100000,		/* full duplex detected */
	CR_R_PS10 = 0x00080000,		/* 10 mbit detected */
	CR_R_RXSTOP = 0x00008000,	/* rx stopped (name?) */
};

/* The Tulip Rx and Tx buffer descriptors. */
struct fealnx_desc {
	s32 status;
	s32 control;
	u32 buffer;
	u32 next_desc;
	struct fealnx_desc *next_desc_logical;
	struct sk_buff *skbuff;
	u32 reserved1;
	u32 reserved2;
};
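/* Layout note (inferred from how the fields are used below, not from chip
   documentation): only the first four 32-bit words -- status, control,
   buffer and next_desc -- are read by the NIC via DMA, and next_desc holds
   the *bus* address of the following descriptor.  The remaining fields
   (next_desc_logical, skbuff, reserved1/2) are driver-side bookkeeping that
   mirrors the chain with virtual pointers, so the code can walk the ring
   without translating bus addresses back. */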

/* Bits in network_desc.status */
enum rx_desc_status_bits {
	RXOWN = 0x80000000,	/* own bit */
	FLNGMASK = 0x0fff0000,	/* frame length */
	FLNGShift = 16,
	MARSTATUS = 0x00004000,	/* multicast address received */
	BARSTATUS = 0x00002000,	/* broadcast address received */
	PHYSTATUS = 0x00001000,	/* physical address received */
	RXFSD = 0x00000800,	/* first descriptor */
	RXLSD = 0x00000400,	/* last descriptor */
	ErrorSummary = 0x80,	/* error summary */
	RUNT = 0x40,		/* runt packet received */
	LONG = 0x20,		/* long packet received */
	FAE = 0x10,		/* frame align error */
	CRC = 0x08,		/* crc error */
	RXER = 0x04,		/* receive error */
};
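/* Decoding sketch (the value is hypothetical): for a completed status word
   of 0x00400c00, RXOWN is clear (the descriptor is back in the driver's
   hands), RXFSD | RXLSD mark a single-buffer frame, and the length is
   (status & FLNGMASK) >> FLNGShift = 0x40 = 64 octets including the 4-byte
   CRC -- which is why netdev_rx() below subtracts 4. */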

enum rx_desc_control_bits {
	RXIC = 0x00800000,	/* interrupt control */
	RBSShift = 0,
};

enum tx_desc_status_bits {
	TXOWN = 0x80000000,	/* own bit */
	JABTO = 0x00004000,	/* jabber timeout */
	CSL = 0x00002000,	/* carrier sense lost */
	LC = 0x00001000,	/* late collision */
	EC = 0x00000800,	/* excessive collision */
	UDF = 0x00000400,	/* fifo underflow */
	DFR = 0x00000200,	/* deferred */
	HF = 0x00000100,	/* heartbeat fail */
	NCRMask = 0x000000ff,	/* collision retry count */
	NCRShift = 0,
};

enum tx_desc_control_bits {
	TXIC = 0x80000000,	/* interrupt control */
	ETIControl = 0x40000000,	/* early transmit interrupt */
	TXLD = 0x20000000,	/* last descriptor */
	TXFD = 0x10000000,	/* first descriptor */
	CRCEnable = 0x08000000,	/* crc control */
	PADEnable = 0x04000000,	/* padding control */
	RetryTxLC = 0x02000000,	/* retry late collision */
	PKTSMask = 0x3ff800,	/* packet size bit21-11 */
	PKTSShift = 11,
	TBSMask = 0x000007ff,	/* transmit buffer bit 10-0 */
	TBSShift = 0,
};
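/* Composition sketch (mirrors what start_tx() does for the single-buffer
   case; the 60-byte length is only an illustration): a minimal control
   word is

	TXIC | TXLD | TXFD | CRCEnable | PADEnable
		| (60 << PKTSShift)	whole-packet size, bits 21-11
		| (60 << TBSShift);	this buffer's size, bits 10-0

   For a frame split across two descriptors the packet size repeats in both
   control words, while TBS carries each fragment's own length. */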

/* BootROM/EEPROM/MII Management Register */
#define MASK_MIIR_MII_READ	0x00000000
#define MASK_MIIR_MII_WRITE	0x00000008
#define MASK_MIIR_MII_MDO	0x00000004
#define MASK_MIIR_MII_MDI	0x00000002
#define MASK_MIIR_MII_MDC	0x00000001

/* ST+OP+PHYAD+REGAD+TA */
#define OP_READ 0x6000		/* ST:01+OP:10+PHYAD+REGAD+TA:Z0 */
#define OP_WRITE 0x5002		/* ST:01+OP:01+PHYAD+REGAD+TA:10 */

/* ------------------------------------------------------------------------- */
/* Constants for Myson PHY						     */
/* ------------------------------------------------------------------------- */
#define MysonPHYID	0xd0000302
/* 89-7-27 add, (begin) */
#define MysonPHYID0	0x0302
#define StatusRegister	18
#define SPEED100	0x0400	// bit10
#define FULLMODE	0x0800	// bit11
/* 89-7-27 add, (end) */

/* ------------------------------------------------------------------------- */
/* Constants for Seeq 80225 PHY						     */
/* ------------------------------------------------------------------------- */
#define SeeqPHYID0	0x0016

#define MIIRegister18	18
#define SPD_DET_100	0x80
#define DPLX_DET_FULL	0x40

/* ------------------------------------------------------------------------- */
/* Constants for Ahdoc 101 PHY						     */
/* ------------------------------------------------------------------------- */
#define AhdocPHYID0	0x0022

#define DiagnosticReg	18
#define DPLX_FULL	0x0800
#define Speed_100	0x0400

/* 89/6/13 add, */
/* -------------------------------------------------------------------------- */
/* Constants								      */
/* -------------------------------------------------------------------------- */
#define MarvellPHYID0		0x0141
#define LevelOnePHYID0		0x0013

#define MII1000BaseTControlReg	9
#define MII1000BaseTStatusReg	10
#define SpecificReg		17

/* for 1000BaseT Control Register */
#define PHYAbletoPerform1000FullDuplex	0x0200
#define PHYAbletoPerform1000HalfDuplex	0x0100
#define PHY1000AbilityMask		0x300

// for phy specific status register, marvell phy.
#define SpeedMask	0x0c000
#define Speed_1000M	0x08000
#define Speed_100M	0x4000
#define Speed_10M	0
#define Full_Duplex	0x2000

// 89/12/29 add, for phy specific status register, levelone phy, (begin)
#define LXT1000_100M	0x08000
#define LXT1000_1000M	0x0c000
#define LXT1000_Full	0x200
// 89/12/29 add, for phy specific status register, levelone phy, (end)

/* for 3-in-1 case, BMCRSR register */
#define LinkIsUp2	0x00040000

/* for PHY */
#define LinkIsUp	0x0004


struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct fealnx_desc *rx_ring;
	struct fealnx_desc *tx_ring;

	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	spinlock_t lock;

	struct net_device_stats stats;

	/* Media monitoring timer. */
	struct timer_list timer;

	/* Reset timer */
	struct timer_list reset_timer;
	int reset_timer_armed;
	unsigned long crvalue_sv;
	unsigned long imrvalue_sv;

	/* Frequently used values: keep some adjacent for cache effect. */
	int flags;
	struct pci_dev *pci_dev;
	unsigned long crvalue;
	unsigned long bcrvalue;
	unsigned long imrvalue;
	struct fealnx_desc *cur_rx;
	struct fealnx_desc *lack_rxbuf;
	int really_rx_count;
	struct fealnx_desc *cur_tx;
	struct fealnx_desc *cur_tx_copy;
	int really_tx_count;
	int free_tx_count;
	unsigned int rx_buf_sz;	/* Based on MTU+slack. */

	/* These values keep track of the transceiver/media in use. */
	unsigned int linkok;
	unsigned int line_speed;
	unsigned int duplexmode;
	unsigned int default_port:4;	/* Last dev->if_port value. */
	unsigned int PHYType;

	/* MII transceiver section. */
	int mii_cnt;		/* number of MII PHYs found. */
	unsigned char phys[2];	/* MII device addresses. */
	struct mii_if_info mii;
	void __iomem *mem;
};


static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static void getlinktype(struct net_device *dev);
static void getlinkstatus(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void reset_timer(unsigned long data);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static int start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
static int netdev_rx(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static void __set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static struct ethtool_ops netdev_ethtool_ops;
static int netdev_close(struct net_device *dev);
static void reset_rx_descriptors(struct net_device *dev);
static void reset_tx_descriptors(struct net_device *dev);

static void stop_nic_rx(void __iomem *ioaddr, long crvalue)
{
	int delay = 0x1000;
	iowrite32(crvalue & ~(CR_W_RXEN), ioaddr + TCRRCR);
	while (--delay) {
		if ( (ioread32(ioaddr + TCRRCR) & CR_R_RXSTOP) == CR_R_RXSTOP)
			break;
	}
}


static void stop_nic_rxtx(void __iomem *ioaddr, long crvalue)
{
	int delay = 0x1000;
	iowrite32(crvalue & ~(CR_W_RXEN+CR_W_TXEN), ioaddr + TCRRCR);
	while (--delay) {
		if ( (ioread32(ioaddr + TCRRCR) & (CR_R_RXSTOP+CR_R_TXSTOP))
			== (CR_R_RXSTOP+CR_R_TXSTOP) )
			break;
	}
}


static int __devinit fealnx_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	struct netdev_private *np;
	int i, option, err, irq;
	static int card_idx = -1;
	char boardname[12];
	void __iomem *ioaddr;
	unsigned long len;
	unsigned int chip_id = ent->driver_data;
	struct net_device *dev;
	void *ring_space;
	dma_addr_t ring_dma;
#ifdef USE_IO_OPS
	int bar = 0;
#else
	int bar = 1;
#endif

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	card_idx++;
	sprintf(boardname, "fealnx%d", card_idx);

	option = card_idx < MAX_UNITS ? options[card_idx] : 0;

	i = pci_enable_device(pdev);
	if (i) return i;
	pci_set_master(pdev);

	len = pci_resource_len(pdev, bar);
	if (len < MIN_REGION_SIZE) {
		printk(KERN_ERR "%s: region size %ld too small, aborting\n",
		       boardname, len);
		return -ENODEV;
	}

	i = pci_request_regions(pdev, boardname);
	if (i) return i;

	irq = pdev->irq;

	ioaddr = pci_iomap(pdev, bar, len);
	if (!ioaddr) {
		err = -ENOMEM;
		goto err_out_res;
	}

	dev = alloc_etherdev(sizeof(struct netdev_private));
	if (!dev) {
		err = -ENOMEM;
		goto err_out_unmap;
	}
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* read ethernet id */
	for (i = 0; i < 6; ++i)
		dev->dev_addr[i] = ioread8(ioaddr + PAR0 + i);

	/* Reset the chip to erase previous misconfiguration. */
	iowrite32(0x00000001, ioaddr + BCR);

	dev->base_addr = (unsigned long)ioaddr;
	dev->irq = irq;

	/* Make certain the descriptor lists are aligned. */
	np = netdev_priv(dev);
	np->mem = ioaddr;
	spin_lock_init(&np->lock);
	np->pci_dev = pdev;
	np->flags = skel_netdrv_tbl[chip_id].flags;
	pci_set_drvdata(pdev, dev);
	np->mii.dev = dev;
	np->mii.mdio_read = mdio_read;
	np->mii.mdio_write = mdio_write;
	np->mii.phy_id_mask = 0x1f;
	np->mii.reg_num_mask = 0x1f;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space) {
		err = -ENOMEM;
		goto err_out_free_dev;
	}
	np->rx_ring = (struct fealnx_desc *)ring_space;
	np->rx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space) {
		err = -ENOMEM;
		goto err_out_free_rx;
	}
	np->tx_ring = (struct fealnx_desc *)ring_space;
	np->tx_ring_dma = ring_dma;

	/* find the connected MII xcvrs */
	if (np->flags == HAS_MII_XCVR) {
		int phy, phy_idx = 0;

		for (phy = 1; phy < 32 && phy_idx < 4; phy++) {
			int mii_status = mdio_read(dev, phy, 1);

			if (mii_status != 0xffff && mii_status != 0x0000) {
				np->phys[phy_idx++] = phy;
				printk(KERN_INFO
				       "%s: MII PHY found at address %d, status "
				       "0x%4.4x.\n", dev->name, phy, mii_status);
				/* get phy type */
				{
					unsigned int data;

					data = mdio_read(dev, np->phys[0], 2);
					if (data == SeeqPHYID0)
						np->PHYType = SeeqPHY;
					else if (data == AhdocPHYID0)
						np->PHYType = AhdocPHY;
					else if (data == MarvellPHYID0)
						np->PHYType = MarvellPHY;
					else if (data == MysonPHYID0)
						np->PHYType = Myson981;
					else if (data == LevelOnePHYID0)
						np->PHYType = LevelOnePHY;
					else
						np->PHYType = OtherPHY;
				}
			}
		}

		np->mii_cnt = phy_idx;
		if (phy_idx == 0) {
			printk(KERN_WARNING "%s: MII PHY not found -- this device may "
			       "not operate correctly.\n", dev->name);
		}
	} else {
		np->phys[0] = 32;
/* 89/6/23 add, (begin) */
		/* get phy type */
		if (ioread32(ioaddr + PHYIDENTIFIER) == MysonPHYID)
			np->PHYType = MysonPHY;
		else
			np->PHYType = OtherPHY;
	}
	np->mii.phy_id = np->phys[0];

	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option > 0) {
		if (option & 0x200)
			np->mii.full_duplex = 1;
		np->default_port = option & 15;
	}

	if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
		np->mii.full_duplex = full_duplex[card_idx];

	if (np->mii.full_duplex) {
		printk(KERN_INFO "%s: Media type forced to Full Duplex.\n", dev->name);
/* 89/6/13 add, (begin) */
//		if (np->PHYType == MarvellPHY)
		if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], 9);
			data = (data & 0xfcff) | 0x0200;
			mdio_write(dev, np->phys[0], 9, data);
		}
/* 89/6/13 add, (end) */
		if (np->flags == HAS_MII_XCVR)
			mdio_write(dev, np->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
		else
			iowrite32(ADVERTISE_FULL, ioaddr + ANARANLPAR);
		np->mii.force_media = 1;
	}

	/* The chip-specific entries in the device structure. */
	dev->open = &netdev_open;
	dev->hard_start_xmit = &start_tx;
	dev->stop = &netdev_close;
	dev->get_stats = &get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &mii_ioctl;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->tx_timeout = &tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;

	err = register_netdev(dev);
	if (err)
		goto err_out_free_tx;

	printk(KERN_INFO "%s: %s at %p, ",
	       dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr);
	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);

	return 0;

err_out_free_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_free_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_free_dev:
	free_netdev(dev);
err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_res:
	pci_release_regions(pdev);
	return err;
}


static void __devexit fealnx_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
			np->tx_ring_dma);
		pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
			np->rx_ring_dma);
		unregister_netdev(dev);
		pci_iounmap(pdev, np->mem);
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_set_drvdata(pdev, NULL);
	} else
		printk(KERN_ERR "fealnx: remove for unknown device\n");
}


static ulong m80x_send_cmd_to_phy(void __iomem *miiport, int opcode, int phyad, int regad)
{
	ulong miir;
	int i;
	unsigned int mask, data;

	/* enable MII output */
	miir = (ulong) ioread32(miiport);
	miir &= 0xfffffff0;

	miir |= MASK_MIIR_MII_WRITE + MASK_MIIR_MII_MDO;

	/* send 32 1's preamble */
	for (i = 0; i < 32; i++) {
		/* low MDC; MDO is already high (miir) */
		miir &= ~MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);

		/* high MDC */
		miir |= MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);
	}

	/* calculate ST+OP+PHYAD+REGAD+TA */
	data = opcode | (phyad << 7) | (regad << 2);

	/* send it out */
	mask = 0x8000;
	while (mask) {
		/* low MDC, prepare MDO */
		miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
		if (mask & data)
			miir |= MASK_MIIR_MII_MDO;

		iowrite32(miir, miiport);
		/* high MDC */
		miir |= MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);
		udelay(30);

		/* next */
		mask >>= 1;
		if (mask == 0x2 && opcode == OP_READ)
			miir &= ~MASK_MIIR_MII_WRITE;
	}
	return miir;
}


static int mdio_read(struct net_device *dev, int phyad, int regad)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *miiport = np->mem + MANAGEMENT;
	ulong miir;
	unsigned int mask, data;

	miir = m80x_send_cmd_to_phy(miiport, OP_READ, phyad, regad);

	/* read data */
	mask = 0x8000;
	data = 0;
	while (mask) {
		/* low MDC */
		miir &= ~MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);

		/* read MDI */
		miir = ioread32(miiport);
		if (miir & MASK_MIIR_MII_MDI)
			data |= mask;

		/* high MDC, and wait */
		miir |= MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);
		udelay(30);

		/* next */
		mask >>= 1;
	}

	/* low MDC */
	miir &= ~MASK_MIIR_MII_MDC;
	iowrite32(miir, miiport);

	return data & 0xffff;
}


static void mdio_write(struct net_device *dev, int phyad, int regad, int data)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *miiport = np->mem + MANAGEMENT;
	ulong miir;
	unsigned int mask;

	miir = m80x_send_cmd_to_phy(miiport, OP_WRITE, phyad, regad);

	/* write data */
	mask = 0x8000;
	while (mask) {
		/* low MDC, prepare MDO */
		miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
		if (mask & data)
			miir |= MASK_MIIR_MII_MDO;
		iowrite32(miir, miiport);

		/* high MDC */
		miir |= MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);

		/* next */
		mask >>= 1;
	}

	/* low MDC */
	miir &= ~MASK_MIIR_MII_MDC;
	iowrite32(miir, miiport);
}
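/* Usage sketch (the probe path above does essentially this when scanning
   for transceivers; register names follow the standard MII layout):

	int bmsr = mdio_read(dev, phy, MII_BMSR);   0xffff/0x0000 => no PHY
	mdio_write(dev, phy, MII_ADVERTISE, ADVERTISE_FULL);

   Both helpers bit-bang the MANAGEMENT register, so they must only be
   called from contexts that can tolerate the udelay() busy-waits. */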


static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	int i;

	iowrite32(0x00000001, ioaddr + BCR);	/* Reset */

	if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev))
		return -EAGAIN;

	for (i = 0; i < 3; i++)
		iowrite16(((unsigned short*)dev->dev_addr)[i],
				ioaddr + PAR0 + i*2);

	init_ring(dev);

	iowrite32(np->rx_ring_dma, ioaddr + RXLBA);
	iowrite32(np->tx_ring_dma, ioaddr + TXLBA);

	/* Initialize other registers. */
	/* Configure the PCI bus bursts and FIFO thresholds.
	   486: Set 8 longword burst.
	   586: no burst limit.
	   Burst length 5:3
	   0 0 0   1
	   0 0 1   4
	   0 1 0   8
	   0 1 1   16
	   1 0 0   32
	   1 0 1   64
	   1 1 0   128
	   1 1 1   256
	   Wait the specified 50 PCI cycles after a reset by initializing
	   Tx and Rx queues and the address filter list.
	   FIXME (Ueimor): optimistic for alpha + posted writes ? */
#if defined(__powerpc__) || defined(__sparc__)
// 89/9/1 modify,
// np->bcrvalue = 0x04 | 0x38;	/* big-endian, 256 burst length */
	np->bcrvalue = 0x04 | 0x10;	/* big-endian, tx 8 burst length */
	np->crvalue = 0xe00;		/* rx 128 burst length */
#elif defined(__alpha__) || defined(__x86_64__)
// 89/9/1 modify,
// np->bcrvalue = 0x38;		/* little-endian, 256 burst length */
	np->bcrvalue = 0x10;		/* little-endian, 8 burst length */
	np->crvalue = 0xe00;		/* rx 128 burst length */
#elif defined(__i386__)
#if defined(MODULE)
// 89/9/1 modify,
// np->bcrvalue = 0x38;		/* little-endian, 256 burst length */
	np->bcrvalue = 0x10;		/* little-endian, 8 burst length */
	np->crvalue = 0xe00;		/* rx 128 burst length */
#else
	/* When not a module we can work around broken '486 PCI boards. */
#define x86 boot_cpu_data.x86
// 89/9/1 modify,
// np->bcrvalue = (x86 <= 4 ? 0x10 : 0x38);
	np->bcrvalue = 0x10;
	np->crvalue = (x86 <= 4 ? 0xa00 : 0xe00);
	if (x86 <= 4)
		printk(KERN_INFO "%s: This is a 386/486 PCI system, setting burst "
		       "length to %x.\n", dev->name, (x86 <= 4 ? 0x10 : 0x38));
#endif
#else
// 89/9/1 modify,
// np->bcrvalue = 0x38;
	np->bcrvalue = 0x10;
	np->crvalue = 0xe00;		/* rx 128 burst length */
#warning Processor architecture undefined!
#endif
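	/* Decode of the value actually programmed (a reading of the table
	   above, not vendor documentation): bcrvalue 0x10 puts 010 in BCR
	   bits 5:3, i.e. an 8-longword transmit burst; the 0x04 OR'ed in on
	   big-endian platforms is the endianness bit.  The 0xa00/0xe00 in
	   crvalue load a separate Rx burst field of TCRRCR, for which the
	   inline comments are the only documentation available. */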
// 89/12/29 add,
// 90/1/16 modify,
// np->imrvalue = FBE | TUNF | CNTOVF | RBU | TI | RI;
	np->imrvalue = TUNF | CNTOVF | RBU | TI | RI;
	if (np->pci_dev->device == 0x891) {
		np->bcrvalue |= 0x200;	/* set PROG bit */
		np->crvalue |= CR_W_ENH;	/* set enhanced bit */
		np->imrvalue |= ETI;
	}
	iowrite32(np->bcrvalue, ioaddr + BCR);

	if (dev->if_port == 0)
		dev->if_port = np->default_port;

	iowrite32(0, ioaddr + RXPDR);
// 89/9/1 modify,
// np->crvalue = 0x00e40001;	/* tx store and forward, tx/rx enable */
	np->crvalue |= 0x00e40001;	/* tx store and forward, tx/rx enable */
	np->mii.full_duplex = np->mii.force_media;
	getlinkstatus(dev);
	if (np->linkok)
		getlinktype(dev);
	__set_rx_mode(dev);

	netif_start_queue(dev);

	/* Clear and Enable interrupts by setting the interrupt mask. */
	iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
	iowrite32(np->imrvalue, ioaddr + IMR);

	if (debug)
		printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = RUN_AT(3 * HZ);
	np->timer.data = (unsigned long) dev;
	np->timer.function = &netdev_timer;

	/* timer handler */
	add_timer(&np->timer);

	init_timer(&np->reset_timer);
	np->reset_timer.data = (unsigned long) dev;
	np->reset_timer.function = &reset_timer;
	np->reset_timer_armed = 0;

	return 0;
}


static void getlinkstatus(struct net_device *dev)
/* function: Routine will read MII Status Register to get link status.       */
/* input   : dev... pointer to the adapter block.                            */
/* output  : none.                                                           */
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned int i, DelayTime = 0x1000;

	np->linkok = 0;

	if (np->PHYType == MysonPHY) {
		for (i = 0; i < DelayTime; ++i) {
			if (ioread32(np->mem + BMCRSR) & LinkIsUp2) {
				np->linkok = 1;
				return;
			}
			udelay(100);
		}
	} else {
		for (i = 0; i < DelayTime; ++i) {
			if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS) {
				np->linkok = 1;
				return;
			}
			udelay(100);
		}
	}
}


static void getlinktype(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	if (np->PHYType == MysonPHY) {	/* 3-in-1 case */
		if (ioread32(np->mem + TCRRCR) & CR_R_FD)
			np->duplexmode = 2;	/* full duplex */
		else
			np->duplexmode = 1;	/* half duplex */
		if (ioread32(np->mem + TCRRCR) & CR_R_PS10)
			np->line_speed = 1;	/* 10M */
		else
			np->line_speed = 2;	/* 100M */
	} else {
		if (np->PHYType == SeeqPHY) {	/* this PHY is SEEQ 80225 */
			unsigned int data;

			data = mdio_read(dev, np->phys[0], MIIRegister18);
			if (data & SPD_DET_100)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
			if (data & DPLX_DET_FULL)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
		} else if (np->PHYType == AhdocPHY) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], DiagnosticReg);
			if (data & Speed_100)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
			if (data & DPLX_FULL)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
		}
/* 89/6/13 add, (begin) */
		else if (np->PHYType == MarvellPHY) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], SpecificReg);
			if (data & Full_Duplex)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
			data &= SpeedMask;
			if (data == Speed_1000M)
				np->line_speed = 3;	/* 1000M */
			else if (data == Speed_100M)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
		}
/* 89/6/13 add, (end) */
/* 89/7/27 add, (begin) */
		else if (np->PHYType == Myson981) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], StatusRegister);

			if (data & SPEED100)
				np->line_speed = 2;
			else
				np->line_speed = 1;

			if (data & FULLMODE)
				np->duplexmode = 2;
			else
				np->duplexmode = 1;
		}
/* 89/7/27 add, (end) */
/* 89/12/29 add */
		else if (np->PHYType == LevelOnePHY) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], SpecificReg);
			if (data & LXT1000_Full)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
			data &= SpeedMask;
			if (data == LXT1000_1000M)
				np->line_speed = 3;	/* 1000M */
			else if (data == LXT1000_100M)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
		}
		np->crvalue &= (~CR_W_PS10) & (~CR_W_FD) & (~CR_W_PS1000);
		if (np->line_speed == 1)
			np->crvalue |= CR_W_PS10;
		else if (np->line_speed == 3)
			np->crvalue |= CR_W_PS1000;
		if (np->duplexmode == 2)
			np->crvalue |= CR_W_FD;
	}
}


/* Take lock before calling this */
static void allocate_rx_buffers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	/* allocate skb for rx buffers */
	while (np->really_rx_count != RX_RING_SIZE) {
		struct sk_buff *skb;

		skb = dev_alloc_skb(np->rx_buf_sz);
		if (skb == NULL)
			break;	/* Better luck next round. */

		while (np->lack_rxbuf->skbuff)
			np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;

		skb->dev = dev;	/* Mark as being used by this device. */
		np->lack_rxbuf->skbuff = skb;
		np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->data,
			np->rx_buf_sz, PCI_DMA_FROMDEVICE);
		np->lack_rxbuf->status = RXOWN;
		++np->really_rx_count;
	}
}


static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	int old_crvalue = np->crvalue;
	unsigned int old_linkok = np->linkok;
	unsigned long flags;

	if (debug)
		printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
		       "config %8.8x.\n", dev->name, ioread32(ioaddr + ISR),
		       ioread32(ioaddr + TCRRCR));

	spin_lock_irqsave(&np->lock, flags);

	if (np->flags == HAS_MII_XCVR) {
		getlinkstatus(dev);
		if ((old_linkok == 0) && (np->linkok == 1)) {	/* we need to detect the media type again */
			getlinktype(dev);
			if (np->crvalue != old_crvalue) {
				stop_nic_rxtx(ioaddr, np->crvalue);
				iowrite32(np->crvalue, ioaddr + TCRRCR);
			}
		}
	}

	allocate_rx_buffers(dev);

	spin_unlock_irqrestore(&np->lock, flags);

	np->timer.expires = RUN_AT(10 * HZ);
	add_timer(&np->timer);
}


/* Take lock before calling */
/* Reset chip and disable rx, tx and interrupts */
static void reset_and_disable_rxtx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	int delay = 51;

	/* Reset the chip's Tx and Rx processes. */
	stop_nic_rxtx(ioaddr, 0);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite32(0, ioaddr + IMR);

	/* Reset the chip to erase previous misconfiguration. */
	iowrite32(0x00000001, ioaddr + BCR);

	/* Ueimor: wait for 50 PCI cycles (and flush posted writes btw).
	   We surely wait too long (address+data phase). Who cares? */
	while (--delay) {
		ioread32(ioaddr + BCR);
		rmb();
	}
}


/* Take lock before calling */
/* Restore chip after reset */
static void enable_rxtx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;

	reset_rx_descriptors(dev);

	iowrite32(np->tx_ring_dma + ((char*)np->cur_tx - (char*)np->tx_ring),
		ioaddr + TXLBA);
	iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
		ioaddr + RXLBA);

	iowrite32(np->bcrvalue, ioaddr + BCR);

	iowrite32(0, ioaddr + RXPDR);
	__set_rx_mode(dev);	/* changes np->crvalue, writes it into TCRRCR */

	/* Clear and Enable interrupts by setting the interrupt mask. */
	iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
	iowrite32(np->imrvalue, ioaddr + IMR);

	iowrite32(0, ioaddr + TXPDR);
}


static void reset_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned long flags;

	printk(KERN_WARNING "%s: resetting tx and rx machinery\n", dev->name);

	spin_lock_irqsave(&np->lock, flags);
	np->crvalue = np->crvalue_sv;
	np->imrvalue = np->imrvalue_sv;

	reset_and_disable_rxtx(dev);
	/* works for me without this:
	   reset_tx_descriptors(dev); */
	enable_rxtx(dev);
	netif_start_queue(dev);	/* FIXME: or netif_wake_queue(dev); ? */

	np->reset_timer_armed = 0;

	spin_unlock_irqrestore(&np->lock, flags);
}


static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	unsigned long flags;
	int i;

	printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
	       " resetting...\n", dev->name, ioread32(ioaddr + ISR));

	{
		printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(" %8.8x", (unsigned int) np->rx_ring[i].status);
		printk("\n" KERN_DEBUG " Tx ring %p: ", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" %4.4x", np->tx_ring[i].status);
		printk("\n");
	}

	spin_lock_irqsave(&np->lock, flags);

	reset_and_disable_rxtx(dev);
	reset_tx_descriptors(dev);
	enable_rxtx(dev);

	spin_unlock_irqrestore(&np->lock, flags);

	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	netif_wake_queue(dev);	/* or .._start_.. ?? */
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	/* initialize rx variables */
	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	np->cur_rx = &np->rx_ring[0];
	np->lack_rxbuf = np->rx_ring;
	np->really_rx_count = 0;

	/* initialize the rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		np->rx_ring[i].control = np->rx_buf_sz << RBSShift;
		np->rx_ring[i].next_desc = np->rx_ring_dma +
			(i + 1)*sizeof(struct fealnx_desc);
		np->rx_ring[i].next_desc_logical = &np->rx_ring[i + 1];
		np->rx_ring[i].skbuff = NULL;
	}

	/* for the last rx descriptor */
	np->rx_ring[i - 1].next_desc = np->rx_ring_dma;
	np->rx_ring[i - 1].next_desc_logical = np->rx_ring;

	/* allocate skb for rx buffers */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);

		if (skb == NULL) {
			np->lack_rxbuf = &np->rx_ring[i];
			break;
		}

		++np->really_rx_count;
		np->rx_ring[i].skbuff = skb;
		skb->dev = dev;	/* Mark as being used by this device. */
		np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->data,
			np->rx_buf_sz, PCI_DMA_FROMDEVICE);
		np->rx_ring[i].status = RXOWN;
		np->rx_ring[i].control |= RXIC;
	}

	/* initialize tx variables */
	np->cur_tx = &np->tx_ring[0];
	np->cur_tx_copy = &np->tx_ring[0];
	np->really_tx_count = 0;
	np->free_tx_count = TX_RING_SIZE;

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].status = 0;
		/* do we need np->tx_ring[i].control = XXX; ?? */
		np->tx_ring[i].next_desc = np->tx_ring_dma +
			(i + 1)*sizeof(struct fealnx_desc);
		np->tx_ring[i].next_desc_logical = &np->tx_ring[i + 1];
		np->tx_ring[i].skbuff = NULL;
	}

	/* for the last tx descriptor */
	np->tx_ring[i - 1].next_desc = np->tx_ring_dma;
	np->tx_ring[i - 1].next_desc_logical = &np->tx_ring[0];
}
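/* The resulting topology, sketched for the 6-entry Tx ring (the Rx ring is
   identical in shape): next_desc links form a hardware-visible circle of
   bus addresses, and next_desc_logical mirrors it with virtual pointers:

	tx_ring[0] -> tx_ring[1] -> ... -> tx_ring[5] --+
	    ^                                           |
	    +-------------------------------------------+

   cur_tx chases completed descriptors while cur_tx_copy marks where
   start_tx() queues the next frame. */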


static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&np->lock, flags);

	np->cur_tx_copy->skbuff = skb;

#define one_buffer
#define BPT 1022
#if defined(one_buffer)
	np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
		skb->len, PCI_DMA_TODEVICE);
	np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
	np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
	np->cur_tx_copy->control |= (skb->len << TBSShift);	/* buffer size */
// 89/12/29 add,
	if (np->pci_dev->device == 0x891)
		np->cur_tx_copy->control |= ETIControl | RetryTxLC;
	np->cur_tx_copy->status = TXOWN;
	np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
	--np->free_tx_count;
#elif defined(two_buffer)
	if (skb->len > BPT) {
		struct fealnx_desc *next;

		/* for the first descriptor */
		np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
			BPT, PCI_DMA_TODEVICE);
		np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable;
		np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
		np->cur_tx_copy->control |= (BPT << TBSShift);	/* buffer size */

		/* for the last descriptor */
		next = np->cur_tx_copy->next_desc_logical;
		next->skbuff = skb;
		next->control = TXIC | TXLD | CRCEnable | PADEnable;
		next->control |= (skb->len << PKTSShift);	/* pkt size */
		next->control |= ((skb->len - BPT) << TBSShift);	/* buf size */
// 89/12/29 add,
		if (np->pci_dev->device == 0x891)
			np->cur_tx_copy->control |= ETIControl | RetryTxLC;
		next->buffer = pci_map_single(np->pci_dev, skb->data + BPT,
			skb->len - BPT, PCI_DMA_TODEVICE);

		next->status = TXOWN;
		np->cur_tx_copy->status = TXOWN;

		np->cur_tx_copy = next->next_desc_logical;
		np->free_tx_count -= 2;
	} else {
		np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
			skb->len, PCI_DMA_TODEVICE);
		np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
		np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
		np->cur_tx_copy->control |= (skb->len << TBSShift);	/* buffer size */
// 89/12/29 add,
		if (np->pci_dev->device == 0x891)
			np->cur_tx_copy->control |= ETIControl | RetryTxLC;
		np->cur_tx_copy->status = TXOWN;
		np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
		--np->free_tx_count;
	}
#endif

	if (np->free_tx_count < 2)
		netif_stop_queue(dev);
	++np->really_tx_count;
	iowrite32(0, np->mem + TXPDR);
	dev->trans_start = jiffies;

	spin_unlock_irqrestore(&np->lock, flags);
	return 0;
}


/* Take lock before calling */
/* Chip probably hosed tx ring. Clean up. */
static void reset_tx_descriptors(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct fealnx_desc *cur;
	int i;

	/* initialize tx variables */
	np->cur_tx = &np->tx_ring[0];
	np->cur_tx_copy = &np->tx_ring[0];
	np->really_tx_count = 0;
	np->free_tx_count = TX_RING_SIZE;

	for (i = 0; i < TX_RING_SIZE; i++) {
		cur = &np->tx_ring[i];
		if (cur->skbuff) {
			pci_unmap_single(np->pci_dev, cur->buffer,
				cur->skbuff->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_any(cur->skbuff);
			cur->skbuff = NULL;
		}
		cur->status = 0;
		cur->control = 0;	/* needed? */
		/* probably not needed. We do it for purely paranoid reasons */
		cur->next_desc = np->tx_ring_dma +
			(i + 1)*sizeof(struct fealnx_desc);
		cur->next_desc_logical = &np->tx_ring[i + 1];
	}
	/* for the last tx descriptor */
	np->tx_ring[TX_RING_SIZE - 1].next_desc = np->tx_ring_dma;
	np->tx_ring[TX_RING_SIZE - 1].next_desc_logical = &np->tx_ring[0];
}


/* Take lock and stop rx before calling this */
static void reset_rx_descriptors(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct fealnx_desc *cur = np->cur_rx;
	int i;

	allocate_rx_buffers(dev);

	for (i = 0; i < RX_RING_SIZE; i++) {
		if (cur->skbuff)
			cur->status = RXOWN;
		cur = cur->next_desc_logical;
	}

	iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
		np->mem + RXLBA);
}


/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = (struct net_device *) dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	long boguscnt = max_interrupt_work;
	unsigned int num_tx = 0;
	int handled = 0;

	spin_lock(&np->lock);

	iowrite32(0, ioaddr + IMR);

	do {
		u32 intr_status = ioread32(ioaddr + ISR);

		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(intr_status, ioaddr + ISR);

		if (debug)
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", dev->name,
			       intr_status);

		if (!(intr_status & np->imrvalue))
			break;

		handled = 1;

// 90/1/16 delete,
//
//		if (intr_status & FBE)
//		{	/* fatal error */
//			stop_nic_tx(ioaddr, 0);
//			stop_nic_rx(ioaddr, 0);
//			break;
//		};

		if (intr_status & TUNF)
			iowrite32(0, ioaddr + TXPDR);

		if (intr_status & CNTOVF) {
			/* missed pkts */
			np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;

			/* crc error */
			np->stats.rx_crc_errors +=
			    (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
		}

		if (intr_status & (RI | RBU)) {
			if (intr_status & RI)
				netdev_rx(dev);
			else {
				stop_nic_rx(ioaddr, np->crvalue);
				reset_rx_descriptors(dev);
				iowrite32(np->crvalue, ioaddr + TCRRCR);
			}
		}

		while (np->really_tx_count) {
			long tx_status = np->cur_tx->status;
			long tx_control = np->cur_tx->control;

			if (!(tx_control & TXLD)) {	/* this pkt is made up of two tx descriptors */
				struct fealnx_desc *next;

				next = np->cur_tx->next_desc_logical;
				tx_status = next->status;
				tx_control = next->control;
			}

			if (tx_status & TXOWN)
				break;

			if (!(np->crvalue & CR_W_ENH)) {
				if (tx_status & (CSL | LC | EC | UDF | HF)) {
					np->stats.tx_errors++;
					if (tx_status & EC)
						np->stats.tx_aborted_errors++;
					if (tx_status & CSL)
						np->stats.tx_carrier_errors++;
					if (tx_status & LC)
						np->stats.tx_window_errors++;
					if (tx_status & UDF)
						np->stats.tx_fifo_errors++;
					if ((tx_status & HF) && np->mii.full_duplex == 0)
						np->stats.tx_heartbeat_errors++;

				} else {
					np->stats.tx_bytes +=
					    ((tx_control & PKTSMask) >> PKTSShift);

					np->stats.collisions +=
					    ((tx_status & NCRMask) >> NCRShift);
					np->stats.tx_packets++;
				}
			} else {
				np->stats.tx_bytes +=
				    ((tx_control & PKTSMask) >> PKTSShift);
				np->stats.tx_packets++;
			}

			/* Free the original skb. */
			pci_unmap_single(np->pci_dev, np->cur_tx->buffer,
				np->cur_tx->skbuff->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(np->cur_tx->skbuff);
			np->cur_tx->skbuff = NULL;
			--np->really_tx_count;
			if (np->cur_tx->control & TXLD) {
				np->cur_tx = np->cur_tx->next_desc_logical;
				++np->free_tx_count;
			} else {
				np->cur_tx = np->cur_tx->next_desc_logical;
				np->cur_tx = np->cur_tx->next_desc_logical;
				np->free_tx_count += 2;
			}
			num_tx++;
		}	/* end of the tx-reap while loop */

		if (num_tx && np->free_tx_count >= 2)
			netif_wake_queue(dev);

		/* read transmit status for enhanced mode only */
		if (np->crvalue & CR_W_ENH) {
			long data;

			data = ioread32(ioaddr + TSR);
			np->stats.tx_errors += (data & 0xff000000) >> 24;
			np->stats.tx_aborted_errors += (data & 0xff000000) >> 24;
			np->stats.tx_window_errors += (data & 0x00ff0000) >> 16;
			np->stats.collisions += (data & 0x0000ffff);
		}

		if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
			       "status=0x%4.4x.\n", dev->name, intr_status);
			if (!np->reset_timer_armed) {
				np->reset_timer_armed = 1;
				np->reset_timer.expires = RUN_AT(HZ/2);
				add_timer(&np->reset_timer);
				stop_nic_rxtx(ioaddr, 0);
				netif_stop_queue(dev);
				/* or netif_tx_disable(dev); ?? */
				/* Prevent other paths from enabling tx,rx,intrs */
				np->crvalue_sv = np->crvalue;
				np->imrvalue_sv = np->imrvalue;
				np->crvalue &= ~(CR_W_TXEN | CR_W_RXEN);	/* or simply = 0? */
				np->imrvalue = 0;
			}

			break;
		}
	} while (1);

	/* read the tally counters */
	/* missed pkts */
	np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;

	/* crc error */
	np->stats.rx_crc_errors += (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;

	if (debug)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
		       dev->name, ioread32(ioaddr + ISR));

	iowrite32(np->imrvalue, ioaddr + IMR);

	spin_unlock(&np->lock);

	return IRQ_RETVAL(handled);
}


/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (!(np->cur_rx->status & RXOWN) && np->cur_rx->skbuff) {
		s32 rx_status = np->cur_rx->status;

		if (np->really_rx_count == 0)
			break;

		if (debug)
			printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n", rx_status);

		if ((!((rx_status & RXFSD) && (rx_status & RXLSD)))
		    || (rx_status & ErrorSummary)) {
			if (rx_status & ErrorSummary) {	/* there was a fatal error */
				if (debug)
					printk(KERN_DEBUG
					       "%s: Receive error, Rx status %8.8x.\n",
					       dev->name, rx_status);

				np->stats.rx_errors++;	/* end of a packet. */
				if (rx_status & (LONG | RUNT))
					np->stats.rx_length_errors++;
				if (rx_status & RXER)
					np->stats.rx_frame_errors++;
				if (rx_status & CRC)
					np->stats.rx_crc_errors++;
			} else {
				int need_to_reset = 0;
				int desno = 0;

				if (rx_status & RXFSD) {	/* this pkt is too long, over one rx buffer */
					struct fealnx_desc *cur;

					/* check whether this packet was received completely */
					cur = np->cur_rx;
					while (desno <= np->really_rx_count) {
						++desno;
						if ((!(cur->status & RXOWN))
						    && (cur->status & RXLSD))
							break;
						/* goto next rx descriptor */
						cur = cur->next_desc_logical;
					}
					if (desno > np->really_rx_count)
						need_to_reset = 1;
				} else	/* RXLSD did not show up; something is wrong */
					need_to_reset = 1;

				if (need_to_reset == 0) {
					int i;

					np->stats.rx_length_errors++;

					/* free all rx descriptors related to this long pkt */
					for (i = 0; i < desno; ++i) {
						if (!np->cur_rx->skbuff) {
							printk(KERN_DEBUG
								"%s: I'm scared\n", dev->name);
							break;
						}
						np->cur_rx->status = RXOWN;
						np->cur_rx = np->cur_rx->next_desc_logical;
					}
					continue;
				} else {	/* rx error, need to reset this chip */
					stop_nic_rx(ioaddr, np->crvalue);
					reset_rx_descriptors(dev);
					iowrite32(np->crvalue, ioaddr + TCRRCR);
				}
				break;	/* exit the while loop */
			}
		} else {	/* this received pkt is ok */

			struct sk_buff *skb;
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((rx_status & FLNGMASK) >> FLNGShift) - 4;

#ifndef final_version
			if (debug)
				printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
				       " status %x.\n", pkt_len, rx_status);
#endif

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(np->pci_dev,
							    np->cur_rx->buffer,
							    np->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				/* Call copy + cksum if available. */

#if ! defined(__alpha__)
				eth_copy_and_sum(skb,
					np->cur_rx->skbuff->data, pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
					np->cur_rx->skbuff->data, pkt_len);
#endif
				pci_dma_sync_single_for_device(np->pci_dev,
							       np->cur_rx->buffer,
							       np->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				pci_unmap_single(np->pci_dev,
						 np->cur_rx->buffer,
						 np->rx_buf_sz,
						 PCI_DMA_FROMDEVICE);
				skb_put(skb = np->cur_rx->skbuff, pkt_len);
				np->cur_rx->skbuff = NULL;
				--np->really_rx_count;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			np->stats.rx_packets++;
			np->stats.rx_bytes += pkt_len;
		}

		np->cur_rx = np->cur_rx->next_desc_logical;
	}	/* end of while loop */

	/* allocate skb for rx buffers */
	allocate_rx_buffers(dev);

	return 0;
}


static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;

	/* The chip only needs to report frames silently dropped. */
	if (netif_running(dev)) {
		np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;
		np->stats.rx_crc_errors += (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
	}

	return &np->stats;
}


/* for dev->set_multicast_list */
static void set_rx_mode(struct net_device *dev)
{
	spinlock_t *lp = &((struct netdev_private *)netdev_priv(dev))->lock;
	unsigned long flags;
	spin_lock_irqsave(lp, flags);
	__set_rx_mode(dev);
	spin_unlock_irqrestore(lp, flags);
}


/* Take lock before calling */
static void __set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	u32 mc_filter[2];	/* Multicast hash filter */
	u32 rx_mode;

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		/* Unconditionally log net taps. */
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM;
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = CR_W_AB | CR_W_AM;
	} else {
		struct dev_mc_list *mclist;
		int i;

		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			unsigned int bit;
			bit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
			mc_filter[bit >> 5] |= (1 << (bit & 31));
		}
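		/* Hash sketch: the top 6 bits of the Ethernet CRC (inverted
		   by the ^ 0x3F) index one of 64 filter bits; bit >> 5
		   selects the MAR0 or MAR1 word and the low 5 bits select
		   the bit within it.  E.g. a hypothetical bit value of 33
		   sets bit 1 of MAR1. */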
		rx_mode = CR_W_AB | CR_W_AM;
	}

	stop_nic_rxtx(ioaddr, np->crvalue);

	iowrite32(mc_filter[0], ioaddr + MAR0);
	iowrite32(mc_filter[1], ioaddr + MAR1);
	np->crvalue &= ~CR_W_RXMODEMASK;
	np->crvalue |= rx_mode;
	iowrite32(np->crvalue, ioaddr + TCRRCR);
}

static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_gset(&np->mii, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_sset(&np->mii, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

static struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_sg			= ethtool_op_get_sg,
	.get_tx_csum		= ethtool_op_get_tx_csum,
};

static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&np->lock);

	return rc;
}


static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	int i;

	netif_stop_queue(dev);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite32(0x0000, ioaddr + IMR);

	/* Stop the chip's Tx and Rx processes. */
	stop_nic_rxtx(ioaddr, 0);

	del_timer_sync(&np->timer);
	del_timer_sync(&np->reset_timer);

	free_irq(dev->irq, dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = np->rx_ring[i].skbuff;

		np->rx_ring[i].status = 0;
		if (skb) {
			pci_unmap_single(np->pci_dev, np->rx_ring[i].buffer,
				np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
			np->rx_ring[i].skbuff = NULL;
		}
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = np->tx_ring[i].skbuff;

		if (skb) {
			pci_unmap_single(np->pci_dev, np->tx_ring[i].buffer,
				skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
			np->tx_ring[i].skbuff = NULL;
		}
	}

	return 0;
}

static struct pci_device_id fealnx_pci_tbl[] = {
	{0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
	{0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
	{}			/* terminate list */
};
MODULE_DEVICE_TABLE(pci, fealnx_pci_tbl);


static struct pci_driver fealnx_driver = {
	.name		= "fealnx",
	.id_table	= fealnx_pci_tbl,
	.probe		= fealnx_init_one,
	.remove		= __devexit_p(fealnx_remove_one),
};

static int __init fealnx_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif

	return pci_module_init(&fealnx_driver);
}

static void __exit fealnx_exit(void)
{
	pci_unregister_driver(&fealnx_driver);
}

module_init(fealnx_init);
module_exit(fealnx_exit);