1/* de2104x.c: A Linux PCI Ethernet driver for Intel/Digital 21040/1 chips. */
2/*
3 Copyright 2001,2003 Jeff Garzik <jgarzik@pobox.com>
4
5 Copyright 1994, 1995 Digital Equipment Corporation. [de4x5.c]
6 Written/copyright 1994-2001 by Donald Becker. [tulip.c]
7
8 This software may be used and distributed according to the terms of
9 the GNU General Public License (GPL), incorporated herein by reference.
10 Drivers based on or derived from this code fall under the GPL and must
11 retain the authorship, copyright and license notice. This file is not
12 a complete program and may only be used when the entire operating
13 system is licensed under the GPL.
14
15 See the file COPYING in this distribution for more information.
16
17 TODO, in rough priority order:
18 * Support forcing media type with a module parameter,
19 like dl2k.c/sundance.c
20 * Constants (module parms?) for Rx work limit
21 * Complete reset on PciErr
22 * Jumbo frames / dev->change_mtu
23 * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
24 * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
25 * Implement Tx software interrupt mitigation via
26 Tx descriptor bit
27
28 */
29
30#define DRV_NAME "de2104x"
31#define DRV_VERSION "0.7"
32#define DRV_RELDATE "Mar 17, 2004"
33
34#include <linux/module.h>
35#include <linux/kernel.h>
36#include <linux/netdevice.h>
37#include <linux/etherdevice.h>
38#include <linux/init.h>
39#include <linux/pci.h>
40#include <linux/delay.h>
41#include <linux/ethtool.h>
42#include <linux/compiler.h>
43#include <linux/rtnetlink.h>
44#include <linux/crc32.h>
45
46#include <asm/io.h>
47#include <asm/irq.h>
48#include <asm/uaccess.h>
49#include <asm/unaligned.h>
50
51/* These identify the driver base version and may not be removed. */
52static char version[] =
53KERN_INFO DRV_NAME " PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
54
55MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
56MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
57MODULE_LICENSE("GPL");
58MODULE_VERSION(DRV_VERSION);
59
60static int debug = -1;
61module_param (debug, int, 0);
62MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");
63
64/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
65#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
 66 || defined(__sparc__) || defined(__ia64__) \
67 || defined(__sh__) || defined(__mips__)
68static int rx_copybreak = 1518;
69#else
70static int rx_copybreak = 100;
71#endif
72module_param (rx_copybreak, int, 0);
73MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied");
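/* Both knobs above are load-time module parameters.  A hypothetical
 * invocation (values for illustration only):
 *
 *	modprobe de2104x debug=0x37 rx_copybreak=200
 *
 * Frames no longer than rx_copybreak bytes are copied into a small
 * freshly allocated skb so the full-size ring buffer can be reused;
 * longer frames are handed up directly and the ring slot is refilled
 * with a new buffer (see de_rx() below).
 */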
74
75#define PFX DRV_NAME ": "
76
77#define DE_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
78 NETIF_MSG_PROBE | \
79 NETIF_MSG_LINK | \
80 NETIF_MSG_IFDOWN | \
81 NETIF_MSG_IFUP | \
82 NETIF_MSG_RX_ERR | \
83 NETIF_MSG_TX_ERR)
84
85#define DE_RX_RING_SIZE 64
86#define DE_TX_RING_SIZE 64
87#define DE_RING_BYTES \
88 ((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \
89 (sizeof(struct de_desc) * DE_TX_RING_SIZE))
90#define NEXT_TX(N) (((N) + 1) & (DE_TX_RING_SIZE - 1))
91#define NEXT_RX(N) (((N) + 1) & (DE_RX_RING_SIZE - 1))
92#define TX_BUFFS_AVAIL(CP) \
93 (((CP)->tx_tail <= (CP)->tx_head) ? \
94 (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head : \
95 (CP)->tx_tail - (CP)->tx_head - 1)
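/* Ring accounting, as implied by the macros above: tx_head is the slot
 * where the next packet is queued and tx_tail is the next slot to be
 * reclaimed in de_tx().  One slot is always left unused so that
 * tx_head == tx_tail unambiguously means "ring empty"; consequently
 * TX_BUFFS_AVAIL() never exceeds DE_TX_RING_SIZE - 1.
 */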
96
97#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
98#define RX_OFFSET 2
99
100#define DE_SETUP_SKB ((struct sk_buff *) 1)
101#define DE_DUMMY_SKB ((struct sk_buff *) 2)
102#define DE_SETUP_FRAME_WORDS 96
103#define DE_EEPROM_WORDS 256
104#define DE_EEPROM_SIZE (DE_EEPROM_WORDS * sizeof(u16))
105#define DE_MAX_MEDIA 5
106
107#define DE_MEDIA_TP_AUTO 0
108#define DE_MEDIA_BNC 1
109#define DE_MEDIA_AUI 2
110#define DE_MEDIA_TP 3
111#define DE_MEDIA_TP_FD 4
112#define DE_MEDIA_INVALID DE_MAX_MEDIA
113#define DE_MEDIA_FIRST 0
114#define DE_MEDIA_LAST (DE_MAX_MEDIA - 1)
115#define DE_AUI_BNC (SUPPORTED_AUI | SUPPORTED_BNC)
116
117#define DE_TIMER_LINK (60 * HZ)
118#define DE_TIMER_NO_LINK (5 * HZ)
119
120#define DE_NUM_REGS 16
121#define DE_REGS_SIZE (DE_NUM_REGS * sizeof(u32))
122#define DE_REGS_VER 1
123
124/* Time in jiffies before concluding the transmitter is hung. */
125#define TX_TIMEOUT (6*HZ)
126
127#define DE_UNALIGNED_16(a) (u16)(get_unaligned((u16 *)(a)))
128
129/* This is a mysterious value that can be written to CSR11 in the 21040 (only)
130 to support a pre-NWay full-duplex signaling mechanism using short frames.
131 No one knows what it should be, but if left at its default value some
132 10base2(!) packets trigger a full-duplex-request interrupt. */
133#define FULL_DUPLEX_MAGIC 0x6969
134
135enum {
136 /* NIC registers */
137 BusMode = 0x00,
138 TxPoll = 0x08,
139 RxPoll = 0x10,
140 RxRingAddr = 0x18,
141 TxRingAddr = 0x20,
142 MacStatus = 0x28,
143 MacMode = 0x30,
144 IntrMask = 0x38,
145 RxMissed = 0x40,
146 ROMCmd = 0x48,
147 CSR11 = 0x58,
148 SIAStatus = 0x60,
149 CSR13 = 0x68,
150 CSR14 = 0x70,
151 CSR15 = 0x78,
152 PCIPM = 0x40,
153
154 /* BusMode bits */
155 CmdReset = (1 << 0),
156 CacheAlign16 = 0x00008000,
157 BurstLen4 = 0x00000400,
158
159 /* Rx/TxPoll bits */
160 NormalTxPoll = (1 << 0),
161 NormalRxPoll = (1 << 0),
162
163 /* Tx/Rx descriptor status bits */
164 DescOwn = (1 << 31),
165 RxError = (1 << 15),
166 RxErrLong = (1 << 7),
167 RxErrCRC = (1 << 1),
168 RxErrFIFO = (1 << 0),
169 RxErrRunt = (1 << 11),
170 RxErrFrame = (1 << 14),
171 RingEnd = (1 << 25),
172 FirstFrag = (1 << 29),
173 LastFrag = (1 << 30),
174 TxError = (1 << 15),
175 TxFIFOUnder = (1 << 1),
176 TxLinkFail = (1 << 2) | (1 << 10) | (1 << 11),
177 TxMaxCol = (1 << 8),
178 TxOWC = (1 << 9),
179 TxJabber = (1 << 14),
180 SetupFrame = (1 << 27),
181 TxSwInt = (1 << 31),
182
183 /* MacStatus bits */
184 IntrOK = (1 << 16),
185 IntrErr = (1 << 15),
186 RxIntr = (1 << 6),
187 RxEmpty = (1 << 7),
188 TxIntr = (1 << 0),
189 TxEmpty = (1 << 2),
190 PciErr = (1 << 13),
191 TxState = (1 << 22) | (1 << 21) | (1 << 20),
192 RxState = (1 << 19) | (1 << 18) | (1 << 17),
193 LinkFail = (1 << 12),
194 LinkPass = (1 << 4),
195 RxStopped = (1 << 8),
196 TxStopped = (1 << 1),
197
198 /* MacMode bits */
199 TxEnable = (1 << 13),
200 RxEnable = (1 << 1),
201 RxTx = TxEnable | RxEnable,
202 FullDuplex = (1 << 9),
203 AcceptAllMulticast = (1 << 7),
204 AcceptAllPhys = (1 << 6),
205 BOCnt = (1 << 5),
206 MacModeClear = (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) |
207 RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast,
208
209 /* ROMCmd bits */
210 EE_SHIFT_CLK = 0x02, /* EEPROM shift clock. */
211 EE_CS = 0x01, /* EEPROM chip select. */
212 EE_DATA_WRITE = 0x04, /* Data from the Tulip to EEPROM. */
213 EE_WRITE_0 = 0x01,
214 EE_WRITE_1 = 0x05,
215 EE_DATA_READ = 0x08, /* Data from the EEPROM chip. */
216 EE_ENB = (0x4800 | EE_CS),
217
 218 /* The EEPROM commands include the always-set leading bit. */
219 EE_READ_CMD = 6,
220
221 /* RxMissed bits */
222 RxMissedOver = (1 << 16),
223 RxMissedMask = 0xffff,
224
225 /* SROM-related bits */
226 SROMC0InfoLeaf = 27,
227 MediaBlockMask = 0x3f,
228 MediaCustomCSRs = (1 << 6),
 229
230 /* PCIPM bits */
231 PM_Sleep = (1 << 31),
232 PM_Snooze = (1 << 30),
233 PM_Mask = PM_Sleep | PM_Snooze,
 234
235 /* SIAStatus bits */
236 NWayState = (1 << 14) | (1 << 13) | (1 << 12),
237 NWayRestart = (1 << 12),
238 NonselPortActive = (1 << 9),
239 LinkFailStatus = (1 << 2),
240 NetCxnErr = (1 << 1),
241};
242
243static const u32 de_intr_mask =
244 IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty |
245 LinkPass | LinkFail | PciErr;
246
247/*
 248 * Set the programmable burst length to 4 longwords for all chips;
 249 * DMA errors result without these values. Cache-align to 16 longwords.
250 */
251static const u32 de_bus_mode = CacheAlign16 | BurstLen4;
252
253struct de_srom_media_block {
254 u8 opts;
255 u16 csr13;
256 u16 csr14;
257 u16 csr15;
258} __attribute__((packed));
259
260struct de_srom_info_leaf {
261 u16 default_media;
262 u8 n_blocks;
263 u8 unused;
264} __attribute__((packed));
265
266struct de_desc {
267 u32 opts1;
268 u32 opts2;
269 u32 addr1;
270 u32 addr2;
271};
272
273struct media_info {
274 u16 type; /* DE_MEDIA_xxx */
275 u16 csr13;
276 u16 csr14;
277 u16 csr15;
278};
279
280struct ring_info {
281 struct sk_buff *skb;
282 dma_addr_t mapping;
283};
284
285struct de_private {
286 unsigned tx_head;
287 unsigned tx_tail;
288 unsigned rx_tail;
289
290 void __iomem *regs;
291 struct net_device *dev;
292 spinlock_t lock;
293
294 struct de_desc *rx_ring;
295 struct de_desc *tx_ring;
296 struct ring_info tx_skb[DE_TX_RING_SIZE];
297 struct ring_info rx_skb[DE_RX_RING_SIZE];
298 unsigned rx_buf_sz;
299 dma_addr_t ring_dma;
300
301 u32 msg_enable;
302
303 struct net_device_stats net_stats;
304
305 struct pci_dev *pdev;
306
307 u16 setup_frame[DE_SETUP_FRAME_WORDS];
308
309 u32 media_type;
310 u32 media_supported;
311 u32 media_advertise;
312 struct media_info media[DE_MAX_MEDIA];
313 struct timer_list media_timer;
314
315 u8 *ee_data;
316 unsigned board_idx;
317 unsigned de21040 : 1;
318 unsigned media_lock : 1;
319};
320
321
322static void de_set_rx_mode (struct net_device *dev);
323static void de_tx (struct de_private *de);
324static void de_clean_rings (struct de_private *de);
325static void de_media_interrupt (struct de_private *de, u32 status);
326static void de21040_media_timer (unsigned long data);
327static void de21041_media_timer (unsigned long data);
328static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
329
330
331static struct pci_device_id de_pci_tbl[] = {
332 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
333 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
334 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
335 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
336 { },
337};
338MODULE_DEVICE_TABLE(pci, de_pci_tbl);
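/* driver_data in the table above selects the chip flavour: 0 for the
 * 21040, 1 for the 21041.  de_init_one() turns it into de->de21040,
 * which gates the MAC-address/media probing path and a few register
 * quirks elsewhere in this file.
 */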
339
340static const char * const media_name[DE_MAX_MEDIA] = {
341 "10baseT auto",
342 "BNC",
343 "AUI",
344 "10baseT-HD",
345 "10baseT-FD"
346};
347
348/* 21040 transceiver register settings:
349 * TP AUTO(unused), BNC(unused), AUI, TP, TP FD*/
350static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, };
351static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, };
352static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
353
354/* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
355static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
356static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
357static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
358
359
360#define dr32(reg) readl(de->regs + (reg))
361#define dw32(reg,val) writel((val), de->regs + (reg))
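/* dr32()/dw32() are thin MMIO accessors over the CSR window mapped at
 * de->regs; both assume a variable named "de" of type struct de_private *
 * is in scope, which holds for every user in this file.
 */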
362
363
364static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
365 u32 status, u32 len)
366{
367 if (netif_msg_rx_err (de))
368 printk (KERN_DEBUG
369 "%s: rx err, slot %d status 0x%x len %d\n",
370 de->dev->name, rx_tail, status, len);
371
372 if ((status & 0x38000300) != 0x0300) {
 373 /* Ignore earlier buffers. */
374 if ((status & 0xffff) != 0x7fff) {
375 if (netif_msg_rx_err(de))
376 printk(KERN_WARNING "%s: Oversized Ethernet frame "
377 "spanned multiple buffers, status %8.8x!\n",
378 de->dev->name, status);
379 de->net_stats.rx_length_errors++;
380 }
381 } else if (status & RxError) {
382 /* There was a fatal error. */
383 de->net_stats.rx_errors++; /* end of a packet.*/
384 if (status & 0x0890) de->net_stats.rx_length_errors++;
385 if (status & RxErrCRC) de->net_stats.rx_crc_errors++;
386 if (status & RxErrFIFO) de->net_stats.rx_fifo_errors++;
387 }
388}
389
390static void de_rx (struct de_private *de)
391{
392 unsigned rx_tail = de->rx_tail;
393 unsigned rx_work = DE_RX_RING_SIZE;
394 unsigned drop = 0;
395 int rc;
396
397 while (rx_work--) {
398 u32 status, len;
399 dma_addr_t mapping;
400 struct sk_buff *skb, *copy_skb;
401 unsigned copying_skb, buflen;
402
403 skb = de->rx_skb[rx_tail].skb;
 404 BUG_ON(!skb);
405 rmb();
406 status = le32_to_cpu(de->rx_ring[rx_tail].opts1);
407 if (status & DescOwn)
408 break;
409
410 len = ((status >> 16) & 0x7ff) - 4;
411 mapping = de->rx_skb[rx_tail].mapping;
412
413 if (unlikely(drop)) {
414 de->net_stats.rx_dropped++;
415 goto rx_next;
416 }
417
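		/* The test below accepts only frames wholly contained in one
		 * descriptor with no error reported: the 0x0300 bits appear to
		 * be the first/last-descriptor status flags, and the rest of
		 * the 0x38008300 mask covers error/state bits that must be
		 * clear (a best-effort reading of the RDES0 layout; error
		 * bookkeeping is done in de_rx_err_acct()).
		 */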
418 if (unlikely((status & 0x38008300) != 0x0300)) {
419 de_rx_err_acct(de, rx_tail, status, len);
420 goto rx_next;
421 }
422
423 copying_skb = (len <= rx_copybreak);
424
425 if (unlikely(netif_msg_rx_status(de)))
426 printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d copying? %d\n",
427 de->dev->name, rx_tail, status, len,
428 copying_skb);
429
430 buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
431 copy_skb = dev_alloc_skb (buflen);
432 if (unlikely(!copy_skb)) {
433 de->net_stats.rx_dropped++;
434 drop = 1;
435 rx_work = 100;
436 goto rx_next;
437 }
438 copy_skb->dev = de->dev;
439
440 if (!copying_skb) {
441 pci_unmap_single(de->pdev, mapping,
442 buflen, PCI_DMA_FROMDEVICE);
443 skb_put(skb, len);
444
445 mapping =
446 de->rx_skb[rx_tail].mapping =
 447 pci_map_single(de->pdev, copy_skb->data,
448 buflen, PCI_DMA_FROMDEVICE);
449 de->rx_skb[rx_tail].skb = copy_skb;
450 } else {
451 pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
452 skb_reserve(copy_skb, RX_OFFSET);
 453 memcpy(skb_put(copy_skb, len), skb->data, len);
454
455 pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
456
457 /* We'll reuse the original ring buffer. */
458 skb = copy_skb;
459 }
460
461 skb->protocol = eth_type_trans (skb, de->dev);
462
463 de->net_stats.rx_packets++;
464 de->net_stats.rx_bytes += skb->len;
465 de->dev->last_rx = jiffies;
466 rc = netif_rx (skb);
467 if (rc == NET_RX_DROP)
468 drop = 1;
469
470rx_next:
471 de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
472 if (rx_tail == (DE_RX_RING_SIZE - 1))
473 de->rx_ring[rx_tail].opts2 =
474 cpu_to_le32(RingEnd | de->rx_buf_sz);
475 else
476 de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
477 de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
478 rx_tail = NEXT_RX(rx_tail);
479 }
480
481 if (!rx_work)
482 printk(KERN_WARNING "%s: rx work limit reached\n", de->dev->name);
483
484 de->rx_tail = rx_tail;
485}
486
 487static irqreturn_t de_interrupt (int irq, void *dev_instance)
488{
489 struct net_device *dev = dev_instance;
490 struct de_private *de = dev->priv;
491 u32 status;
492
493 status = dr32(MacStatus);
494 if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))
495 return IRQ_NONE;
496
497 if (netif_msg_intr(de))
498 printk(KERN_DEBUG "%s: intr, status %08x mode %08x desc %u/%u/%u\n",
499 dev->name, status, dr32(MacMode), de->rx_tail, de->tx_head, de->tx_tail);
500
501 dw32(MacStatus, status);
502
503 if (status & (RxIntr | RxEmpty)) {
504 de_rx(de);
505 if (status & RxEmpty)
506 dw32(RxPoll, NormalRxPoll);
507 }
508
509 spin_lock(&de->lock);
510
511 if (status & (TxIntr | TxEmpty))
512 de_tx(de);
513
514 if (status & (LinkPass | LinkFail))
515 de_media_interrupt(de, status);
516
517 spin_unlock(&de->lock);
518
519 if (status & PciErr) {
520 u16 pci_status;
521
522 pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
523 pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
524 printk(KERN_ERR "%s: PCI bus error, status=%08x, PCI status=%04x\n",
525 dev->name, status, pci_status);
526 }
527
528 return IRQ_HANDLED;
529}
530
531static void de_tx (struct de_private *de)
532{
533 unsigned tx_head = de->tx_head;
534 unsigned tx_tail = de->tx_tail;
535
536 while (tx_tail != tx_head) {
537 struct sk_buff *skb;
538 u32 status;
539
540 rmb();
541 status = le32_to_cpu(de->tx_ring[tx_tail].opts1);
542 if (status & DescOwn)
543 break;
544
545 skb = de->tx_skb[tx_tail].skb;
 546 BUG_ON(!skb);
547 if (unlikely(skb == DE_DUMMY_SKB))
548 goto next;
549
550 if (unlikely(skb == DE_SETUP_SKB)) {
551 pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
552 sizeof(de->setup_frame), PCI_DMA_TODEVICE);
553 goto next;
554 }
555
556 pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
557 skb->len, PCI_DMA_TODEVICE);
558
559 if (status & LastFrag) {
560 if (status & TxError) {
561 if (netif_msg_tx_err(de))
562 printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
563 de->dev->name, status);
564 de->net_stats.tx_errors++;
565 if (status & TxOWC)
566 de->net_stats.tx_window_errors++;
567 if (status & TxMaxCol)
568 de->net_stats.tx_aborted_errors++;
569 if (status & TxLinkFail)
570 de->net_stats.tx_carrier_errors++;
571 if (status & TxFIFOUnder)
572 de->net_stats.tx_fifo_errors++;
573 } else {
574 de->net_stats.tx_packets++;
575 de->net_stats.tx_bytes += skb->len;
576 if (netif_msg_tx_done(de))
577 printk(KERN_DEBUG "%s: tx done, slot %d\n", de->dev->name, tx_tail);
578 }
579 dev_kfree_skb_irq(skb);
580 }
581
582next:
583 de->tx_skb[tx_tail].skb = NULL;
584
585 tx_tail = NEXT_TX(tx_tail);
586 }
587
588 de->tx_tail = tx_tail;
589
590 if (netif_queue_stopped(de->dev) && (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4)))
591 netif_wake_queue(de->dev);
592}
593
594static int de_start_xmit (struct sk_buff *skb, struct net_device *dev)
595{
596 struct de_private *de = dev->priv;
597 unsigned int entry, tx_free;
598 u32 mapping, len, flags = FirstFrag | LastFrag;
599 struct de_desc *txd;
600
601 spin_lock_irq(&de->lock);
602
603 tx_free = TX_BUFFS_AVAIL(de);
604 if (tx_free == 0) {
605 netif_stop_queue(dev);
606 spin_unlock_irq(&de->lock);
607 return 1;
608 }
609 tx_free--;
610
611 entry = de->tx_head;
612
613 txd = &de->tx_ring[entry];
614
615 len = skb->len;
616 mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
617 if (entry == (DE_TX_RING_SIZE - 1))
618 flags |= RingEnd;
619 if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
620 flags |= TxSwInt;
621 flags |= len;
622 txd->opts2 = cpu_to_le32(flags);
623 txd->addr1 = cpu_to_le32(mapping);
624
625 de->tx_skb[entry].skb = skb;
626 de->tx_skb[entry].mapping = mapping;
627 wmb();
628
629 txd->opts1 = cpu_to_le32(DescOwn);
630 wmb();
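	/* Ordering note: the descriptor contents (opts2, addr1) and the
	 * tx_skb[] bookkeeping are made visible before DescOwn is handed to
	 * the chip, and the second barrier keeps that hand-off ahead of the
	 * TxPoll doorbell written after the lock is dropped.
	 */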
631
632 de->tx_head = NEXT_TX(entry);
633 if (netif_msg_tx_queued(de))
634 printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
635 dev->name, entry, skb->len);
636
637 if (tx_free == 0)
638 netif_stop_queue(dev);
639
640 spin_unlock_irq(&de->lock);
641
642 /* Trigger an immediate transmit demand. */
643 dw32(TxPoll, NormalTxPoll);
644 dev->trans_start = jiffies;
645
646 return 0;
647}
648
649/* Set or clear the multicast filter for this adaptor.
650 Note that we only use exclusion around actually queueing the
651 new frame, not around filling de->setup_frame. This is non-deterministic
652 when re-entered but still correct. */
653
654#undef set_bit_le
655#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
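/* Setup-frame layout, as implied by the two builders below: the frame
 * is DE_SETUP_FRAME_WORDS (96) 16-bit words, i.e. 192 bytes, viewed by
 * the chip as 16 filter entries of 12 bytes each -- either 16 perfect
 * MAC addresses, or a 512-bit multicast hash table plus our own
 * address in the last entry.  Every 16-bit value is written twice so
 * that the shortword the chip actually reads is correct on both
 * little- and big-endian machines (see the note in __de_set_rx_mode()).
 */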
656
657static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
658{
659 struct de_private *de = dev->priv;
660 u16 hash_table[32];
661 struct dev_mc_list *mclist;
662 int i;
663 u16 *eaddrs;
664
665 memset(hash_table, 0, sizeof(hash_table));
666 set_bit_le(255, hash_table); /* Broadcast entry */
667 /* This should work on big-endian machines as well. */
668 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
669 i++, mclist = mclist->next) {
670 int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
671
672 set_bit_le(index, hash_table);
673
674 for (i = 0; i < 32; i++) {
675 *setup_frm++ = hash_table[i];
676 *setup_frm++ = hash_table[i];
677 }
678 setup_frm = &de->setup_frame[13*6];
679 }
680
681 /* Fill the final entry with our physical address. */
682 eaddrs = (u16 *)dev->dev_addr;
683 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
684 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
685 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
686}
687
688static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
689{
690 struct de_private *de = dev->priv;
691 struct dev_mc_list *mclist;
692 int i;
693 u16 *eaddrs;
694
695 /* We have <= 14 addresses so we can use the wonderful
696 16 address perfect filtering of the Tulip. */
697 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
698 i++, mclist = mclist->next) {
699 eaddrs = (u16 *)mclist->dmi_addr;
700 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
701 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
702 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
703 }
704 /* Fill the unused entries with the broadcast address. */
705 memset(setup_frm, 0xff, (15-i)*12);
706 setup_frm = &de->setup_frame[15*6];
707
708 /* Fill the final entry with our physical address. */
709 eaddrs = (u16 *)dev->dev_addr;
710 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
711 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
712 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
713}
714
715
716static void __de_set_rx_mode (struct net_device *dev)
717{
718 struct de_private *de = dev->priv;
719 u32 macmode;
720 unsigned int entry;
721 u32 mapping;
722 struct de_desc *txd;
723 struct de_desc *dummy_txd = NULL;
724
725 macmode = dr32(MacMode) & ~(AcceptAllMulticast | AcceptAllPhys);
726
727 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
728 macmode |= AcceptAllMulticast | AcceptAllPhys;
729 goto out;
730 }
731
732 if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
733 /* Too many to filter well -- accept all multicasts. */
734 macmode |= AcceptAllMulticast;
735 goto out;
736 }
737
738 /* Note that only the low-address shortword of setup_frame is valid!
739 The values are doubled for big-endian architectures. */
740 if (dev->mc_count > 14) /* Must use a multicast hash table. */
741 build_setup_frame_hash (de->setup_frame, dev);
742 else
743 build_setup_frame_perfect (de->setup_frame, dev);
744
745 /*
746 * Now add this frame to the Tx list.
747 */
748
749 entry = de->tx_head;
750
751 /* Avoid a chip errata by prefixing a dummy entry. */
752 if (entry != 0) {
753 de->tx_skb[entry].skb = DE_DUMMY_SKB;
754
755 dummy_txd = &de->tx_ring[entry];
756 dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ?
757 cpu_to_le32(RingEnd) : 0;
758 dummy_txd->addr1 = 0;
759
760 /* Must set DescOwned later to avoid race with chip */
761
762 entry = NEXT_TX(entry);
763 }
764
765 de->tx_skb[entry].skb = DE_SETUP_SKB;
766 de->tx_skb[entry].mapping = mapping =
767 pci_map_single (de->pdev, de->setup_frame,
768 sizeof (de->setup_frame), PCI_DMA_TODEVICE);
769
770 /* Put the setup frame on the Tx list. */
771 txd = &de->tx_ring[entry];
772 if (entry == (DE_TX_RING_SIZE - 1))
773 txd->opts2 = cpu_to_le32(SetupFrame | RingEnd | sizeof (de->setup_frame));
774 else
775 txd->opts2 = cpu_to_le32(SetupFrame | sizeof (de->setup_frame));
776 txd->addr1 = cpu_to_le32(mapping);
777 wmb();
778
779 txd->opts1 = cpu_to_le32(DescOwn);
780 wmb();
781
782 if (dummy_txd) {
783 dummy_txd->opts1 = cpu_to_le32(DescOwn);
784 wmb();
785 }
786
787 de->tx_head = NEXT_TX(entry);
788
 789 BUG_ON(TX_BUFFS_AVAIL(de) < 0);
790 if (TX_BUFFS_AVAIL(de) == 0)
791 netif_stop_queue(dev);
792
793 /* Trigger an immediate transmit demand. */
794 dw32(TxPoll, NormalTxPoll);
795
796out:
797 if (macmode != dr32(MacMode))
798 dw32(MacMode, macmode);
799}
800
801static void de_set_rx_mode (struct net_device *dev)
802{
803 unsigned long flags;
804 struct de_private *de = dev->priv;
805
806 spin_lock_irqsave (&de->lock, flags);
807 __de_set_rx_mode(dev);
808 spin_unlock_irqrestore (&de->lock, flags);
809}
810
811static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
812{
813 if (unlikely(rx_missed & RxMissedOver))
814 de->net_stats.rx_missed_errors += RxMissedMask;
815 else
816 de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask);
817}
818
819static void __de_get_stats(struct de_private *de)
820{
821 u32 tmp = dr32(RxMissed); /* self-clearing */
822
823 de_rx_missed(de, tmp);
824}
825
826static struct net_device_stats *de_get_stats(struct net_device *dev)
827{
828 struct de_private *de = dev->priv;
829
 830 /* The chip only needs to report frames it silently dropped. */
831 spin_lock_irq(&de->lock);
832 if (netif_running(dev) && netif_device_present(dev))
833 __de_get_stats(de);
834 spin_unlock_irq(&de->lock);
835
836 return &de->net_stats;
837}
838
839static inline int de_is_running (struct de_private *de)
840{
841 return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0;
842}
843
844static void de_stop_rxtx (struct de_private *de)
845{
846 u32 macmode;
847 unsigned int work = 1000;
848
849 macmode = dr32(MacMode);
850 if (macmode & RxTx) {
851 dw32(MacMode, macmode & ~RxTx);
852 dr32(MacMode);
853 }
854
855 while (--work > 0) {
856 if (!de_is_running(de))
857 return;
858 cpu_relax();
859 }
 860
861 printk(KERN_WARNING "%s: timeout expired stopping DMA\n", de->dev->name);
862}
863
864static inline void de_start_rxtx (struct de_private *de)
865{
866 u32 macmode;
867
868 macmode = dr32(MacMode);
869 if ((macmode & RxTx) != RxTx) {
870 dw32(MacMode, macmode | RxTx);
871 dr32(MacMode);
872 }
873}
874
875static void de_stop_hw (struct de_private *de)
876{
877
878 udelay(5);
879 dw32(IntrMask, 0);
880
881 de_stop_rxtx(de);
882
883 dw32(MacStatus, dr32(MacStatus));
884
885 udelay(10);
886
887 de->rx_tail = 0;
888 de->tx_head = de->tx_tail = 0;
889}
890
891static void de_link_up(struct de_private *de)
892{
893 if (!netif_carrier_ok(de->dev)) {
894 netif_carrier_on(de->dev);
895 if (netif_msg_link(de))
896 printk(KERN_INFO "%s: link up, media %s\n",
897 de->dev->name, media_name[de->media_type]);
898 }
899}
900
901static void de_link_down(struct de_private *de)
902{
903 if (netif_carrier_ok(de->dev)) {
904 netif_carrier_off(de->dev);
905 if (netif_msg_link(de))
906 printk(KERN_INFO "%s: link down\n", de->dev->name);
907 }
908}
909
910static void de_set_media (struct de_private *de)
911{
912 unsigned media = de->media_type;
913 u32 macmode = dr32(MacMode);
914
 915 BUG_ON(de_is_running(de));
916
917 if (de->de21040)
918 dw32(CSR11, FULL_DUPLEX_MAGIC);
919 dw32(CSR13, 0); /* Reset phy */
920 dw32(CSR14, de->media[media].csr14);
921 dw32(CSR15, de->media[media].csr15);
922 dw32(CSR13, de->media[media].csr13);
923
924 /* must delay 10ms before writing to other registers,
925 * especially CSR6
926 */
927 mdelay(10);
928
929 if (media == DE_MEDIA_TP_FD)
930 macmode |= FullDuplex;
931 else
932 macmode &= ~FullDuplex;
 933
934 if (netif_msg_link(de)) {
935 printk(KERN_INFO "%s: set link %s\n"
936 KERN_INFO "%s: mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n"
937 KERN_INFO "%s: set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
938 de->dev->name, media_name[media],
939 de->dev->name, dr32(MacMode), dr32(SIAStatus),
940 dr32(CSR13), dr32(CSR14), dr32(CSR15),
941 de->dev->name, macmode, de->media[media].csr13,
942 de->media[media].csr14, de->media[media].csr15);
943 }
944 if (macmode != dr32(MacMode))
945 dw32(MacMode, macmode);
946}
947
948static void de_next_media (struct de_private *de, u32 *media,
949 unsigned int n_media)
950{
951 unsigned int i;
952
953 for (i = 0; i < n_media; i++) {
954 if (de_ok_to_advertise(de, media[i])) {
955 de->media_type = media[i];
956 return;
957 }
958 }
959}
960
961static void de21040_media_timer (unsigned long data)
962{
963 struct de_private *de = (struct de_private *) data;
964 struct net_device *dev = de->dev;
965 u32 status = dr32(SIAStatus);
966 unsigned int carrier;
967 unsigned long flags;
 968
 969 carrier = (status & NetCxnErr) ? 0 : 1;
 970
971 if (carrier) {
972 if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
973 goto no_link_yet;
974
975 de->media_timer.expires = jiffies + DE_TIMER_LINK;
976 add_timer(&de->media_timer);
977 if (!netif_carrier_ok(dev))
978 de_link_up(de);
979 else
980 if (netif_msg_timer(de))
981 printk(KERN_INFO "%s: %s link ok, status %x\n",
982 dev->name, media_name[de->media_type],
983 status);
984 return;
985 }
986
 987 de_link_down(de);
988
989 if (de->media_lock)
990 return;
991
992 if (de->media_type == DE_MEDIA_AUI) {
993 u32 next_state = DE_MEDIA_TP;
994 de_next_media(de, &next_state, 1);
995 } else {
996 u32 next_state = DE_MEDIA_AUI;
997 de_next_media(de, &next_state, 1);
998 }
999
1000 spin_lock_irqsave(&de->lock, flags);
1001 de_stop_rxtx(de);
1002 spin_unlock_irqrestore(&de->lock, flags);
1003 de_set_media(de);
1004 de_start_rxtx(de);
1005
1006no_link_yet:
1007 de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
1008 add_timer(&de->media_timer);
1009
1010 if (netif_msg_timer(de))
1011 printk(KERN_INFO "%s: no link, trying media %s, status %x\n",
1012 dev->name, media_name[de->media_type], status);
1013}
1014
1015static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
1016{
1017 switch (new_media) {
1018 case DE_MEDIA_TP_AUTO:
1019 if (!(de->media_advertise & ADVERTISED_Autoneg))
1020 return 0;
1021 if (!(de->media_advertise & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full)))
1022 return 0;
1023 break;
1024 case DE_MEDIA_BNC:
1025 if (!(de->media_advertise & ADVERTISED_BNC))
1026 return 0;
1027 break;
1028 case DE_MEDIA_AUI:
1029 if (!(de->media_advertise & ADVERTISED_AUI))
1030 return 0;
1031 break;
1032 case DE_MEDIA_TP:
1033 if (!(de->media_advertise & ADVERTISED_10baseT_Half))
1034 return 0;
1035 break;
1036 case DE_MEDIA_TP_FD:
1037 if (!(de->media_advertise & ADVERTISED_10baseT_Full))
1038 return 0;
1039 break;
1040 }
 1041
1042 return 1;
1043}
1044
1045static void de21041_media_timer (unsigned long data)
1046{
1047 struct de_private *de = (struct de_private *) data;
1048 struct net_device *dev = de->dev;
1049 u32 status = dr32(SIAStatus);
1050 unsigned int carrier;
1051 unsigned long flags;
 1052
 1053 carrier = (status & NetCxnErr) ? 0 : 1;
 1054
1055 if (carrier) {
1056 if ((de->media_type == DE_MEDIA_TP_AUTO ||
1057 de->media_type == DE_MEDIA_TP ||
1058 de->media_type == DE_MEDIA_TP_FD) &&
1059 (status & LinkFailStatus))
1060 goto no_link_yet;
1061
1062 de->media_timer.expires = jiffies + DE_TIMER_LINK;
1063 add_timer(&de->media_timer);
1064 if (!netif_carrier_ok(dev))
1065 de_link_up(de);
1066 else
1067 if (netif_msg_timer(de))
1068 printk(KERN_INFO "%s: %s link ok, mode %x status %x\n",
1069 dev->name, media_name[de->media_type],
1070 dr32(MacMode), status);
1071 return;
1072 }
1073
 1074 de_link_down(de);
1075
1076 /* if media type locked, don't switch media */
1077 if (de->media_lock)
1078 goto set_media;
1079
1080 /* if activity detected, use that as hint for new media type */
1081 if (status & NonselPortActive) {
1082 unsigned int have_media = 1;
1083
1084 /* if AUI/BNC selected, then activity is on TP port */
1085 if (de->media_type == DE_MEDIA_AUI ||
1086 de->media_type == DE_MEDIA_BNC) {
1087 if (de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))
1088 de->media_type = DE_MEDIA_TP_AUTO;
1089 else
1090 have_media = 0;
1091 }
1092
1093 /* TP selected. If there is only TP and BNC, then it's BNC */
1094 else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_BNC) &&
1095 de_ok_to_advertise(de, DE_MEDIA_BNC))
1096 de->media_type = DE_MEDIA_BNC;
1097
1098 /* TP selected. If there is only TP and AUI, then it's AUI */
1099 else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_AUI) &&
1100 de_ok_to_advertise(de, DE_MEDIA_AUI))
1101 de->media_type = DE_MEDIA_AUI;
1102
1103 /* otherwise, ignore the hint */
1104 else
1105 have_media = 0;
1106
1107 if (have_media)
1108 goto set_media;
1109 }
1110
1111 /*
1112 * Absent or ambiguous activity hint, move to next advertised
1113 * media state. If de->media_type is left unchanged, this
1114 * simply resets the PHY and reloads the current media settings.
1115 */
1116 if (de->media_type == DE_MEDIA_AUI) {
1117 u32 next_states[] = { DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
1118 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1119 } else if (de->media_type == DE_MEDIA_BNC) {
1120 u32 next_states[] = { DE_MEDIA_TP_AUTO, DE_MEDIA_AUI };
1121 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1122 } else {
1123 u32 next_states[] = { DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
1124 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1125 }
 1126
1127set_media:
1128 spin_lock_irqsave(&de->lock, flags);
1129 de_stop_rxtx(de);
1130 spin_unlock_irqrestore(&de->lock, flags);
1131 de_set_media(de);
1132 de_start_rxtx(de);
1133
1134no_link_yet:
1135 de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
1136 add_timer(&de->media_timer);
1137
1138 if (netif_msg_timer(de))
1139 printk(KERN_INFO "%s: no link, trying media %s, status %x\n",
1140 dev->name, media_name[de->media_type], status);
1141}
1142
1143static void de_media_interrupt (struct de_private *de, u32 status)
1144{
1145 if (status & LinkPass) {
1146 de_link_up(de);
1147 mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
1148 return;
1149 }
 1150
 1151 BUG_ON(!(status & LinkFail));
1152
1153 if (netif_carrier_ok(de->dev)) {
1154 de_link_down(de);
1155 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1156 }
1157}
1158
1159static int de_reset_mac (struct de_private *de)
1160{
1161 u32 status, tmp;
1162
1163 /*
1164 * Reset MAC. de4x5.c and tulip.c examined for "advice"
1165 * in this area.
1166 */
1167
1168 if (dr32(BusMode) == 0xffffffff)
1169 return -EBUSY;
1170
1171 /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
1172 dw32 (BusMode, CmdReset);
1173 mdelay (1);
1174
1175 dw32 (BusMode, de_bus_mode);
1176 mdelay (1);
1177
1178 for (tmp = 0; tmp < 5; tmp++) {
1179 dr32 (BusMode);
1180 mdelay (1);
1181 }
1182
1183 mdelay (1);
1184
1185 status = dr32(MacStatus);
1186 if (status & (RxState | TxState))
1187 return -EBUSY;
1188 if (status == 0xffffffff)
1189 return -ENODEV;
1190 return 0;
1191}
1192
1193static void de_adapter_wake (struct de_private *de)
1194{
1195 u32 pmctl;
1196
1197 if (de->de21040)
1198 return;
1199
1200 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1201 if (pmctl & PM_Mask) {
1202 pmctl &= ~PM_Mask;
1203 pci_write_config_dword(de->pdev, PCIPM, pmctl);
1204
1205 /* de4x5.c delays, so we do too */
1206 msleep(10);
1207 }
1208}
1209
1210static void de_adapter_sleep (struct de_private *de)
1211{
1212 u32 pmctl;
1213
1214 if (de->de21040)
1215 return;
1216
1217 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1218 pmctl |= PM_Sleep;
1219 pci_write_config_dword(de->pdev, PCIPM, pmctl);
1220}
1221
1222static int de_init_hw (struct de_private *de)
1223{
1224 struct net_device *dev = de->dev;
1225 u32 macmode;
1226 int rc;
1227
1228 de_adapter_wake(de);
 1229
1230 macmode = dr32(MacMode) & ~MacModeClear;
1231
1232 rc = de_reset_mac(de);
1233 if (rc)
1234 return rc;
1235
1236 de_set_media(de); /* reset phy */
1237
1238 dw32(RxRingAddr, de->ring_dma);
1239 dw32(TxRingAddr, de->ring_dma + (sizeof(struct de_desc) * DE_RX_RING_SIZE));
1240
1241 dw32(MacMode, RxTx | macmode);
1242
1243 dr32(RxMissed); /* self-clearing */
1244
1245 dw32(IntrMask, de_intr_mask);
1246
1247 de_set_rx_mode(dev);
1248
1249 return 0;
1250}
1251
1252static int de_refill_rx (struct de_private *de)
1253{
1254 unsigned i;
1255
1256 for (i = 0; i < DE_RX_RING_SIZE; i++) {
1257 struct sk_buff *skb;
1258
1259 skb = dev_alloc_skb(de->rx_buf_sz);
1260 if (!skb)
1261 goto err_out;
1262
1263 skb->dev = de->dev;
1264
1265 de->rx_skb[i].mapping = pci_map_single(de->pdev,
 1266 skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1267 de->rx_skb[i].skb = skb;
1268
1269 de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
1270 if (i == (DE_RX_RING_SIZE - 1))
1271 de->rx_ring[i].opts2 =
1272 cpu_to_le32(RingEnd | de->rx_buf_sz);
1273 else
1274 de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz);
1275 de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping);
1276 de->rx_ring[i].addr2 = 0;
1277 }
1278
1279 return 0;
1280
1281err_out:
1282 de_clean_rings(de);
1283 return -ENOMEM;
1284}
1285
1286static int de_init_rings (struct de_private *de)
1287{
1288 memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1289 de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1290
1291 de->rx_tail = 0;
1292 de->tx_head = de->tx_tail = 0;
1293
1294 return de_refill_rx (de);
1295}
1296
1297static int de_alloc_rings (struct de_private *de)
1298{
1299 de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma);
1300 if (!de->rx_ring)
1301 return -ENOMEM;
1302 de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
1303 return de_init_rings(de);
1304}
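/* A single coherent allocation of DE_RING_BYTES holds both rings back
 * to back: rx_ring starts at ring_dma and tx_ring follows immediately,
 * which is why de_init_hw() programs TxRingAddr as
 * ring_dma + sizeof(struct de_desc) * DE_RX_RING_SIZE.
 */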
1305
1306static void de_clean_rings (struct de_private *de)
1307{
1308 unsigned i;
1309
1310 memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE);
1311 de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1312 wmb();
1313 memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1314 de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1315 wmb();
1316
1317 for (i = 0; i < DE_RX_RING_SIZE; i++) {
1318 if (de->rx_skb[i].skb) {
1319 pci_unmap_single(de->pdev, de->rx_skb[i].mapping,
1320 de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1321 dev_kfree_skb(de->rx_skb[i].skb);
1322 }
1323 }
1324
1325 for (i = 0; i < DE_TX_RING_SIZE; i++) {
1326 struct sk_buff *skb = de->tx_skb[i].skb;
1327 if ((skb) && (skb != DE_DUMMY_SKB)) {
1328 if (skb != DE_SETUP_SKB) {
1329 de->net_stats.tx_dropped++;
1330 pci_unmap_single(de->pdev,
1331 de->tx_skb[i].mapping,
1332 skb->len, PCI_DMA_TODEVICE);
 1333 dev_kfree_skb(skb);
1334 } else {
1335 pci_unmap_single(de->pdev,
1336 de->tx_skb[i].mapping,
1337 sizeof(de->setup_frame),
1338 PCI_DMA_TODEVICE);
1339 }
1340 }
1341 }
1342
1343 memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE);
1344 memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE);
1345}
1346
1347static void de_free_rings (struct de_private *de)
1348{
1349 de_clean_rings(de);
1350 pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma);
1351 de->rx_ring = NULL;
1352 de->tx_ring = NULL;
1353}
1354
1355static int de_open (struct net_device *dev)
1356{
1357 struct de_private *de = dev->priv;
1358 int rc;
1359
1360 if (netif_msg_ifup(de))
1361 printk(KERN_DEBUG "%s: enabling interface\n", dev->name);
1362
1363 de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1364
1365 rc = de_alloc_rings(de);
1366 if (rc) {
1367 printk(KERN_ERR "%s: ring allocation failure, err=%d\n",
1368 dev->name, rc);
1369 return rc;
1370 }
1371
 1372 dw32(IntrMask, 0);
 1373
 1374 rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev);
1375 if (rc) {
1376 printk(KERN_ERR "%s: IRQ %d request failure, err=%d\n",
1377 dev->name, dev->irq, rc);
1378 goto err_out_free;
1379 }
1380
1381 rc = de_init_hw(de);
1382 if (rc) {
1383 printk(KERN_ERR "%s: h/w init failure, err=%d\n",
1384 dev->name, rc);
1385 goto err_out_free_irq;
1386 }
1387
1388 netif_start_queue(dev);
1389 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1390
1391 return 0;
1392
1393err_out_free_irq:
1394 free_irq(dev->irq, dev);
1395err_out_free:
1396 de_free_rings(de);
1397 return rc;
1398}
1399
1400static int de_close (struct net_device *dev)
1401{
1402 struct de_private *de = dev->priv;
1403 unsigned long flags;
1404
1405 if (netif_msg_ifdown(de))
1406 printk(KERN_DEBUG "%s: disabling interface\n", dev->name);
1407
1408 del_timer_sync(&de->media_timer);
1409
1410 spin_lock_irqsave(&de->lock, flags);
1411 de_stop_hw(de);
1412 netif_stop_queue(dev);
1413 netif_carrier_off(dev);
1414 spin_unlock_irqrestore(&de->lock, flags);
 1415
1416 free_irq(dev->irq, dev);
1417
1418 de_free_rings(de);
1419 de_adapter_sleep(de);
1420 pci_disable_device(de->pdev);
1421 return 0;
1422}
1423
1424static void de_tx_timeout (struct net_device *dev)
1425{
1426 struct de_private *de = dev->priv;
1427
1428 printk(KERN_DEBUG "%s: NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
1429 dev->name, dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
1430 de->rx_tail, de->tx_head, de->tx_tail);
1431
1432 del_timer_sync(&de->media_timer);
1433
1434 disable_irq(dev->irq);
1435 spin_lock_irq(&de->lock);
1436
1437 de_stop_hw(de);
1438 netif_stop_queue(dev);
1439 netif_carrier_off(dev);
1440
1441 spin_unlock_irq(&de->lock);
1442 enable_irq(dev->irq);
 1443
1444 /* Update the error counts. */
1445 __de_get_stats(de);
1446
1447 synchronize_irq(dev->irq);
1448 de_clean_rings(de);
1449
1450 de_init_rings(de);
1451
 1452 de_init_hw(de);
 1453
1454 netif_wake_queue(dev);
1455}
1456
1457static void __de_get_regs(struct de_private *de, u8 *buf)
1458{
1459 int i;
1460 u32 *rbuf = (u32 *)buf;
 1461
1462 /* read all CSRs */
1463 for (i = 0; i < DE_NUM_REGS; i++)
1464 rbuf[i] = dr32(i * 8);
1465
1466 /* handle self-clearing RxMissed counter, CSR8 */
1467 de_rx_missed(de, rbuf[8]);
1468}
1469
1470static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1471{
1472 ecmd->supported = de->media_supported;
1473 ecmd->transceiver = XCVR_INTERNAL;
1474 ecmd->phy_address = 0;
1475 ecmd->advertising = de->media_advertise;
 1476
1477 switch (de->media_type) {
1478 case DE_MEDIA_AUI:
1479 ecmd->port = PORT_AUI;
1480 ecmd->speed = 5;
1481 break;
1482 case DE_MEDIA_BNC:
1483 ecmd->port = PORT_BNC;
1484 ecmd->speed = 2;
1485 break;
1486 default:
1487 ecmd->port = PORT_TP;
1488 ecmd->speed = SPEED_10;
1489 break;
1490 }
 1491
1492 if (dr32(MacMode) & FullDuplex)
1493 ecmd->duplex = DUPLEX_FULL;
1494 else
1495 ecmd->duplex = DUPLEX_HALF;
1496
1497 if (de->media_lock)
1498 ecmd->autoneg = AUTONEG_DISABLE;
1499 else
1500 ecmd->autoneg = AUTONEG_ENABLE;
1501
1502 /* ignore maxtxpkt, maxrxpkt for now */
1503
1504 return 0;
1505}
1506
1507static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1508{
1509 u32 new_media;
1510 unsigned int media_lock;
1511
1512 if (ecmd->speed != SPEED_10 && ecmd->speed != 5 && ecmd->speed != 2)
1513 return -EINVAL;
1514 if (de->de21040 && ecmd->speed == 2)
1515 return -EINVAL;
1516 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
1517 return -EINVAL;
1518 if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC)
1519 return -EINVAL;
1520 if (de->de21040 && ecmd->port == PORT_BNC)
1521 return -EINVAL;
1522 if (ecmd->transceiver != XCVR_INTERNAL)
1523 return -EINVAL;
1524 if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
1525 return -EINVAL;
1526 if (ecmd->advertising & ~de->media_supported)
1527 return -EINVAL;
1528 if (ecmd->autoneg == AUTONEG_ENABLE &&
1529 (!(ecmd->advertising & ADVERTISED_Autoneg)))
1530 return -EINVAL;
 1531
1532 switch (ecmd->port) {
1533 case PORT_AUI:
1534 new_media = DE_MEDIA_AUI;
1535 if (!(ecmd->advertising & ADVERTISED_AUI))
1536 return -EINVAL;
1537 break;
1538 case PORT_BNC:
1539 new_media = DE_MEDIA_BNC;
1540 if (!(ecmd->advertising & ADVERTISED_BNC))
1541 return -EINVAL;
1542 break;
1543 default:
1544 if (ecmd->autoneg == AUTONEG_ENABLE)
1545 new_media = DE_MEDIA_TP_AUTO;
1546 else if (ecmd->duplex == DUPLEX_FULL)
1547 new_media = DE_MEDIA_TP_FD;
1548 else
1549 new_media = DE_MEDIA_TP;
1550 if (!(ecmd->advertising & ADVERTISED_TP))
1551 return -EINVAL;
1552 if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half)))
1553 return -EINVAL;
1554 break;
1555 }
 1556
 1557 media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1;
 1558
1559 if ((new_media == de->media_type) &&
1560 (media_lock == de->media_lock) &&
1561 (ecmd->advertising == de->media_advertise))
1562 return 0; /* nothing to change */
 1563
1564 de_link_down(de);
1565 de_stop_rxtx(de);
 1566
1567 de->media_type = new_media;
1568 de->media_lock = media_lock;
1569 de->media_advertise = ecmd->advertising;
1570 de_set_media(de);
 1571
1572 return 0;
1573}
1574
1575static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
1576{
1577 struct de_private *de = dev->priv;
1578
1579 strcpy (info->driver, DRV_NAME);
1580 strcpy (info->version, DRV_VERSION);
1581 strcpy (info->bus_info, pci_name(de->pdev));
1582 info->eedump_len = DE_EEPROM_SIZE;
1583}
1584
1585static int de_get_regs_len(struct net_device *dev)
1586{
1587 return DE_REGS_SIZE;
1588}
1589
1590static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1591{
1592 struct de_private *de = dev->priv;
1593 int rc;
1594
1595 spin_lock_irq(&de->lock);
1596 rc = __de_get_settings(de, ecmd);
1597 spin_unlock_irq(&de->lock);
1598
1599 return rc;
1600}
1601
1602static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1603{
1604 struct de_private *de = dev->priv;
1605 int rc;
1606
1607 spin_lock_irq(&de->lock);
1608 rc = __de_set_settings(de, ecmd);
1609 spin_unlock_irq(&de->lock);
1610
1611 return rc;
1612}
1613
1614static u32 de_get_msglevel(struct net_device *dev)
1615{
1616 struct de_private *de = dev->priv;
1617
1618 return de->msg_enable;
1619}
1620
1621static void de_set_msglevel(struct net_device *dev, u32 msglvl)
1622{
1623 struct de_private *de = dev->priv;
1624
1625 de->msg_enable = msglvl;
1626}
1627
1628static int de_get_eeprom(struct net_device *dev,
1629 struct ethtool_eeprom *eeprom, u8 *data)
1630{
1631 struct de_private *de = dev->priv;
1632
1633 if (!de->ee_data)
1634 return -EOPNOTSUPP;
1635 if ((eeprom->offset != 0) || (eeprom->magic != 0) ||
1636 (eeprom->len != DE_EEPROM_SIZE))
1637 return -EINVAL;
1638 memcpy(data, de->ee_data, eeprom->len);
1639
1640 return 0;
1641}
1642
1643static int de_nway_reset(struct net_device *dev)
1644{
1645 struct de_private *de = dev->priv;
1646 u32 status;
1647
1648 if (de->media_type != DE_MEDIA_TP_AUTO)
1649 return -EINVAL;
1650 if (netif_carrier_ok(de->dev))
1651 de_link_down(de);
1652
1653 status = dr32(SIAStatus);
1654 dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
1655 if (netif_msg_link(de))
1656 printk(KERN_INFO "%s: link nway restart, status %x,%x\n",
1657 de->dev->name, status, dr32(SIAStatus));
1658 return 0;
1659}
1660
1661static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1662 void *data)
1663{
1664 struct de_private *de = dev->priv;
1665
1666 regs->version = (DE_REGS_VER << 2) | de->de21040;
1667
1668 spin_lock_irq(&de->lock);
1669 __de_get_regs(de, data);
1670 spin_unlock_irq(&de->lock);
1671}
1672
 1673static const struct ethtool_ops de_ethtool_ops = {
1674 .get_link = ethtool_op_get_link,
1675 .get_tx_csum = ethtool_op_get_tx_csum,
1676 .get_sg = ethtool_op_get_sg,
1677 .get_drvinfo = de_get_drvinfo,
1678 .get_regs_len = de_get_regs_len,
1679 .get_settings = de_get_settings,
1680 .set_settings = de_set_settings,
1681 .get_msglevel = de_get_msglevel,
1682 .set_msglevel = de_set_msglevel,
1683 .get_eeprom = de_get_eeprom,
1684 .nway_reset = de_nway_reset,
1685 .get_regs = de_get_regs,
1686};
1687
1688static void __init de21040_get_mac_address (struct de_private *de)
1689{
1690 unsigned i;
1691
1692 dw32 (ROMCmd, 0); /* Reset the pointer with a dummy write. */
1693
1694 for (i = 0; i < 6; i++) {
1695 int value, boguscnt = 100000;
1696 do
1697 value = dr32(ROMCmd);
1698 while (value < 0 && --boguscnt > 0);
1699 de->dev->dev_addr[i] = value;
1700 udelay(1);
1701 if (boguscnt <= 0)
1702 printk(KERN_WARNING PFX "timeout reading 21040 MAC address byte %u\n", i);
1703 }
1704}
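/* The 21040 hands out its station address one byte per ROMCmd read; the
 * loop above apparently polls until the chip clears a "data not ready"
 * bit (bit 31, which makes the signed value negative), giving up after
 * a bounded number of attempts.
 */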
1705
1706static void __init de21040_get_media_info(struct de_private *de)
1707{
1708 unsigned int i;
1709
1710 de->media_type = DE_MEDIA_TP;
1711 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full |
1712 SUPPORTED_10baseT_Half | SUPPORTED_AUI;
1713 de->media_advertise = de->media_supported;
1714
1715 for (i = 0; i < DE_MAX_MEDIA; i++) {
1716 switch (i) {
1717 case DE_MEDIA_AUI:
1718 case DE_MEDIA_TP:
1719 case DE_MEDIA_TP_FD:
1720 de->media[i].type = i;
1721 de->media[i].csr13 = t21040_csr13[i];
1722 de->media[i].csr14 = t21040_csr14[i];
1723 de->media[i].csr15 = t21040_csr15[i];
1724 break;
1725 default:
1726 de->media[i].type = DE_MEDIA_INVALID;
1727 break;
1728 }
1729 }
1730}
1731
1732/* Note: this routine returns extra data bits for size detection. */
1733static unsigned __init tulip_read_eeprom(void __iomem *regs, int location, int addr_len)
1734{
1735 int i;
1736 unsigned retval = 0;
1737 void __iomem *ee_addr = regs + ROMCmd;
1738 int read_cmd = location | (EE_READ_CMD << addr_len);
1739
1740 writel(EE_ENB & ~EE_CS, ee_addr);
1741 writel(EE_ENB, ee_addr);
1742
1743 /* Shift the read command bits out. */
1744 for (i = 4 + addr_len; i >= 0; i--) {
1745 short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1746 writel(EE_ENB | dataval, ee_addr);
1747 readl(ee_addr);
1748 writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1749 readl(ee_addr);
1750 retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
1751 }
1752 writel(EE_ENB, ee_addr);
1753 readl(ee_addr);
1754
1755 for (i = 16; i > 0; i--) {
1756 writel(EE_ENB | EE_SHIFT_CLK, ee_addr);
1757 readl(ee_addr);
1758 retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
1759 writel(EE_ENB, ee_addr);
1760 readl(ee_addr);
1761 }
1762
1763 /* Terminate the EEPROM access. */
1764 writel(EE_ENB & ~EE_CS, ee_addr);
1765 return retval;
1766}
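/* The extra data bits mentioned above are used by de21041_get_srom_info()
 * to probe the EEPROM address width: it reads location 0xff with an
 * 8-bit address and, if bit 0x40000 comes back set, assumes the part
 * really uses 8 address bits, otherwise 6.
 */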
1767
1768static void __init de21041_get_srom_info (struct de_private *de)
1769{
1770 unsigned i, sa_offset = 0, ofs;
1771 u8 ee_data[DE_EEPROM_SIZE + 6] = {};
1772 unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6;
1773 struct de_srom_info_leaf *il;
1774 void *bufp;
1775
1776 /* download entire eeprom */
1777 for (i = 0; i < DE_EEPROM_WORDS; i++)
1778 ((u16 *)ee_data)[i] =
1779 le16_to_cpu(tulip_read_eeprom(de->regs, i, ee_addr_size));
1780
1781 /* DEC now has a specification but early board makers
1782 just put the address in the first EEPROM locations. */
1783 /* This does memcmp(eedata, eedata+16, 8) */
1784
1785#ifndef CONFIG_MIPS_COBALT
1786
1787 for (i = 0; i < 8; i ++)
1788 if (ee_data[i] != ee_data[16+i])
1789 sa_offset = 20;
1790
1791#endif
1792
1793 /* store MAC address */
1794 for (i = 0; i < 6; i ++)
1795 de->dev->dev_addr[i] = ee_data[i + sa_offset];
1796
1797 /* get offset of controller 0 info leaf. ignore 2nd byte. */
1798 ofs = ee_data[SROMC0InfoLeaf];
1799 if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block)))
1800 goto bad_srom;
1801
1802 /* get pointer to info leaf */
1803 il = (struct de_srom_info_leaf *) &ee_data[ofs];
1804
1805 /* paranoia checks */
1806 if (il->n_blocks == 0)
1807 goto bad_srom;
1808 if ((sizeof(ee_data) - ofs) <
1809 (sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks)))
1810 goto bad_srom;
1811
1812 /* get default media type */
1813 switch (DE_UNALIGNED_16(&il->default_media)) {
1814 case 0x0001: de->media_type = DE_MEDIA_BNC; break;
1815 case 0x0002: de->media_type = DE_MEDIA_AUI; break;
1816 case 0x0204: de->media_type = DE_MEDIA_TP_FD; break;
1817 default: de->media_type = DE_MEDIA_TP_AUTO; break;
1818 }
 1819
1820 if (netif_msg_probe(de))
1821 printk(KERN_INFO "de%d: SROM leaf offset %u, default media %s\n",
1822 de->board_idx, ofs,
1823 media_name[de->media_type]);
1824
1825 /* init SIA register values to defaults */
1826 for (i = 0; i < DE_MAX_MEDIA; i++) {
1827 de->media[i].type = DE_MEDIA_INVALID;
1828 de->media[i].csr13 = 0xffff;
1829 de->media[i].csr14 = 0xffff;
1830 de->media[i].csr15 = 0xffff;
1831 }
1832
1833 /* parse media blocks to see what medias are supported,
1834 * and if any custom CSR values are provided
1835 */
1836 bufp = ((void *)il) + sizeof(*il);
1837 for (i = 0; i < il->n_blocks; i++) {
1838 struct de_srom_media_block *ib = bufp;
1839 unsigned idx;
1840
1841 /* index based on media type in media block */
1842 switch(ib->opts & MediaBlockMask) {
1843 case 0: /* 10baseT */
1844 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half
1845 | SUPPORTED_Autoneg;
1846 idx = DE_MEDIA_TP;
1847 de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
1848 break;
1849 case 1: /* BNC */
1850 de->media_supported |= SUPPORTED_BNC;
1851 idx = DE_MEDIA_BNC;
1852 break;
1853 case 2: /* AUI */
1854 de->media_supported |= SUPPORTED_AUI;
1855 idx = DE_MEDIA_AUI;
1856 break;
1857 case 4: /* 10baseT-FD */
1858 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full
1859 | SUPPORTED_Autoneg;
1860 idx = DE_MEDIA_TP_FD;
1861 de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
1862 break;
1863 default:
1864 goto bad_srom;
1865 }
1866
1867 de->media[idx].type = idx;
1868
1869 if (netif_msg_probe(de))
1870 printk(KERN_INFO "de%d: media block #%u: %s",
1871 de->board_idx, i,
1872 media_name[de->media[idx].type]);
1873
1874 bufp += sizeof (ib->opts);
1875
1876 if (ib->opts & MediaCustomCSRs) {
1877 de->media[idx].csr13 = DE_UNALIGNED_16(&ib->csr13);
1878 de->media[idx].csr14 = DE_UNALIGNED_16(&ib->csr14);
1879 de->media[idx].csr15 = DE_UNALIGNED_16(&ib->csr15);
1880 bufp += sizeof(ib->csr13) + sizeof(ib->csr14) +
1881 sizeof(ib->csr15);
1882
1883 if (netif_msg_probe(de))
1884 printk(" (%x,%x,%x)\n",
1885 de->media[idx].csr13,
1886 de->media[idx].csr14,
1887 de->media[idx].csr15);
 1888
1889 } else if (netif_msg_probe(de))
1890 printk("\n");
1891
1892 if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
1893 break;
1894 }
1895
1896 de->media_advertise = de->media_supported;
1897
1898fill_defaults:
1899 /* fill in defaults, for cases where custom CSRs not used */
1900 for (i = 0; i < DE_MAX_MEDIA; i++) {
1901 if (de->media[i].csr13 == 0xffff)
1902 de->media[i].csr13 = t21041_csr13[i];
1903 if (de->media[i].csr14 == 0xffff)
1904 de->media[i].csr14 = t21041_csr14[i];
1905 if (de->media[i].csr15 == 0xffff)
1906 de->media[i].csr15 = t21041_csr15[i];
1907 }
1908
1909 de->ee_data = kmalloc(DE_EEPROM_SIZE, GFP_KERNEL);
1910 if (de->ee_data)
1911 memcpy(de->ee_data, &ee_data[0], DE_EEPROM_SIZE);
1912
1913 return;
1914
1915bad_srom:
1916 /* for error cases, it's ok to assume we support all these */
1917 for (i = 0; i < DE_MAX_MEDIA; i++)
1918 de->media[i].type = i;
1919 de->media_supported =
1920 SUPPORTED_10baseT_Half |
1921 SUPPORTED_10baseT_Full |
1922 SUPPORTED_Autoneg |
1923 SUPPORTED_TP |
1924 SUPPORTED_AUI |
1925 SUPPORTED_BNC;
1926 goto fill_defaults;
1927}
1928
1929static int __init de_init_one (struct pci_dev *pdev,
1930 const struct pci_device_id *ent)
1931{
1932 struct net_device *dev;
1933 struct de_private *de;
1934 int rc;
1935 void __iomem *regs;
 1936 unsigned long pciaddr;
1937 static int board_idx = -1;
1938
1939 board_idx++;
1940
1941#ifndef MODULE
1942 if (board_idx == 0)
1943 printk("%s", version);
1944#endif
1945
1946 /* allocate a new ethernet device structure, and fill in defaults */
1947 dev = alloc_etherdev(sizeof(struct de_private));
1948 if (!dev)
1949 return -ENOMEM;
1950
1951 SET_MODULE_OWNER(dev);
1952 SET_NETDEV_DEV(dev, &pdev->dev);
1953 dev->open = de_open;
1954 dev->stop = de_close;
1955 dev->set_multicast_list = de_set_rx_mode;
1956 dev->hard_start_xmit = de_start_xmit;
1957 dev->get_stats = de_get_stats;
1958 dev->ethtool_ops = &de_ethtool_ops;
1959 dev->tx_timeout = de_tx_timeout;
1960 dev->watchdog_timeo = TX_TIMEOUT;
1961
1962 de = dev->priv;
1963 de->de21040 = ent->driver_data == 0 ? 1 : 0;
1964 de->pdev = pdev;
1965 de->dev = dev;
1966 de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug);
1967 de->board_idx = board_idx;
1968 spin_lock_init (&de->lock);
1969 init_timer(&de->media_timer);
1970 if (de->de21040)
1971 de->media_timer.function = de21040_media_timer;
1972 else
1973 de->media_timer.function = de21041_media_timer;
1974 de->media_timer.data = (unsigned long) de;
1975
1976 netif_carrier_off(dev);
1977 netif_stop_queue(dev);
1978
1979 /* wake up device, assign resources */
1980 rc = pci_enable_device(pdev);
1981 if (rc)
1982 goto err_out_free;
1983
1984 /* reserve PCI resources to ensure driver atomicity */
1985 rc = pci_request_regions(pdev, DRV_NAME);
1986 if (rc)
1987 goto err_out_disable;
1988
1989 /* check for invalid IRQ value */
1990 if (pdev->irq < 2) {
1991 rc = -EIO;
1992 printk(KERN_ERR PFX "invalid irq (%d) for pci dev %s\n",
1993 pdev->irq, pci_name(pdev));
1994 goto err_out_res;
1995 }
1996
1997 dev->irq = pdev->irq;
1998
1999 /* obtain and check validity of PCI I/O address */
2000 pciaddr = pci_resource_start(pdev, 1);
2001 if (!pciaddr) {
2002 rc = -EIO;
2003 printk(KERN_ERR PFX "no MMIO resource for pci dev %s\n",
2004 pci_name(pdev));
2005 goto err_out_res;
2006 }
2007 if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
2008 rc = -EIO;
2009 printk(KERN_ERR PFX "MMIO resource (%llx) too small on pci dev %s\n",
2010 (unsigned long long)pci_resource_len(pdev, 1), pci_name(pdev));
2011 goto err_out_res;
2012 }
2013
2014 /* remap CSR registers */
2015 regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
2016 if (!regs) {
2017 rc = -EIO;
2018 printk(KERN_ERR PFX "Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
2019 (unsigned long long)pci_resource_len(pdev, 1),
2020 pciaddr, pci_name(pdev));
2021 goto err_out_res;
2022 }
2023 dev->base_addr = (unsigned long) regs;
2024 de->regs = regs;
2025
2026 de_adapter_wake(de);
2027
2028 /* make sure hardware is not running */
2029 rc = de_reset_mac(de);
2030 if (rc) {
2031 printk(KERN_ERR PFX "Cannot reset MAC, pci dev %s\n",
2032 pci_name(pdev));
2033 goto err_out_iomap;
2034 }
2035
2036 /* get MAC address, initialize default media type and
2037 * get list of supported media
2038 */
2039 if (de->de21040) {
2040 de21040_get_mac_address(de);
2041 de21040_get_media_info(de);
2042 } else {
2043 de21041_get_srom_info(de);
2044 }
2045
2046 /* register new network interface with kernel */
2047 rc = register_netdev(dev);
2048 if (rc)
2049 goto err_out_iomap;
2050
2051 /* print info about board and interface just registered */
2052 printk (KERN_INFO "%s: %s at 0x%lx, "
2053 "%02x:%02x:%02x:%02x:%02x:%02x, "
2054 "IRQ %d\n",
2055 dev->name,
2056 de->de21040 ? "21040" : "21041",
2057 dev->base_addr,
2058 dev->dev_addr[0], dev->dev_addr[1],
2059 dev->dev_addr[2], dev->dev_addr[3],
2060 dev->dev_addr[4], dev->dev_addr[5],
2061 dev->irq);
2062
2063 pci_set_drvdata(pdev, dev);
2064
2065 /* enable busmastering */
2066 pci_set_master(pdev);
2067
2068 /* put adapter to sleep */
2069 de_adapter_sleep(de);
2070
2071 return 0;
2072
2073err_out_iomap:
 2074 kfree(de->ee_data);
2075 iounmap(regs);
2076err_out_res:
2077 pci_release_regions(pdev);
2078err_out_disable:
2079 pci_disable_device(pdev);
2080err_out_free:
2081 free_netdev(dev);
2082 return rc;
2083}
2084
2085static void __exit de_remove_one (struct pci_dev *pdev)
2086{
2087 struct net_device *dev = pci_get_drvdata(pdev);
2088 struct de_private *de = dev->priv;
2089
 2090 BUG_ON(!dev);
 2091 unregister_netdev(dev);
 2092 kfree(de->ee_data);
2093 iounmap(de->regs);
2094 pci_release_regions(pdev);
2095 pci_disable_device(pdev);
2096 pci_set_drvdata(pdev, NULL);
2097 free_netdev(dev);
2098}
2099
2100#ifdef CONFIG_PM
2101
 2102static int de_suspend (struct pci_dev *pdev, pm_message_t state)
2103{
2104 struct net_device *dev = pci_get_drvdata (pdev);
2105 struct de_private *de = dev->priv;
2106
2107 rtnl_lock();
2108 if (netif_running (dev)) {
2109 del_timer_sync(&de->media_timer);
2110
2111 disable_irq(dev->irq);
2112 spin_lock_irq(&de->lock);
2113
2114 de_stop_hw(de);
2115 netif_stop_queue(dev);
2116 netif_device_detach(dev);
2117 netif_carrier_off(dev);
2118
2119 spin_unlock_irq(&de->lock);
2120 enable_irq(dev->irq);
 2121
2122 /* Update the error counts. */
2123 __de_get_stats(de);
2124
2125 synchronize_irq(dev->irq);
2126 de_clean_rings(de);
2127
2128 de_adapter_sleep(de);
2129 pci_disable_device(pdev);
2130 } else {
2131 netif_device_detach(dev);
2132 }
2133 rtnl_unlock();
2134 return 0;
2135}
2136
2137static int de_resume (struct pci_dev *pdev)
2138{
2139 struct net_device *dev = pci_get_drvdata (pdev);
2140 struct de_private *de = dev->priv;
 2141 int retval = 0;
2142
2143 rtnl_lock();
2144 if (netif_device_present(dev))
2145 goto out;
2146 if (!netif_running(dev))
2147 goto out_attach;
2148 if ((retval = pci_enable_device(pdev))) {
2149 printk (KERN_ERR "%s: pci_enable_device failed in resume\n",
2150 dev->name);
2151 goto out;
 2152 }
2153 de_init_hw(de);
2154out_attach:
2155 netif_device_attach(dev);
2156out:
2157 rtnl_unlock();
2158 return 0;
2159}
2160
2161#endif /* CONFIG_PM */
2162
2163static struct pci_driver de_driver = {
2164 .name = DRV_NAME,
2165 .id_table = de_pci_tbl,
2166 .probe = de_init_one,
2167 .remove = __exit_p(de_remove_one),
2168#ifdef CONFIG_PM
2169 .suspend = de_suspend,
2170 .resume = de_resume,
2171#endif
2172};
2173
2174static int __init de_init (void)
2175{
2176#ifdef MODULE
2177 printk("%s", version);
2178#endif
 2179 return pci_register_driver(&de_driver);
2180}
2181
2182static void __exit de_exit (void)
2183{
2184 pci_unregister_driver (&de_driver);
2185}
2186
2187module_init(de_init);
2188module_exit(de_exit);