1/* de2104x.c: A Linux PCI Ethernet driver for Intel/Digital 21040/1 chips. */
2/*
3 Copyright 2001,2003 Jeff Garzik <jgarzik@pobox.com>
4
5 Copyright 1994, 1995 Digital Equipment Corporation. [de4x5.c]
6 Written/copyright 1994-2001 by Donald Becker. [tulip.c]
7
8 This software may be used and distributed according to the terms of
9 the GNU General Public License (GPL), incorporated herein by reference.
10 Drivers based on or derived from this code fall under the GPL and must
11 retain the authorship, copyright and license notice. This file is not
12 a complete program and may only be used when the entire operating
13 system is licensed under the GPL.
14
15 See the file COPYING in this distribution for more information.
16
17 TODO, in rough priority order:
18 * Support forcing media type with a module parameter,
19 like dl2k.c/sundance.c
20 * Constants (module parms?) for Rx work limit
21 * Complete reset on PciErr
22 * Jumbo frames / dev->change_mtu
23 * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
24 * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
25 * Implement Tx software interrupt mitigation via
26 Tx descriptor bit
27
28 */
29
30#define DRV_NAME "de2104x"
31#define DRV_VERSION "0.7"
32#define DRV_RELDATE "Mar 17, 2004"
33
34#include <linux/module.h>
35#include <linux/kernel.h>
36#include <linux/netdevice.h>
37#include <linux/etherdevice.h>
38#include <linux/init.h>
39#include <linux/pci.h>
40#include <linux/delay.h>
41#include <linux/ethtool.h>
42#include <linux/compiler.h>
43#include <linux/rtnetlink.h>
44#include <linux/crc32.h>
45
46#include <asm/io.h>
47#include <asm/irq.h>
48#include <asm/uaccess.h>
49#include <asm/unaligned.h>
50
51/* These identify the driver base version and may not be removed. */
52static char version[] =
53KERN_INFO DRV_NAME " PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
54
55MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
56MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
57MODULE_LICENSE("GPL");
58MODULE_VERSION(DRV_VERSION);
59
60static int debug = -1;
61module_param (debug, int, 0);
62MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");
63
64/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
65#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
66 defined(CONFIG_SPARC) || defined(__ia64__) || \
67 defined(__sh__) || defined(__mips__)
68static int rx_copybreak = 1518;
69#else
70static int rx_copybreak = 100;
71#endif
72module_param (rx_copybreak, int, 0);
73MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied");
74
75#define PFX DRV_NAME ": "
76
77#define DE_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
78 NETIF_MSG_PROBE | \
79 NETIF_MSG_LINK | \
80 NETIF_MSG_IFDOWN | \
81 NETIF_MSG_IFUP | \
82 NETIF_MSG_RX_ERR | \
83 NETIF_MSG_TX_ERR)
84
85/* Descriptor skip length in 32 bit longwords. */
86#ifndef CONFIG_DE2104X_DSL
87#define DSL 0
88#else
89#define DSL CONFIG_DE2104X_DSL
90#endif
91
92#define DE_RX_RING_SIZE 64
93#define DE_TX_RING_SIZE 64
94#define DE_RING_BYTES \
95 ((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \
96 (sizeof(struct de_desc) * DE_TX_RING_SIZE))
97#define NEXT_TX(N) (((N) + 1) & (DE_TX_RING_SIZE - 1))
98#define NEXT_RX(N) (((N) + 1) & (DE_RX_RING_SIZE - 1))
99#define TX_BUFFS_AVAIL(CP) \
100 (((CP)->tx_tail <= (CP)->tx_head) ? \
101 (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head : \
102 (CP)->tx_tail - (CP)->tx_head - 1)
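/*
 * Ring bookkeeping: both rings are power-of-two sized, so NEXT_TX/NEXT_RX
 * wrap with a simple mask.  TX_BUFFS_AVAIL deliberately leaves one slot
 * unused (hence the "- 1") so that tx_head == tx_tail can only mean
 * "ring empty", never "ring full"; with head == tail it reports
 * DE_TX_RING_SIZE - 1 free entries.
 */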
103
104#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
105#define RX_OFFSET 2
106
107#define DE_SETUP_SKB ((struct sk_buff *) 1)
108#define DE_DUMMY_SKB ((struct sk_buff *) 2)
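/*
 * DE_SETUP_SKB and DE_DUMMY_SKB are sentinel pointer values, not real
 * sk_buffs: they mark Tx slots carrying a setup frame or a dummy
 * descriptor, so de_tx() knows to unmap or skip them instead of trying
 * to free a packet.
 */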
109#define DE_SETUP_FRAME_WORDS 96
110#define DE_EEPROM_WORDS 256
111#define DE_EEPROM_SIZE (DE_EEPROM_WORDS * sizeof(u16))
112#define DE_MAX_MEDIA 5
113
114#define DE_MEDIA_TP_AUTO 0
115#define DE_MEDIA_BNC 1
116#define DE_MEDIA_AUI 2
117#define DE_MEDIA_TP 3
118#define DE_MEDIA_TP_FD 4
119#define DE_MEDIA_INVALID DE_MAX_MEDIA
120#define DE_MEDIA_FIRST 0
121#define DE_MEDIA_LAST (DE_MAX_MEDIA - 1)
122#define DE_AUI_BNC (SUPPORTED_AUI | SUPPORTED_BNC)
123
124#define DE_TIMER_LINK (60 * HZ)
125#define DE_TIMER_NO_LINK (5 * HZ)
126
127#define DE_NUM_REGS 16
128#define DE_REGS_SIZE (DE_NUM_REGS * sizeof(u32))
129#define DE_REGS_VER 1
130
131/* Time in jiffies before concluding the transmitter is hung. */
132#define TX_TIMEOUT (6*HZ)
133
134/* This is a mysterious value that can be written to CSR11 in the 21040 (only)
135 to support a pre-NWay full-duplex signaling mechanism using short frames.
136 No one knows what it should be, but if left at its default value some
137 10base2(!) packets trigger a full-duplex-request interrupt. */
138#define FULL_DUPLEX_MAGIC 0x6969
139
140enum {
141 /* NIC registers */
142 BusMode = 0x00,
143 TxPoll = 0x08,
144 RxPoll = 0x10,
145 RxRingAddr = 0x18,
146 TxRingAddr = 0x20,
147 MacStatus = 0x28,
148 MacMode = 0x30,
149 IntrMask = 0x38,
150 RxMissed = 0x40,
151 ROMCmd = 0x48,
152 CSR11 = 0x58,
153 SIAStatus = 0x60,
154 CSR13 = 0x68,
155 CSR14 = 0x70,
156 CSR15 = 0x78,
157 PCIPM = 0x40,
158
159 /* BusMode bits */
160 CmdReset = (1 << 0),
161 CacheAlign16 = 0x00008000,
162 BurstLen4 = 0x00000400,
 163 DescSkipLen = (DSL << 2),
164
165 /* Rx/TxPoll bits */
166 NormalTxPoll = (1 << 0),
167 NormalRxPoll = (1 << 0),
168
169 /* Tx/Rx descriptor status bits */
170 DescOwn = (1 << 31),
171 RxError = (1 << 15),
172 RxErrLong = (1 << 7),
173 RxErrCRC = (1 << 1),
174 RxErrFIFO = (1 << 0),
175 RxErrRunt = (1 << 11),
176 RxErrFrame = (1 << 14),
177 RingEnd = (1 << 25),
178 FirstFrag = (1 << 29),
179 LastFrag = (1 << 30),
180 TxError = (1 << 15),
181 TxFIFOUnder = (1 << 1),
182 TxLinkFail = (1 << 2) | (1 << 10) | (1 << 11),
183 TxMaxCol = (1 << 8),
184 TxOWC = (1 << 9),
185 TxJabber = (1 << 14),
186 SetupFrame = (1 << 27),
187 TxSwInt = (1 << 31),
188
189 /* MacStatus bits */
190 IntrOK = (1 << 16),
191 IntrErr = (1 << 15),
192 RxIntr = (1 << 6),
193 RxEmpty = (1 << 7),
194 TxIntr = (1 << 0),
195 TxEmpty = (1 << 2),
196 PciErr = (1 << 13),
197 TxState = (1 << 22) | (1 << 21) | (1 << 20),
198 RxState = (1 << 19) | (1 << 18) | (1 << 17),
199 LinkFail = (1 << 12),
200 LinkPass = (1 << 4),
201 RxStopped = (1 << 8),
202 TxStopped = (1 << 1),
203
204 /* MacMode bits */
205 TxEnable = (1 << 13),
206 RxEnable = (1 << 1),
207 RxTx = TxEnable | RxEnable,
208 FullDuplex = (1 << 9),
209 AcceptAllMulticast = (1 << 7),
210 AcceptAllPhys = (1 << 6),
211 BOCnt = (1 << 5),
212 MacModeClear = (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) |
213 RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast,
214
215 /* ROMCmd bits */
216 EE_SHIFT_CLK = 0x02, /* EEPROM shift clock. */
217 EE_CS = 0x01, /* EEPROM chip select. */
218 EE_DATA_WRITE = 0x04, /* Data from the Tulip to EEPROM. */
219 EE_WRITE_0 = 0x01,
220 EE_WRITE_1 = 0x05,
221 EE_DATA_READ = 0x08, /* Data from the EEPROM chip. */
222 EE_ENB = (0x4800 | EE_CS),
223
 224 /* The EEPROM commands include the always-set leading bit. */
225 EE_READ_CMD = 6,
226
227 /* RxMissed bits */
228 RxMissedOver = (1 << 16),
229 RxMissedMask = 0xffff,
230
231 /* SROM-related bits */
232 SROMC0InfoLeaf = 27,
233 MediaBlockMask = 0x3f,
234 MediaCustomCSRs = (1 << 6),
f3b197ac 235
236 /* PCIPM bits */
237 PM_Sleep = (1 << 31),
238 PM_Snooze = (1 << 30),
239 PM_Mask = PM_Sleep | PM_Snooze,
f3b197ac 240
241 /* SIAStatus bits */
242 NWayState = (1 << 14) | (1 << 13) | (1 << 12),
243 NWayRestart = (1 << 12),
244 NonselPortActive = (1 << 9),
245 LinkFailStatus = (1 << 2),
246 NetCxnErr = (1 << 1),
247};
248
249static const u32 de_intr_mask =
250 IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty |
251 LinkPass | LinkFail | PciErr;
252
253/*
 254 * Set the programmable burst length to 4 longwords for all chips;
 255 * DMA errors result without these settings. Cache-align to 16 longwords.
256 */
 257static const u32 de_bus_mode = CacheAlign16 | BurstLen4 | DescSkipLen;
258
259struct de_srom_media_block {
260 u8 opts;
261 u16 csr13;
262 u16 csr14;
263 u16 csr15;
264} __attribute__((packed));
265
266struct de_srom_info_leaf {
267 u16 default_media;
268 u8 n_blocks;
269 u8 unused;
270} __attribute__((packed));
271
272struct de_desc {
273 __le32 opts1;
274 __le32 opts2;
275 __le32 addr1;
276 __le32 addr2;
277#if DSL
278 __le32 skip[DSL];
279#endif
280};
281
282struct media_info {
283 u16 type; /* DE_MEDIA_xxx */
284 u16 csr13;
285 u16 csr14;
286 u16 csr15;
287};
288
289struct ring_info {
290 struct sk_buff *skb;
291 dma_addr_t mapping;
292};
293
294struct de_private {
295 unsigned tx_head;
296 unsigned tx_tail;
297 unsigned rx_tail;
298
299 void __iomem *regs;
300 struct net_device *dev;
301 spinlock_t lock;
302
303 struct de_desc *rx_ring;
304 struct de_desc *tx_ring;
305 struct ring_info tx_skb[DE_TX_RING_SIZE];
306 struct ring_info rx_skb[DE_RX_RING_SIZE];
307 unsigned rx_buf_sz;
308 dma_addr_t ring_dma;
309
310 u32 msg_enable;
311
312 struct net_device_stats net_stats;
313
314 struct pci_dev *pdev;
315
316 u16 setup_frame[DE_SETUP_FRAME_WORDS];
317
318 u32 media_type;
319 u32 media_supported;
320 u32 media_advertise;
321 struct media_info media[DE_MAX_MEDIA];
322 struct timer_list media_timer;
323
324 u8 *ee_data;
325 unsigned board_idx;
326 unsigned de21040 : 1;
327 unsigned media_lock : 1;
328};
329
330
331static void de_set_rx_mode (struct net_device *dev);
332static void de_tx (struct de_private *de);
333static void de_clean_rings (struct de_private *de);
334static void de_media_interrupt (struct de_private *de, u32 status);
335static void de21040_media_timer (unsigned long data);
336static void de21041_media_timer (unsigned long data);
337static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
338
339
 340static DEFINE_PCI_DEVICE_TABLE(de_pci_tbl) = {
341 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
342 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
343 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
344 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
345 { },
346};
347MODULE_DEVICE_TABLE(pci, de_pci_tbl);
348
349static const char * const media_name[DE_MAX_MEDIA] = {
350 "10baseT auto",
351 "BNC",
352 "AUI",
353 "10baseT-HD",
354 "10baseT-FD"
355};
356
357/* 21040 transceiver register settings:
358 * TP AUTO(unused), BNC(unused), AUI, TP, TP FD*/
359static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, };
360static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, };
361static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
362
363/* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
364static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
365static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
366static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
367
368
369#define dr32(reg) readl(de->regs + (reg))
370#define dw32(reg,val) writel((val), de->regs + (reg))
371
372
373static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
374 u32 status, u32 len)
375{
376 if (netif_msg_rx_err (de))
377 printk (KERN_DEBUG
378 "%s: rx err, slot %d status 0x%x len %d\n",
379 de->dev->name, rx_tail, status, len);
380
381 if ((status & 0x38000300) != 0x0300) {
 382 /* Ignore earlier buffers. */
383 if ((status & 0xffff) != 0x7fff) {
384 if (netif_msg_rx_err(de))
385 dev_warn(&de->dev->dev,
386 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
387 status);
388 de->net_stats.rx_length_errors++;
389 }
390 } else if (status & RxError) {
391 /* There was a fatal error. */
392 de->net_stats.rx_errors++; /* end of a packet.*/
393 if (status & 0x0890) de->net_stats.rx_length_errors++;
394 if (status & RxErrCRC) de->net_stats.rx_crc_errors++;
395 if (status & RxErrFIFO) de->net_stats.rx_fifo_errors++;
396 }
397}
398
399static void de_rx (struct de_private *de)
400{
401 unsigned rx_tail = de->rx_tail;
402 unsigned rx_work = DE_RX_RING_SIZE;
403 unsigned drop = 0;
404 int rc;
405
46578a69 406 while (--rx_work) {
407 u32 status, len;
408 dma_addr_t mapping;
409 struct sk_buff *skb, *copy_skb;
410 unsigned copying_skb, buflen;
411
412 skb = de->rx_skb[rx_tail].skb;
7e0b58f3 413 BUG_ON(!skb);
414 rmb();
415 status = le32_to_cpu(de->rx_ring[rx_tail].opts1);
416 if (status & DescOwn)
417 break;
418
419 len = ((status >> 16) & 0x7ff) - 4;
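 /* The length field in the Rx descriptor status includes the 4-byte
  * CRC the chip appends to each frame; the -4 above strips it before
  * the packet is handed to the stack. */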
420 mapping = de->rx_skb[rx_tail].mapping;
421
422 if (unlikely(drop)) {
423 de->net_stats.rx_dropped++;
424 goto rx_next;
425 }
426
427 if (unlikely((status & 0x38008300) != 0x0300)) {
428 de_rx_err_acct(de, rx_tail, status, len);
429 goto rx_next;
430 }
431
432 copying_skb = (len <= rx_copybreak);
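 /* Copybreak: frames no longer than rx_copybreak are copied into a
  * freshly allocated skb sized to the packet so the full-sized ring
  * buffer can stay mapped and be reused; larger frames are passed up
  * as-is and the ring slot gets a brand-new buffer instead. */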
433
434 if (unlikely(netif_msg_rx_status(de)))
435 printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d copying? %d\n",
436 de->dev->name, rx_tail, status, len,
437 copying_skb);
438
439 buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
440 copy_skb = dev_alloc_skb (buflen);
441 if (unlikely(!copy_skb)) {
442 de->net_stats.rx_dropped++;
443 drop = 1;
444 rx_work = 100;
445 goto rx_next;
446 }
447
448 if (!copying_skb) {
449 pci_unmap_single(de->pdev, mapping,
450 buflen, PCI_DMA_FROMDEVICE);
451 skb_put(skb, len);
452
453 mapping =
454 de->rx_skb[rx_tail].mapping =
689be439 455 pci_map_single(de->pdev, copy_skb->data,
456 buflen, PCI_DMA_FROMDEVICE);
457 de->rx_skb[rx_tail].skb = copy_skb;
458 } else {
459 pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
460 skb_reserve(copy_skb, RX_OFFSET);
461 skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
462 len);
463 pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
464
465 /* We'll reuse the original ring buffer. */
466 skb = copy_skb;
467 }
468
469 skb->protocol = eth_type_trans (skb, de->dev);
470
471 de->net_stats.rx_packets++;
472 de->net_stats.rx_bytes += skb->len;
473 rc = netif_rx (skb);
474 if (rc == NET_RX_DROP)
475 drop = 1;
476
477rx_next:
478 if (rx_tail == (DE_RX_RING_SIZE - 1))
479 de->rx_ring[rx_tail].opts2 =
480 cpu_to_le32(RingEnd | de->rx_buf_sz);
481 else
482 de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
483 de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
484 wmb();
485 de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
486 rx_tail = NEXT_RX(rx_tail);
487 }
488
489 if (!rx_work)
f639dc7d 490 dev_warn(&de->dev->dev, "rx work limit reached\n");
491
492 de->rx_tail = rx_tail;
493}
494
 495static irqreturn_t de_interrupt (int irq, void *dev_instance)
496{
497 struct net_device *dev = dev_instance;
8f15ea42 498 struct de_private *de = netdev_priv(dev);
499 u32 status;
500
501 status = dr32(MacStatus);
502 if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))
503 return IRQ_NONE;
504
505 if (netif_msg_intr(de))
506 printk(KERN_DEBUG "%s: intr, status %08x mode %08x desc %u/%u/%u\n",
507 dev->name, status, dr32(MacMode),
508 de->rx_tail, de->tx_head, de->tx_tail);
509
510 dw32(MacStatus, status);
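 /* MacStatus (CSR5) bits are write-one-to-clear, so writing back the
  * value just read acknowledges every interrupt source handled below. */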
511
512 if (status & (RxIntr | RxEmpty)) {
513 de_rx(de);
514 if (status & RxEmpty)
515 dw32(RxPoll, NormalRxPoll);
516 }
517
518 spin_lock(&de->lock);
519
520 if (status & (TxIntr | TxEmpty))
521 de_tx(de);
522
523 if (status & (LinkPass | LinkFail))
524 de_media_interrupt(de, status);
525
526 spin_unlock(&de->lock);
527
528 if (status & PciErr) {
529 u16 pci_status;
530
531 pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
532 pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
533 dev_err(&de->dev->dev,
534 "PCI bus error, status=%08x, PCI status=%04x\n",
535 status, pci_status);
536 }
537
538 return IRQ_HANDLED;
539}
540
541static void de_tx (struct de_private *de)
542{
543 unsigned tx_head = de->tx_head;
544 unsigned tx_tail = de->tx_tail;
545
546 while (tx_tail != tx_head) {
547 struct sk_buff *skb;
548 u32 status;
549
550 rmb();
551 status = le32_to_cpu(de->tx_ring[tx_tail].opts1);
552 if (status & DescOwn)
553 break;
554
555 skb = de->tx_skb[tx_tail].skb;
7e0b58f3 556 BUG_ON(!skb);
557 if (unlikely(skb == DE_DUMMY_SKB))
558 goto next;
559
560 if (unlikely(skb == DE_SETUP_SKB)) {
561 pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
562 sizeof(de->setup_frame), PCI_DMA_TODEVICE);
563 goto next;
564 }
565
566 pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
567 skb->len, PCI_DMA_TODEVICE);
568
569 if (status & LastFrag) {
570 if (status & TxError) {
571 if (netif_msg_tx_err(de))
572 printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
573 de->dev->name, status);
574 de->net_stats.tx_errors++;
575 if (status & TxOWC)
576 de->net_stats.tx_window_errors++;
577 if (status & TxMaxCol)
578 de->net_stats.tx_aborted_errors++;
579 if (status & TxLinkFail)
580 de->net_stats.tx_carrier_errors++;
581 if (status & TxFIFOUnder)
582 de->net_stats.tx_fifo_errors++;
583 } else {
584 de->net_stats.tx_packets++;
585 de->net_stats.tx_bytes += skb->len;
586 if (netif_msg_tx_done(de))
587 printk(KERN_DEBUG "%s: tx done, slot %d\n",
588 de->dev->name, tx_tail);
589 }
590 dev_kfree_skb_irq(skb);
591 }
592
593next:
594 de->tx_skb[tx_tail].skb = NULL;
595
596 tx_tail = NEXT_TX(tx_tail);
597 }
598
599 de->tx_tail = tx_tail;
600
601 if (netif_queue_stopped(de->dev) && (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4)))
602 netif_wake_queue(de->dev);
603}
604
605static netdev_tx_t de_start_xmit (struct sk_buff *skb,
606 struct net_device *dev)
 607{
8f15ea42 608 struct de_private *de = netdev_priv(dev);
609 unsigned int entry, tx_free;
610 u32 mapping, len, flags = FirstFrag | LastFrag;
611 struct de_desc *txd;
612
613 spin_lock_irq(&de->lock);
614
615 tx_free = TX_BUFFS_AVAIL(de);
616 if (tx_free == 0) {
617 netif_stop_queue(dev);
618 spin_unlock_irq(&de->lock);
5b548140 619 return NETDEV_TX_BUSY;
620 }
621 tx_free--;
622
623 entry = de->tx_head;
624
625 txd = &de->tx_ring[entry];
626
627 len = skb->len;
628 mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
629 if (entry == (DE_TX_RING_SIZE - 1))
630 flags |= RingEnd;
631 if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
632 flags |= TxSwInt;
633 flags |= len;
634 txd->opts2 = cpu_to_le32(flags);
635 txd->addr1 = cpu_to_le32(mapping);
636
637 de->tx_skb[entry].skb = skb;
638 de->tx_skb[entry].mapping = mapping;
639 wmb();
640
641 txd->opts1 = cpu_to_le32(DescOwn);
642 wmb();
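 /* The descriptor body (opts2/addr1) and the tx_skb bookkeeping must be
  * visible before DescOwn hands the descriptor to the chip, hence the
  * barriers before and after setting opts1. */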
643
644 de->tx_head = NEXT_TX(entry);
645 if (netif_msg_tx_queued(de))
646 printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
647 dev->name, entry, skb->len);
648
649 if (tx_free == 0)
650 netif_stop_queue(dev);
651
652 spin_unlock_irq(&de->lock);
653
654 /* Trigger an immediate transmit demand. */
655 dw32(TxPoll, NormalTxPoll);
656 dev->trans_start = jiffies;
657
6ed10654 658 return NETDEV_TX_OK;
659}
660
661/* Set or clear the multicast filter for this adaptor.
662 Note that we only use exclusion around actually queueing the
663 new frame, not around filling de->setup_frame. This is non-deterministic
664 when re-entered but still correct. */
665
666#undef set_bit_le
667#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
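/*
 * The setup frame is 192 bytes (DE_SETUP_FRAME_WORDS 16-bit words).  The
 * chip interprets it as 16 entries of three 32-bit longwords but looks only
 * at the low-address 16 bits of each longword, which is why every value
 * below is stored twice in a row: the duplication keeps the layout valid on
 * both little- and big-endian hosts.  In hash-filtering mode the same area
 * holds a 512-bit multicast hash table plus the station address.
 */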
668
669static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
670{
8f15ea42 671 struct de_private *de = netdev_priv(dev);
672 u16 hash_table[32];
673 struct dev_mc_list *mclist;
674 int i;
675 u16 *eaddrs;
676
677 memset(hash_table, 0, sizeof(hash_table));
678 set_bit_le(255, hash_table); /* Broadcast entry */
679 /* This should work on big-endian machines as well. */
4cd24eaf 680 for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
681 i++, mclist = mclist->next) {
682 int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
683
684 set_bit_le(index, hash_table);
685
686 for (i = 0; i < 32; i++) {
687 *setup_frm++ = hash_table[i];
688 *setup_frm++ = hash_table[i];
689 }
690 setup_frm = &de->setup_frame[13*6];
691 }
692
693 /* Fill the final entry with our physical address. */
694 eaddrs = (u16 *)dev->dev_addr;
695 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
696 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
697 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
698}
699
700static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
701{
8f15ea42 702 struct de_private *de = netdev_priv(dev);
703 struct dev_mc_list *mclist;
704 int i;
705 u16 *eaddrs;
706
707 /* We have <= 14 addresses so we can use the wonderful
708 16 address perfect filtering of the Tulip. */
4cd24eaf 709 for (i = 0, mclist = dev->mc_list; i < netdev_mc_count(dev);
710 i++, mclist = mclist->next) {
711 eaddrs = (u16 *)mclist->dmi_addr;
712 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
713 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
714 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
715 }
716 /* Fill the unused entries with the broadcast address. */
717 memset(setup_frm, 0xff, (15-i)*12);
718 setup_frm = &de->setup_frame[15*6];
719
720 /* Fill the final entry with our physical address. */
721 eaddrs = (u16 *)dev->dev_addr;
722 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
723 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
724 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
725}
726
727
728static void __de_set_rx_mode (struct net_device *dev)
729{
8f15ea42 730 struct de_private *de = netdev_priv(dev);
731 u32 macmode;
732 unsigned int entry;
733 u32 mapping;
734 struct de_desc *txd;
735 struct de_desc *dummy_txd = NULL;
736
737 macmode = dr32(MacMode) & ~(AcceptAllMulticast | AcceptAllPhys);
738
739 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
740 macmode |= AcceptAllMulticast | AcceptAllPhys;
741 goto out;
742 }
743
4cd24eaf 744 if ((netdev_mc_count(dev) > 1000) || (dev->flags & IFF_ALLMULTI)) {
745 /* Too many to filter well -- accept all multicasts. */
746 macmode |= AcceptAllMulticast;
747 goto out;
748 }
749
750 /* Note that only the low-address shortword of setup_frame is valid!
751 The values are doubled for big-endian architectures. */
4cd24eaf 752 if (netdev_mc_count(dev) > 14) /* Must use a multicast hash table. */
753 build_setup_frame_hash (de->setup_frame, dev);
754 else
755 build_setup_frame_perfect (de->setup_frame, dev);
756
757 /*
758 * Now add this frame to the Tx list.
759 */
760
761 entry = de->tx_head;
762
 763 /* Avoid a chip erratum by prefixing a dummy entry. */
764 if (entry != 0) {
765 de->tx_skb[entry].skb = DE_DUMMY_SKB;
766
767 dummy_txd = &de->tx_ring[entry];
768 dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ?
769 cpu_to_le32(RingEnd) : 0;
770 dummy_txd->addr1 = 0;
771
772 /* Must set DescOwned later to avoid race with chip */
773
774 entry = NEXT_TX(entry);
775 }
776
777 de->tx_skb[entry].skb = DE_SETUP_SKB;
778 de->tx_skb[entry].mapping = mapping =
779 pci_map_single (de->pdev, de->setup_frame,
780 sizeof (de->setup_frame), PCI_DMA_TODEVICE);
781
782 /* Put the setup frame on the Tx list. */
783 txd = &de->tx_ring[entry];
784 if (entry == (DE_TX_RING_SIZE - 1))
785 txd->opts2 = cpu_to_le32(SetupFrame | RingEnd | sizeof (de->setup_frame));
786 else
787 txd->opts2 = cpu_to_le32(SetupFrame | sizeof (de->setup_frame));
788 txd->addr1 = cpu_to_le32(mapping);
789 wmb();
790
791 txd->opts1 = cpu_to_le32(DescOwn);
792 wmb();
793
794 if (dummy_txd) {
795 dummy_txd->opts1 = cpu_to_le32(DescOwn);
796 wmb();
797 }
798
799 de->tx_head = NEXT_TX(entry);
800
801 if (TX_BUFFS_AVAIL(de) == 0)
802 netif_stop_queue(dev);
803
804 /* Trigger an immediate transmit demand. */
805 dw32(TxPoll, NormalTxPoll);
806
807out:
808 if (macmode != dr32(MacMode))
809 dw32(MacMode, macmode);
810}
811
812static void de_set_rx_mode (struct net_device *dev)
813{
814 unsigned long flags;
8f15ea42 815 struct de_private *de = netdev_priv(dev);
816
817 spin_lock_irqsave (&de->lock, flags);
818 __de_set_rx_mode(dev);
819 spin_unlock_irqrestore (&de->lock, flags);
820}
821
822static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
823{
824 if (unlikely(rx_missed & RxMissedOver))
825 de->net_stats.rx_missed_errors += RxMissedMask;
826 else
827 de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask);
828}
829
830static void __de_get_stats(struct de_private *de)
831{
832 u32 tmp = dr32(RxMissed); /* self-clearing */
833
834 de_rx_missed(de, tmp);
835}
836
837static struct net_device_stats *de_get_stats(struct net_device *dev)
838{
8f15ea42 839 struct de_private *de = netdev_priv(dev);
840
 841 /* The chip itself only tracks frames it silently dropped (RxMissed); all other stats are maintained by the driver. */
842 spin_lock_irq(&de->lock);
843 if (netif_running(dev) && netif_device_present(dev))
844 __de_get_stats(de);
845 spin_unlock_irq(&de->lock);
846
847 return &de->net_stats;
848}
849
850static inline int de_is_running (struct de_private *de)
851{
852 return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0;
853}
854
855static void de_stop_rxtx (struct de_private *de)
856{
857 u32 macmode;
69cac988 858 unsigned int i = 1300/100;
859
860 macmode = dr32(MacMode);
861 if (macmode & RxTx) {
862 dw32(MacMode, macmode & ~RxTx);
863 dr32(MacMode);
864 }
865
866 /* wait until in-flight frame completes.
867 * Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin)
868 * Typically expect this loop to end in < 50 us on 100BT.
869 */
870 while (--i) {
871 if (!de_is_running(de))
872 return;
69cac988 873 udelay(100);
1da177e4 874 }
f3b197ac 875
f639dc7d 876 dev_warn(&de->dev->dev, "timeout expired stopping DMA\n");
877}
878
879static inline void de_start_rxtx (struct de_private *de)
880{
881 u32 macmode;
882
883 macmode = dr32(MacMode);
884 if ((macmode & RxTx) != RxTx) {
885 dw32(MacMode, macmode | RxTx);
886 dr32(MacMode);
887 }
888}
889
890static void de_stop_hw (struct de_private *de)
891{
892
893 udelay(5);
894 dw32(IntrMask, 0);
895
896 de_stop_rxtx(de);
897
898 dw32(MacStatus, dr32(MacStatus));
899
900 udelay(10);
901
902 de->rx_tail = 0;
903 de->tx_head = de->tx_tail = 0;
904}
905
906static void de_link_up(struct de_private *de)
907{
908 if (!netif_carrier_ok(de->dev)) {
909 netif_carrier_on(de->dev);
910 if (netif_msg_link(de))
911 dev_info(&de->dev->dev, "link up, media %s\n",
912 media_name[de->media_type]);
913 }
914}
915
916static void de_link_down(struct de_private *de)
917{
918 if (netif_carrier_ok(de->dev)) {
919 netif_carrier_off(de->dev);
920 if (netif_msg_link(de))
f639dc7d 921 dev_info(&de->dev->dev, "link down\n");
922 }
923}
924
925static void de_set_media (struct de_private *de)
926{
927 unsigned media = de->media_type;
928 u32 macmode = dr32(MacMode);
929
f25f0f8d 930 if (de_is_running(de))
931 dev_warn(&de->dev->dev,
932 "chip is running while changing media!\n");
933
934 if (de->de21040)
935 dw32(CSR11, FULL_DUPLEX_MAGIC);
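 /* CSR13-15 program the on-chip SIA that drives the 10Mb ports.
  * Writing 0 to CSR13 first holds the SIA in reset while CSR14/CSR15
  * are loaded; the final CSR13 write then selects the port and
  * releases the reset with the new settings. */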
936 dw32(CSR13, 0); /* Reset phy */
937 dw32(CSR14, de->media[media].csr14);
938 dw32(CSR15, de->media[media].csr15);
939 dw32(CSR13, de->media[media].csr13);
940
941 /* must delay 10ms before writing to other registers,
942 * especially CSR6
943 */
944 mdelay(10);
945
946 if (media == DE_MEDIA_TP_FD)
947 macmode |= FullDuplex;
948 else
949 macmode &= ~FullDuplex;
f3b197ac 950
1da177e4 951 if (netif_msg_link(de)) {
952 dev_info(&de->dev->dev, "set link %s\n", media_name[media]);
953 dev_info(&de->dev->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
954 dr32(MacMode), dr32(SIAStatus),
955 dr32(CSR13), dr32(CSR14), dr32(CSR15));
956
957 dev_info(&de->dev->dev,
958 "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
959 macmode, de->media[media].csr13,
960 de->media[media].csr14, de->media[media].csr15);
961 }
962 if (macmode != dr32(MacMode))
963 dw32(MacMode, macmode);
964}
965
966static void de_next_media (struct de_private *de, u32 *media,
967 unsigned int n_media)
968{
969 unsigned int i;
970
971 for (i = 0; i < n_media; i++) {
972 if (de_ok_to_advertise(de, media[i])) {
973 de->media_type = media[i];
974 return;
975 }
976 }
977}
978
979static void de21040_media_timer (unsigned long data)
980{
981 struct de_private *de = (struct de_private *) data;
982 struct net_device *dev = de->dev;
983 u32 status = dr32(SIAStatus);
984 unsigned int carrier;
985 unsigned long flags;
f3b197ac 986
1da177e4 987 carrier = (status & NetCxnErr) ? 0 : 1;
f3b197ac 988
989 if (carrier) {
990 if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
991 goto no_link_yet;
992
993 de->media_timer.expires = jiffies + DE_TIMER_LINK;
994 add_timer(&de->media_timer);
995 if (!netif_carrier_ok(dev))
996 de_link_up(de);
997 else
998 if (netif_msg_timer(de))
999 dev_info(&dev->dev, "%s link ok, status %x\n",
1000 media_name[de->media_type], status);
1001 return;
1002 }
1003
f3b197ac 1004 de_link_down(de);
1005
1006 if (de->media_lock)
1007 return;
1008
1009 if (de->media_type == DE_MEDIA_AUI) {
1010 u32 next_state = DE_MEDIA_TP;
1011 de_next_media(de, &next_state, 1);
1012 } else {
1013 u32 next_state = DE_MEDIA_AUI;
1014 de_next_media(de, &next_state, 1);
1015 }
1016
1017 spin_lock_irqsave(&de->lock, flags);
1018 de_stop_rxtx(de);
1019 spin_unlock_irqrestore(&de->lock, flags);
1020 de_set_media(de);
1021 de_start_rxtx(de);
1022
1023no_link_yet:
1024 de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
1025 add_timer(&de->media_timer);
1026
1027 if (netif_msg_timer(de))
1028 dev_info(&dev->dev, "no link, trying media %s, status %x\n",
1029 media_name[de->media_type], status);
1030}
1031
1032static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
1033{
1034 switch (new_media) {
1035 case DE_MEDIA_TP_AUTO:
1036 if (!(de->media_advertise & ADVERTISED_Autoneg))
1037 return 0;
1038 if (!(de->media_advertise & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full)))
1039 return 0;
1040 break;
1041 case DE_MEDIA_BNC:
1042 if (!(de->media_advertise & ADVERTISED_BNC))
1043 return 0;
1044 break;
1045 case DE_MEDIA_AUI:
1046 if (!(de->media_advertise & ADVERTISED_AUI))
1047 return 0;
1048 break;
1049 case DE_MEDIA_TP:
1050 if (!(de->media_advertise & ADVERTISED_10baseT_Half))
1051 return 0;
1052 break;
1053 case DE_MEDIA_TP_FD:
1054 if (!(de->media_advertise & ADVERTISED_10baseT_Full))
1055 return 0;
1056 break;
1057 }
f3b197ac 1058
1059 return 1;
1060}
1061
1062static void de21041_media_timer (unsigned long data)
1063{
1064 struct de_private *de = (struct de_private *) data;
1065 struct net_device *dev = de->dev;
1066 u32 status = dr32(SIAStatus);
1067 unsigned int carrier;
1068 unsigned long flags;
f3b197ac 1069
1da177e4 1070 carrier = (status & NetCxnErr) ? 0 : 1;
f3b197ac 1071
1072 if (carrier) {
1073 if ((de->media_type == DE_MEDIA_TP_AUTO ||
1074 de->media_type == DE_MEDIA_TP ||
1075 de->media_type == DE_MEDIA_TP_FD) &&
1076 (status & LinkFailStatus))
1077 goto no_link_yet;
1078
1079 de->media_timer.expires = jiffies + DE_TIMER_LINK;
1080 add_timer(&de->media_timer);
1081 if (!netif_carrier_ok(dev))
1082 de_link_up(de);
1083 else
1084 if (netif_msg_timer(de))
1085 dev_info(&dev->dev,
1086 "%s link ok, mode %x status %x\n",
1087 media_name[de->media_type],
1088 dr32(MacMode), status);
1089 return;
1090 }
1091
f3b197ac 1092 de_link_down(de);
1093
1094 /* if media type locked, don't switch media */
1095 if (de->media_lock)
1096 goto set_media;
1097
1098 /* if activity detected, use that as hint for new media type */
1099 if (status & NonselPortActive) {
1100 unsigned int have_media = 1;
1101
1102 /* if AUI/BNC selected, then activity is on TP port */
1103 if (de->media_type == DE_MEDIA_AUI ||
1104 de->media_type == DE_MEDIA_BNC) {
1105 if (de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))
1106 de->media_type = DE_MEDIA_TP_AUTO;
1107 else
1108 have_media = 0;
1109 }
1110
1111 /* TP selected. If there is only TP and BNC, then it's BNC */
1112 else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_BNC) &&
1113 de_ok_to_advertise(de, DE_MEDIA_BNC))
1114 de->media_type = DE_MEDIA_BNC;
1115
1116 /* TP selected. If there is only TP and AUI, then it's AUI */
1117 else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_AUI) &&
1118 de_ok_to_advertise(de, DE_MEDIA_AUI))
1119 de->media_type = DE_MEDIA_AUI;
1120
1121 /* otherwise, ignore the hint */
1122 else
1123 have_media = 0;
1124
1125 if (have_media)
1126 goto set_media;
1127 }
1128
1129 /*
1130 * Absent or ambiguous activity hint, move to next advertised
1131 * media state. If de->media_type is left unchanged, this
1132 * simply resets the PHY and reloads the current media settings.
1133 */
1134 if (de->media_type == DE_MEDIA_AUI) {
1135 u32 next_states[] = { DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
1136 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1137 } else if (de->media_type == DE_MEDIA_BNC) {
1138 u32 next_states[] = { DE_MEDIA_TP_AUTO, DE_MEDIA_AUI };
1139 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1140 } else {
1141 u32 next_states[] = { DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
1142 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1143 }
f3b197ac 1144
1145set_media:
1146 spin_lock_irqsave(&de->lock, flags);
1147 de_stop_rxtx(de);
1148 spin_unlock_irqrestore(&de->lock, flags);
1149 de_set_media(de);
1150 de_start_rxtx(de);
1151
1152no_link_yet:
1153 de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
1154 add_timer(&de->media_timer);
1155
1156 if (netif_msg_timer(de))
1157 dev_info(&dev->dev, "no link, trying media %s, status %x\n",
1158 media_name[de->media_type], status);
1159}
1160
1161static void de_media_interrupt (struct de_private *de, u32 status)
1162{
1163 if (status & LinkPass) {
1164 de_link_up(de);
1165 mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
1166 return;
1167 }
f3b197ac 1168
7e0b58f3 1169 BUG_ON(!(status & LinkFail));
1170
1171 if (netif_carrier_ok(de->dev)) {
1172 de_link_down(de);
1173 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1174 }
1175}
1176
1177static int de_reset_mac (struct de_private *de)
1178{
1179 u32 status, tmp;
1180
1181 /*
1182 * Reset MAC. de4x5.c and tulip.c examined for "advice"
1183 * in this area.
1184 */
1185
1186 if (dr32(BusMode) == 0xffffffff)
1187 return -EBUSY;
1188
1189 /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
1190 dw32 (BusMode, CmdReset);
1191 mdelay (1);
1192
1193 dw32 (BusMode, de_bus_mode);
1194 mdelay (1);
1195
1196 for (tmp = 0; tmp < 5; tmp++) {
1197 dr32 (BusMode);
1198 mdelay (1);
1199 }
1200
1201 mdelay (1);
1202
1203 status = dr32(MacStatus);
1204 if (status & (RxState | TxState))
1205 return -EBUSY;
1206 if (status == 0xffffffff)
1207 return -ENODEV;
1208 return 0;
1209}
1210
1211static void de_adapter_wake (struct de_private *de)
1212{
1213 u32 pmctl;
1214
1215 if (de->de21040)
1216 return;
1217
1218 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1219 if (pmctl & PM_Mask) {
1220 pmctl &= ~PM_Mask;
1221 pci_write_config_dword(de->pdev, PCIPM, pmctl);
1222
1223 /* de4x5.c delays, so we do too */
1224 msleep(10);
1225 }
1226}
1227
1228static void de_adapter_sleep (struct de_private *de)
1229{
1230 u32 pmctl;
1231
1232 if (de->de21040)
1233 return;
1234
1235 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1236 pmctl |= PM_Sleep;
1237 pci_write_config_dword(de->pdev, PCIPM, pmctl);
1238}
1239
1240static int de_init_hw (struct de_private *de)
1241{
1242 struct net_device *dev = de->dev;
1243 u32 macmode;
1244 int rc;
1245
1246 de_adapter_wake(de);
f3b197ac 1247
1248 macmode = dr32(MacMode) & ~MacModeClear;
1249
1250 rc = de_reset_mac(de);
1251 if (rc)
1252 return rc;
1253
1254 de_set_media(de); /* reset phy */
1255
1256 dw32(RxRingAddr, de->ring_dma);
1257 dw32(TxRingAddr, de->ring_dma + (sizeof(struct de_desc) * DE_RX_RING_SIZE));
1258
1259 dw32(MacMode, RxTx | macmode);
1260
1261 dr32(RxMissed); /* self-clearing */
1262
1263 dw32(IntrMask, de_intr_mask);
1264
1265 de_set_rx_mode(dev);
1266
1267 return 0;
1268}
1269
1270static int de_refill_rx (struct de_private *de)
1271{
1272 unsigned i;
1273
1274 for (i = 0; i < DE_RX_RING_SIZE; i++) {
1275 struct sk_buff *skb;
1276
1277 skb = dev_alloc_skb(de->rx_buf_sz);
1278 if (!skb)
1279 goto err_out;
1280
1281 skb->dev = de->dev;
1282
1283 de->rx_skb[i].mapping = pci_map_single(de->pdev,
689be439 1284 skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1285 de->rx_skb[i].skb = skb;
1286
1287 de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
1288 if (i == (DE_RX_RING_SIZE - 1))
1289 de->rx_ring[i].opts2 =
1290 cpu_to_le32(RingEnd | de->rx_buf_sz);
1291 else
1292 de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz);
1293 de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping);
1294 de->rx_ring[i].addr2 = 0;
1295 }
1296
1297 return 0;
1298
1299err_out:
1300 de_clean_rings(de);
1301 return -ENOMEM;
1302}
1303
1304static int de_init_rings (struct de_private *de)
1305{
1306 memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1307 de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1308
1309 de->rx_tail = 0;
1310 de->tx_head = de->tx_tail = 0;
1311
1312 return de_refill_rx (de);
1313}
1314
1315static int de_alloc_rings (struct de_private *de)
1316{
1317 de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma);
1318 if (!de->rx_ring)
1319 return -ENOMEM;
1320 de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
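 /* Both rings live in a single coherent DMA allocation: DE_RX_RING_SIZE
  * Rx descriptors followed by the Tx descriptors.  de_init_hw() programs
  * RxRingAddr/TxRingAddr with the matching offsets into ring_dma. */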
1321 return de_init_rings(de);
1322}
1323
1324static void de_clean_rings (struct de_private *de)
1325{
1326 unsigned i;
1327
1328 memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE);
1329 de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1330 wmb();
1331 memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1332 de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1333 wmb();
1334
1335 for (i = 0; i < DE_RX_RING_SIZE; i++) {
1336 if (de->rx_skb[i].skb) {
1337 pci_unmap_single(de->pdev, de->rx_skb[i].mapping,
1338 de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1339 dev_kfree_skb(de->rx_skb[i].skb);
1340 }
1341 }
1342
1343 for (i = 0; i < DE_TX_RING_SIZE; i++) {
1344 struct sk_buff *skb = de->tx_skb[i].skb;
1345 if ((skb) && (skb != DE_DUMMY_SKB)) {
1346 if (skb != DE_SETUP_SKB) {
1347 de->net_stats.tx_dropped++;
1348 pci_unmap_single(de->pdev,
1349 de->tx_skb[i].mapping,
1350 skb->len, PCI_DMA_TODEVICE);
5185c7c2 1351 dev_kfree_skb(skb);
1352 } else {
1353 pci_unmap_single(de->pdev,
1354 de->tx_skb[i].mapping,
1355 sizeof(de->setup_frame),
1356 PCI_DMA_TODEVICE);
1357 }
1358 }
1359 }
1360
1361 memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE);
1362 memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE);
1363}
1364
1365static void de_free_rings (struct de_private *de)
1366{
1367 de_clean_rings(de);
1368 pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma);
1369 de->rx_ring = NULL;
1370 de->tx_ring = NULL;
1371}
1372
1373static int de_open (struct net_device *dev)
1374{
8f15ea42 1375 struct de_private *de = netdev_priv(dev);
1da177e4 1376 int rc;
1377
1378 if (netif_msg_ifup(de))
1379 printk(KERN_DEBUG "%s: enabling interface\n", dev->name);
1380
1381 de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1382
1383 rc = de_alloc_rings(de);
1384 if (rc) {
f639dc7d 1385 dev_err(&dev->dev, "ring allocation failure, err=%d\n", rc);
1386 return rc;
1387 }
1388
3f735b76 1389 dw32(IntrMask, 0);
1da177e4 1390
1fb9df5d 1391 rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev);
1da177e4 1392 if (rc) {
1393 dev_err(&dev->dev, "IRQ %d request failure, err=%d\n",
1394 dev->irq, rc);
1395 goto err_out_free;
1396 }
1397
1398 rc = de_init_hw(de);
1399 if (rc) {
f639dc7d 1400 dev_err(&dev->dev, "h/w init failure, err=%d\n", rc);
3f735b76 1401 goto err_out_free_irq;
1402 }
1403
1404 netif_start_queue(dev);
1405 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1406
1407 return 0;
1408
1409err_out_free_irq:
1410 free_irq(dev->irq, dev);
1411err_out_free:
1412 de_free_rings(de);
1413 return rc;
1414}
1415
1416static int de_close (struct net_device *dev)
1417{
8f15ea42 1418 struct de_private *de = netdev_priv(dev);
1419 unsigned long flags;
1420
1421 if (netif_msg_ifdown(de))
1422 printk(KERN_DEBUG "%s: disabling interface\n", dev->name);
1423
1424 del_timer_sync(&de->media_timer);
1425
1426 spin_lock_irqsave(&de->lock, flags);
1427 de_stop_hw(de);
1428 netif_stop_queue(dev);
1429 netif_carrier_off(dev);
1430 spin_unlock_irqrestore(&de->lock, flags);
f3b197ac 1431
1432 free_irq(dev->irq, dev);
1433
1434 de_free_rings(de);
1435 de_adapter_sleep(de);
1436 return 0;
1437}
1438
1439static void de_tx_timeout (struct net_device *dev)
1440{
8f15ea42 1441 struct de_private *de = netdev_priv(dev);
1442
1443 printk(KERN_DEBUG "%s: NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
1444 dev->name, dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
1445 de->rx_tail, de->tx_head, de->tx_tail);
1446
1447 del_timer_sync(&de->media_timer);
1448
1449 disable_irq(dev->irq);
1450 spin_lock_irq(&de->lock);
1451
1452 de_stop_hw(de);
1453 netif_stop_queue(dev);
1454 netif_carrier_off(dev);
1455
1456 spin_unlock_irq(&de->lock);
1457 enable_irq(dev->irq);
f3b197ac 1458
1459 /* Update the error counts. */
1460 __de_get_stats(de);
1461
1462 synchronize_irq(dev->irq);
1463 de_clean_rings(de);
1464
1465 de_init_rings(de);
1466
1da177e4 1467 de_init_hw(de);
f3b197ac 1468
1469 netif_wake_queue(dev);
1470}
1471
1472static void __de_get_regs(struct de_private *de, u8 *buf)
1473{
1474 int i;
1475 u32 *rbuf = (u32 *)buf;
f3b197ac 1476
1477 /* read all CSRs */
1478 for (i = 0; i < DE_NUM_REGS; i++)
1479 rbuf[i] = dr32(i * 8);
1480
1481 /* handle self-clearing RxMissed counter, CSR8 */
1482 de_rx_missed(de, rbuf[8]);
1483}
1484
1485static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1486{
1487 ecmd->supported = de->media_supported;
1488 ecmd->transceiver = XCVR_INTERNAL;
1489 ecmd->phy_address = 0;
1490 ecmd->advertising = de->media_advertise;
f3b197ac 1491
1492 switch (de->media_type) {
1493 case DE_MEDIA_AUI:
1494 ecmd->port = PORT_AUI;
1495 ecmd->speed = 5;
1496 break;
1497 case DE_MEDIA_BNC:
1498 ecmd->port = PORT_BNC;
1499 ecmd->speed = 2;
1500 break;
1501 default:
1502 ecmd->port = PORT_TP;
1503 ecmd->speed = SPEED_10;
1504 break;
1505 }
f3b197ac 1506
1507 if (dr32(MacMode) & FullDuplex)
1508 ecmd->duplex = DUPLEX_FULL;
1509 else
1510 ecmd->duplex = DUPLEX_HALF;
1511
1512 if (de->media_lock)
1513 ecmd->autoneg = AUTONEG_DISABLE;
1514 else
1515 ecmd->autoneg = AUTONEG_ENABLE;
1516
1517 /* ignore maxtxpkt, maxrxpkt for now */
1518
1519 return 0;
1520}
1521
1522static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1523{
1524 u32 new_media;
1525 unsigned int media_lock;
1526
1527 if (ecmd->speed != SPEED_10 && ecmd->speed != 5 && ecmd->speed != 2)
1528 return -EINVAL;
1529 if (de->de21040 && ecmd->speed == 2)
1530 return -EINVAL;
1531 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
1532 return -EINVAL;
1533 if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC)
1534 return -EINVAL;
1535 if (de->de21040 && ecmd->port == PORT_BNC)
1536 return -EINVAL;
1537 if (ecmd->transceiver != XCVR_INTERNAL)
1538 return -EINVAL;
1539 if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
1540 return -EINVAL;
1541 if (ecmd->advertising & ~de->media_supported)
1542 return -EINVAL;
1543 if (ecmd->autoneg == AUTONEG_ENABLE &&
1544 (!(ecmd->advertising & ADVERTISED_Autoneg)))
1545 return -EINVAL;
f3b197ac 1546
1547 switch (ecmd->port) {
1548 case PORT_AUI:
1549 new_media = DE_MEDIA_AUI;
1550 if (!(ecmd->advertising & ADVERTISED_AUI))
1551 return -EINVAL;
1552 break;
1553 case PORT_BNC:
1554 new_media = DE_MEDIA_BNC;
1555 if (!(ecmd->advertising & ADVERTISED_BNC))
1556 return -EINVAL;
1557 break;
1558 default:
1559 if (ecmd->autoneg == AUTONEG_ENABLE)
1560 new_media = DE_MEDIA_TP_AUTO;
1561 else if (ecmd->duplex == DUPLEX_FULL)
1562 new_media = DE_MEDIA_TP_FD;
1563 else
1564 new_media = DE_MEDIA_TP;
1565 if (!(ecmd->advertising & ADVERTISED_TP))
1566 return -EINVAL;
1567 if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half)))
1568 return -EINVAL;
1569 break;
1570 }
f3b197ac 1571
1da177e4 1572 media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1;
f3b197ac 1573
1574 if ((new_media == de->media_type) &&
1575 (media_lock == de->media_lock) &&
1576 (ecmd->advertising == de->media_advertise))
1577 return 0; /* nothing to change */
f3b197ac 1578
1579 de_link_down(de);
1580 de_stop_rxtx(de);
f3b197ac 1581
1582 de->media_type = new_media;
1583 de->media_lock = media_lock;
1584 de->media_advertise = ecmd->advertising;
1585 de_set_media(de);
f3b197ac 1586
1587 return 0;
1588}
1589
1590static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
1591{
8f15ea42 1592 struct de_private *de = netdev_priv(dev);
1593
1594 strcpy (info->driver, DRV_NAME);
1595 strcpy (info->version, DRV_VERSION);
1596 strcpy (info->bus_info, pci_name(de->pdev));
1597 info->eedump_len = DE_EEPROM_SIZE;
1598}
1599
1600static int de_get_regs_len(struct net_device *dev)
1601{
1602 return DE_REGS_SIZE;
1603}
1604
1605static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1606{
8f15ea42 1607 struct de_private *de = netdev_priv(dev);
1608 int rc;
1609
1610 spin_lock_irq(&de->lock);
1611 rc = __de_get_settings(de, ecmd);
1612 spin_unlock_irq(&de->lock);
1613
1614 return rc;
1615}
1616
1617static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1618{
8f15ea42 1619 struct de_private *de = netdev_priv(dev);
1620 int rc;
1621
1622 spin_lock_irq(&de->lock);
1623 rc = __de_set_settings(de, ecmd);
1624 spin_unlock_irq(&de->lock);
1625
1626 return rc;
1627}
1628
1629static u32 de_get_msglevel(struct net_device *dev)
1630{
8f15ea42 1631 struct de_private *de = netdev_priv(dev);
1632
1633 return de->msg_enable;
1634}
1635
1636static void de_set_msglevel(struct net_device *dev, u32 msglvl)
1637{
8f15ea42 1638 struct de_private *de = netdev_priv(dev);
1639
1640 de->msg_enable = msglvl;
1641}
1642
1643static int de_get_eeprom(struct net_device *dev,
1644 struct ethtool_eeprom *eeprom, u8 *data)
1645{
8f15ea42 1646 struct de_private *de = netdev_priv(dev);
1647
1648 if (!de->ee_data)
1649 return -EOPNOTSUPP;
1650 if ((eeprom->offset != 0) || (eeprom->magic != 0) ||
1651 (eeprom->len != DE_EEPROM_SIZE))
1652 return -EINVAL;
1653 memcpy(data, de->ee_data, eeprom->len);
1654
1655 return 0;
1656}
1657
1658static int de_nway_reset(struct net_device *dev)
1659{
8f15ea42 1660 struct de_private *de = netdev_priv(dev);
1661 u32 status;
1662
1663 if (de->media_type != DE_MEDIA_TP_AUTO)
1664 return -EINVAL;
1665 if (netif_carrier_ok(de->dev))
1666 de_link_down(de);
1667
1668 status = dr32(SIAStatus);
1669 dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
1670 if (netif_msg_link(de))
1671 dev_info(&de->dev->dev, "link nway restart, status %x,%x\n",
1672 status, dr32(SIAStatus));
1673 return 0;
1674}
1675
1676static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1677 void *data)
1678{
8f15ea42 1679 struct de_private *de = netdev_priv(dev);
1680
1681 regs->version = (DE_REGS_VER << 2) | de->de21040;
1682
1683 spin_lock_irq(&de->lock);
1684 __de_get_regs(de, data);
1685 spin_unlock_irq(&de->lock);
1686}
1687
 1688static const struct ethtool_ops de_ethtool_ops = {
1da177e4 1689 .get_link = ethtool_op_get_link,
1690 .get_drvinfo = de_get_drvinfo,
1691 .get_regs_len = de_get_regs_len,
1692 .get_settings = de_get_settings,
1693 .set_settings = de_set_settings,
1694 .get_msglevel = de_get_msglevel,
1695 .set_msglevel = de_set_msglevel,
1696 .get_eeprom = de_get_eeprom,
1697 .nway_reset = de_nway_reset,
1698 .get_regs = de_get_regs,
1699};
1700
 1701static void __devinit de21040_get_mac_address (struct de_private *de)
1702{
1703 unsigned i;
1704
1705 dw32 (ROMCmd, 0); /* Reset the pointer with a dummy write. */
bc0da3fc 1706 udelay(5);
1707
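 /* The 21040 keeps its station address in a serial address ROM that is
  * read a byte at a time through ROMCmd (CSR9): the dummy write above
  * resets the read pointer, then each read returns the next byte once
  * bit 31 clears, so the loop below spins while the register still
  * reads as negative. */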
1708 for (i = 0; i < 6; i++) {
1709 int value, boguscnt = 100000;
ec1d1ebb 1710 do {
1da177e4 1711 value = dr32(ROMCmd);
ec1d1ebb 1712 } while (value < 0 && --boguscnt > 0);
1713 de->dev->dev_addr[i] = value;
1714 udelay(1);
1715 if (boguscnt <= 0)
f639dc7d 1716 pr_warning(PFX "timeout reading 21040 MAC address byte %u\n", i);
1717 }
1718}
1719
 1720static void __devinit de21040_get_media_info(struct de_private *de)
1721{
1722 unsigned int i;
1723
1724 de->media_type = DE_MEDIA_TP;
1725 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full |
1726 SUPPORTED_10baseT_Half | SUPPORTED_AUI;
1727 de->media_advertise = de->media_supported;
1728
1729 for (i = 0; i < DE_MAX_MEDIA; i++) {
1730 switch (i) {
1731 case DE_MEDIA_AUI:
1732 case DE_MEDIA_TP:
1733 case DE_MEDIA_TP_FD:
1734 de->media[i].type = i;
1735 de->media[i].csr13 = t21040_csr13[i];
1736 de->media[i].csr14 = t21040_csr14[i];
1737 de->media[i].csr15 = t21040_csr15[i];
1738 break;
1739 default:
1740 de->media[i].type = DE_MEDIA_INVALID;
1741 break;
1742 }
1743 }
1744}
1745
1746/* Note: this routine returns extra data bits for size detection. */
 1747static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, int addr_len)
1748{
1749 int i;
1750 unsigned retval = 0;
1751 void __iomem *ee_addr = regs + ROMCmd;
1752 int read_cmd = location | (EE_READ_CMD << addr_len);
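 /* Classic 93CXX serial EEPROM bit-banging through ROMCmd: the start
  * bit, read opcode and address are shifted out MSB first (4 + addr_len
  * + 1 clocks), then 16 data bits are clocked back in.  The extra bits
  * accumulated during the command phase are what callers use for
  * address-width detection. */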
1753
1754 writel(EE_ENB & ~EE_CS, ee_addr);
1755 writel(EE_ENB, ee_addr);
1756
1757 /* Shift the read command bits out. */
1758 for (i = 4 + addr_len; i >= 0; i--) {
1759 short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1760 writel(EE_ENB | dataval, ee_addr);
1761 readl(ee_addr);
1762 writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1763 readl(ee_addr);
1764 retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
1765 }
1766 writel(EE_ENB, ee_addr);
1767 readl(ee_addr);
1768
1769 for (i = 16; i > 0; i--) {
1770 writel(EE_ENB | EE_SHIFT_CLK, ee_addr);
1771 readl(ee_addr);
1772 retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
1773 writel(EE_ENB, ee_addr);
1774 readl(ee_addr);
1775 }
1776
1777 /* Terminate the EEPROM access. */
1778 writel(EE_ENB & ~EE_CS, ee_addr);
1779 return retval;
1780}
1781
 1782static void __devinit de21041_get_srom_info (struct de_private *de)
1783{
1784 unsigned i, sa_offset = 0, ofs;
1785 u8 ee_data[DE_EEPROM_SIZE + 6] = {};
1786 unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6;
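 /* Probe the EEPROM address width: read location 0xff with an 8-bit
  * address and test one of the extra framing bits returned above bit 15
  * by tulip_read_eeprom(); if it is set the part wants 8 address bits
  * (93C66-class), otherwise 6 (93C46). */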
1787 struct de_srom_info_leaf *il;
1788 void *bufp;
1789
1790 /* download entire eeprom */
1791 for (i = 0; i < DE_EEPROM_WORDS; i++)
1792 ((__le16 *)ee_data)[i] =
1793 cpu_to_le16(tulip_read_eeprom(de->regs, i, ee_addr_size));
1794
1795 /* DEC now has a specification but early board makers
1796 just put the address in the first EEPROM locations. */
1797 /* This does memcmp(eedata, eedata+16, 8) */
1798
1799#ifndef CONFIG_MIPS_COBALT
1800
1801 for (i = 0; i < 8; i ++)
1802 if (ee_data[i] != ee_data[16+i])
1803 sa_offset = 20;
1804
1805#endif
1806
1807 /* store MAC address */
1808 for (i = 0; i < 6; i ++)
1809 de->dev->dev_addr[i] = ee_data[i + sa_offset];
1810
1811 /* get offset of controller 0 info leaf. ignore 2nd byte. */
1812 ofs = ee_data[SROMC0InfoLeaf];
1813 if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block)))
1814 goto bad_srom;
1815
1816 /* get pointer to info leaf */
1817 il = (struct de_srom_info_leaf *) &ee_data[ofs];
1818
1819 /* paranoia checks */
1820 if (il->n_blocks == 0)
1821 goto bad_srom;
1822 if ((sizeof(ee_data) - ofs) <
1823 (sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks)))
1824 goto bad_srom;
1825
1826 /* get default media type */
445854f4 1827 switch (get_unaligned(&il->default_media)) {
1828 case 0x0001: de->media_type = DE_MEDIA_BNC; break;
1829 case 0x0002: de->media_type = DE_MEDIA_AUI; break;
1830 case 0x0204: de->media_type = DE_MEDIA_TP_FD; break;
1831 default: de->media_type = DE_MEDIA_TP_AUTO; break;
1832 }
f3b197ac 1833
1da177e4 1834 if (netif_msg_probe(de))
1835 pr_info("de%d: SROM leaf offset %u, default media %s\n",
1836 de->board_idx, ofs, media_name[de->media_type]);
1837
1838 /* init SIA register values to defaults */
1839 for (i = 0; i < DE_MAX_MEDIA; i++) {
1840 de->media[i].type = DE_MEDIA_INVALID;
1841 de->media[i].csr13 = 0xffff;
1842 de->media[i].csr14 = 0xffff;
1843 de->media[i].csr15 = 0xffff;
1844 }
1845
 1846 /* parse media blocks to see which media types are supported,
1847 * and if any custom CSR values are provided
1848 */
1849 bufp = ((void *)il) + sizeof(*il);
1850 for (i = 0; i < il->n_blocks; i++) {
1851 struct de_srom_media_block *ib = bufp;
1852 unsigned idx;
1853
1854 /* index based on media type in media block */
1855 switch(ib->opts & MediaBlockMask) {
1856 case 0: /* 10baseT */
1857 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half
1858 | SUPPORTED_Autoneg;
1859 idx = DE_MEDIA_TP;
1860 de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
1861 break;
1862 case 1: /* BNC */
1863 de->media_supported |= SUPPORTED_BNC;
1864 idx = DE_MEDIA_BNC;
1865 break;
1866 case 2: /* AUI */
1867 de->media_supported |= SUPPORTED_AUI;
1868 idx = DE_MEDIA_AUI;
1869 break;
1870 case 4: /* 10baseT-FD */
1871 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full
1872 | SUPPORTED_Autoneg;
1873 idx = DE_MEDIA_TP_FD;
1874 de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
1875 break;
1876 default:
1877 goto bad_srom;
1878 }
1879
1880 de->media[idx].type = idx;
1881
1882 if (netif_msg_probe(de))
1883 pr_info("de%d: media block #%u: %s",
1884 de->board_idx, i,
1885 media_name[de->media[idx].type]);
1886
1887 bufp += sizeof (ib->opts);
1888
1889 if (ib->opts & MediaCustomCSRs) {
1890 de->media[idx].csr13 = get_unaligned(&ib->csr13);
1891 de->media[idx].csr14 = get_unaligned(&ib->csr14);
1892 de->media[idx].csr15 = get_unaligned(&ib->csr15);
1893 bufp += sizeof(ib->csr13) + sizeof(ib->csr14) +
1894 sizeof(ib->csr15);
1895
1896 if (netif_msg_probe(de))
1897 pr_cont(" (%x,%x,%x)\n",
1898 de->media[idx].csr13,
1899 de->media[idx].csr14,
1900 de->media[idx].csr15);
f3b197ac 1901
1da177e4 1902 } else if (netif_msg_probe(de))
f639dc7d 1903 pr_cont("\n");
1904
1905 if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
1906 break;
1907 }
1908
1909 de->media_advertise = de->media_supported;
1910
1911fill_defaults:
1912 /* fill in defaults, for cases where custom CSRs not used */
1913 for (i = 0; i < DE_MAX_MEDIA; i++) {
1914 if (de->media[i].csr13 == 0xffff)
1915 de->media[i].csr13 = t21041_csr13[i];
1916 if (de->media[i].csr14 == 0xffff)
1917 de->media[i].csr14 = t21041_csr14[i];
1918 if (de->media[i].csr15 == 0xffff)
1919 de->media[i].csr15 = t21041_csr15[i];
1920 }
1921
c3a9392e 1922 de->ee_data = kmemdup(&ee_data[0], DE_EEPROM_SIZE, GFP_KERNEL);
1923
1924 return;
1925
1926bad_srom:
1927 /* for error cases, it's ok to assume we support all these */
1928 for (i = 0; i < DE_MAX_MEDIA; i++)
1929 de->media[i].type = i;
1930 de->media_supported =
1931 SUPPORTED_10baseT_Half |
1932 SUPPORTED_10baseT_Full |
1933 SUPPORTED_Autoneg |
1934 SUPPORTED_TP |
1935 SUPPORTED_AUI |
1936 SUPPORTED_BNC;
1937 goto fill_defaults;
1938}
1939
1940static const struct net_device_ops de_netdev_ops = {
1941 .ndo_open = de_open,
1942 .ndo_stop = de_close,
1943 .ndo_set_multicast_list = de_set_rx_mode,
1944 .ndo_start_xmit = de_start_xmit,
1945 .ndo_get_stats = de_get_stats,
1946 .ndo_tx_timeout = de_tx_timeout,
1947 .ndo_change_mtu = eth_change_mtu,
1948 .ndo_set_mac_address = eth_mac_addr,
1949 .ndo_validate_addr = eth_validate_addr,
1950};
1951
 1952static int __devinit de_init_one (struct pci_dev *pdev,
1953 const struct pci_device_id *ent)
1954{
1955 struct net_device *dev;
1956 struct de_private *de;
1957 int rc;
1958 void __iomem *regs;
afc7097f 1959 unsigned long pciaddr;
1960 static int board_idx = -1;
1961
1962 board_idx++;
1963
1964#ifndef MODULE
1965 if (board_idx == 0)
1966 printk("%s", version);
1967#endif
1968
1969 /* allocate a new ethernet device structure, and fill in defaults */
1970 dev = alloc_etherdev(sizeof(struct de_private));
1971 if (!dev)
1972 return -ENOMEM;
1973
90d8743d 1974 dev->netdev_ops = &de_netdev_ops;
1da177e4 1975 SET_NETDEV_DEV(dev, &pdev->dev);
1da177e4 1976 dev->ethtool_ops = &de_ethtool_ops;
1977 dev->watchdog_timeo = TX_TIMEOUT;
1978
8f15ea42 1979 de = netdev_priv(dev);
1980 de->de21040 = ent->driver_data == 0 ? 1 : 0;
1981 de->pdev = pdev;
1982 de->dev = dev;
1983 de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug);
1984 de->board_idx = board_idx;
1985 spin_lock_init (&de->lock);
1986 init_timer(&de->media_timer);
1987 if (de->de21040)
1988 de->media_timer.function = de21040_media_timer;
1989 else
1990 de->media_timer.function = de21041_media_timer;
1991 de->media_timer.data = (unsigned long) de;
1992
1993 netif_carrier_off(dev);
1994 netif_stop_queue(dev);
1995
1996 /* wake up device, assign resources */
1997 rc = pci_enable_device(pdev);
1998 if (rc)
1999 goto err_out_free;
2000
2001 /* reserve PCI resources to ensure driver atomicity */
2002 rc = pci_request_regions(pdev, DRV_NAME);
2003 if (rc)
2004 goto err_out_disable;
2005
2006 /* check for invalid IRQ value */
2007 if (pdev->irq < 2) {
2008 rc = -EIO;
f639dc7d 2009 pr_err(PFX "invalid irq (%d) for pci dev %s\n",
2010 pdev->irq, pci_name(pdev));
2011 goto err_out_res;
2012 }
2013
2014 dev->irq = pdev->irq;
2015
2016 /* obtain and check validity of PCI I/O address */
2017 pciaddr = pci_resource_start(pdev, 1);
2018 if (!pciaddr) {
2019 rc = -EIO;
f639dc7d 2020 pr_err(PFX "no MMIO resource for pci dev %s\n", pci_name(pdev));
2021 goto err_out_res;
2022 }
2023 if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
2024 rc = -EIO;
2025 pr_err(PFX "MMIO resource (%llx) too small on pci dev %s\n",
2026 (unsigned long long)pci_resource_len(pdev, 1),
2027 pci_name(pdev));
2028 goto err_out_res;
2029 }
2030
2031 /* remap CSR registers */
2032 regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
2033 if (!regs) {
2034 rc = -EIO;
2035 pr_err(PFX "Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
2036 (unsigned long long)pci_resource_len(pdev, 1),
2037 pciaddr, pci_name(pdev));
2038 goto err_out_res;
2039 }
2040 dev->base_addr = (unsigned long) regs;
2041 de->regs = regs;
2042
2043 de_adapter_wake(de);
2044
2045 /* make sure hardware is not running */
2046 rc = de_reset_mac(de);
2047 if (rc) {
f639dc7d 2048 pr_err(PFX "Cannot reset MAC, pci dev %s\n", pci_name(pdev));
2049 goto err_out_iomap;
2050 }
2051
2052 /* get MAC address, initialize default media type and
2053 * get list of supported media
2054 */
2055 if (de->de21040) {
2056 de21040_get_mac_address(de);
2057 de21040_get_media_info(de);
2058 } else {
2059 de21041_get_srom_info(de);
2060 }
2061
2062 /* register new network interface with kernel */
2063 rc = register_netdev(dev);
2064 if (rc)
2065 goto err_out_iomap;
2066
2067 /* print info about board and interface just registered */
2068 dev_info(&dev->dev, "%s at 0x%lx, %pM, IRQ %d\n",
2069 de->de21040 ? "21040" : "21041",
2070 dev->base_addr,
2071 dev->dev_addr,
2072 dev->irq);
2073
2074 pci_set_drvdata(pdev, dev);
2075
2076 /* enable busmastering */
2077 pci_set_master(pdev);
2078
2079 /* put adapter to sleep */
2080 de_adapter_sleep(de);
2081
2082 return 0;
2083
2084err_out_iomap:
b4558ea9 2085 kfree(de->ee_data);
2086 iounmap(regs);
2087err_out_res:
2088 pci_release_regions(pdev);
2089err_out_disable:
2090 pci_disable_device(pdev);
2091err_out_free:
2092 free_netdev(dev);
2093 return rc;
2094}
2095
 2096static void __devexit de_remove_one (struct pci_dev *pdev)
2097{
2098 struct net_device *dev = pci_get_drvdata(pdev);
8f15ea42 2099 struct de_private *de = netdev_priv(dev);
1da177e4 2100
7e0b58f3 2101 BUG_ON(!dev);
1da177e4 2102 unregister_netdev(dev);
b4558ea9 2103 kfree(de->ee_data);
2104 iounmap(de->regs);
2105 pci_release_regions(pdev);
2106 pci_disable_device(pdev);
2107 pci_set_drvdata(pdev, NULL);
2108 free_netdev(dev);
2109}
2110
2111#ifdef CONFIG_PM
2112
 2113static int de_suspend (struct pci_dev *pdev, pm_message_t state)
2114{
2115 struct net_device *dev = pci_get_drvdata (pdev);
8f15ea42 2116 struct de_private *de = netdev_priv(dev);
2117
2118 rtnl_lock();
2119 if (netif_running (dev)) {
2120 del_timer_sync(&de->media_timer);
2121
2122 disable_irq(dev->irq);
2123 spin_lock_irq(&de->lock);
2124
2125 de_stop_hw(de);
2126 netif_stop_queue(dev);
2127 netif_device_detach(dev);
2128 netif_carrier_off(dev);
2129
2130 spin_unlock_irq(&de->lock);
2131 enable_irq(dev->irq);
f3b197ac 2132
2133 /* Update the error counts. */
2134 __de_get_stats(de);
2135
2136 synchronize_irq(dev->irq);
2137 de_clean_rings(de);
2138
2139 de_adapter_sleep(de);
2140 pci_disable_device(pdev);
2141 } else {
2142 netif_device_detach(dev);
2143 }
2144 rtnl_unlock();
2145 return 0;
2146}
2147
2148static int de_resume (struct pci_dev *pdev)
2149{
2150 struct net_device *dev = pci_get_drvdata (pdev);
8f15ea42 2151 struct de_private *de = netdev_priv(dev);
9f486ae1 2152 int retval = 0;
2153
2154 rtnl_lock();
2155 if (netif_device_present(dev))
2156 goto out;
2157 if (!netif_running(dev))
2158 goto out_attach;
2159 if ((retval = pci_enable_device(pdev))) {
f639dc7d 2160 dev_err(&dev->dev, "pci_enable_device failed in resume\n");
9f486ae1 2161 goto out;
1da177e4 2162 }
2163 de_init_hw(de);
2164out_attach:
2165 netif_device_attach(dev);
2166out:
2167 rtnl_unlock();
2168 return 0;
2169}
2170
2171#endif /* CONFIG_PM */
2172
2173static struct pci_driver de_driver = {
2174 .name = DRV_NAME,
2175 .id_table = de_pci_tbl,
2176 .probe = de_init_one,
 2177 .remove = __devexit_p(de_remove_one),
2178#ifdef CONFIG_PM
2179 .suspend = de_suspend,
2180 .resume = de_resume,
2181#endif
2182};
2183
2184static int __init de_init (void)
2185{
2186#ifdef MODULE
2187 printk("%s", version);
2188#endif
29917620 2189 return pci_register_driver(&de_driver);
2190}
2191
2192static void __exit de_exit (void)
2193{
2194 pci_unregister_driver (&de_driver);
2195}
2196
2197module_init(de_init);
2198module_exit(de_exit);