/* de2104x.c: A Linux PCI Ethernet driver for Intel/Digital 21040/1 chips. */
/*
	Copyright 2001,2003 Jeff Garzik <jgarzik@pobox.com>

	Copyright 1994, 1995 Digital Equipment Corporation.	    [de4x5.c]
	Written/copyright 1994-2001 by Donald Becker.		    [tulip.c]

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	See the file COPYING in this distribution for more information.

	TODO, in rough priority order:
	* Support forcing media type with a module parameter,
	  like dl2k.c/sundance.c
	* Constants (module parms?) for Rx work limit
	* Complete reset on PciErr
	* Jumbo frames / dev->change_mtu
	* Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
	* Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
	* Implement Tx software interrupt mitigation via
	  Tx descriptor bit

 */

#define DRV_NAME		"de2104x"
#define DRV_VERSION		"0.7"
#define DRV_RELDATE		"Mar 17, 2004"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/compiler.h>
#include <linux/rtnetlink.h>
#include <linux/crc32.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

/* These identify the driver base version and may not be removed. */
static char version[] =
KERN_INFO DRV_NAME " PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";

MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int debug = -1;
module_param (debug, int, 0);
MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");

/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
	defined(CONFIG_SPARC) || defined(__ia64__) || \
	defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak = 100;
#endif
module_param (rx_copybreak, int, 0);
MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied");

#define PFX			DRV_NAME ": "

#define DE_DEF_MSG_ENABLE	(NETIF_MSG_DRV		| \
				 NETIF_MSG_PROBE	| \
				 NETIF_MSG_LINK		| \
				 NETIF_MSG_IFDOWN	| \
				 NETIF_MSG_IFUP		| \
				 NETIF_MSG_RX_ERR	| \
				 NETIF_MSG_TX_ERR)

/* Descriptor skip length in 32 bit longwords. */
#ifndef CONFIG_DE2104X_DSL
#define DSL			0
#else
#define DSL			CONFIG_DE2104X_DSL
#endif

#define DE_RX_RING_SIZE		64
#define DE_TX_RING_SIZE		64
#define DE_RING_BYTES		\
		((sizeof(struct de_desc) * DE_RX_RING_SIZE) +	\
		 (sizeof(struct de_desc) * DE_TX_RING_SIZE))
#define NEXT_TX(N)		(((N) + 1) & (DE_TX_RING_SIZE - 1))
#define NEXT_RX(N)		(((N) + 1) & (DE_RX_RING_SIZE - 1))
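/* Free Tx slots between tx_head (next slot to use) and tx_tail (next slot to
 * reclaim).  One slot is always left unused so a full ring can be told apart
 * from an empty one; the ring sizes are powers of two, so NEXT_TX/NEXT_RX
 * wrap with a simple mask.
 */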
#define TX_BUFFS_AVAIL(CP)					\
	(((CP)->tx_tail <= (CP)->tx_head) ?			\
	  (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head : \
	  (CP)->tx_tail - (CP)->tx_head - 1)

#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
#define RX_OFFSET		2

#define DE_SETUP_SKB		((struct sk_buff *) 1)
#define DE_DUMMY_SKB		((struct sk_buff *) 2)
#define DE_SETUP_FRAME_WORDS	96
#define DE_EEPROM_WORDS		256
#define DE_EEPROM_SIZE		(DE_EEPROM_WORDS * sizeof(u16))
#define DE_MAX_MEDIA		5

#define DE_MEDIA_TP_AUTO	0
#define DE_MEDIA_BNC		1
#define DE_MEDIA_AUI		2
#define DE_MEDIA_TP		3
#define DE_MEDIA_TP_FD		4
#define DE_MEDIA_INVALID	DE_MAX_MEDIA
#define DE_MEDIA_FIRST		0
#define DE_MEDIA_LAST		(DE_MAX_MEDIA - 1)
#define DE_AUI_BNC		(SUPPORTED_AUI | SUPPORTED_BNC)

#define DE_TIMER_LINK		(60 * HZ)
#define DE_TIMER_NO_LINK	(5 * HZ)

#define DE_NUM_REGS		16
#define DE_REGS_SIZE		(DE_NUM_REGS * sizeof(u32))
#define DE_REGS_VER		1

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT		(6*HZ)

/* This is a mysterious value that can be written to CSR11 in the 21040 (only)
   to support a pre-NWay full-duplex signaling mechanism using short frames.
   No one knows what it should be, but if left at its default value some
   10base2(!) packets trigger a full-duplex-request interrupt. */
#define FULL_DUPLEX_MAGIC	0x6969

enum {
	/* NIC registers */
	BusMode			= 0x00,
	TxPoll			= 0x08,
	RxPoll			= 0x10,
	RxRingAddr		= 0x18,
	TxRingAddr		= 0x20,
	MacStatus		= 0x28,
	MacMode			= 0x30,
	IntrMask		= 0x38,
	RxMissed		= 0x40,
	ROMCmd			= 0x48,
	CSR11			= 0x58,
	SIAStatus		= 0x60,
	CSR13			= 0x68,
	CSR14			= 0x70,
	CSR15			= 0x78,
	PCIPM			= 0x40,

	/* BusMode bits */
	CmdReset		= (1 << 0),
	CacheAlign16		= 0x00008000,
	BurstLen4		= 0x00000400,
	DescSkipLen		= (DSL << 2),

	/* Rx/TxPoll bits */
	NormalTxPoll		= (1 << 0),
	NormalRxPoll		= (1 << 0),

	/* Tx/Rx descriptor status bits */
	DescOwn			= (1 << 31),
	RxError			= (1 << 15),
	RxErrLong		= (1 << 7),
	RxErrCRC		= (1 << 1),
	RxErrFIFO		= (1 << 0),
	RxErrRunt		= (1 << 11),
	RxErrFrame		= (1 << 14),
	RingEnd			= (1 << 25),
	FirstFrag		= (1 << 29),
	LastFrag		= (1 << 30),
	TxError			= (1 << 15),
	TxFIFOUnder		= (1 << 1),
	TxLinkFail		= (1 << 2) | (1 << 10) | (1 << 11),
	TxMaxCol		= (1 << 8),
	TxOWC			= (1 << 9),
	TxJabber		= (1 << 14),
	SetupFrame		= (1 << 27),
	TxSwInt			= (1 << 31),

	/* MacStatus bits */
	IntrOK			= (1 << 16),
	IntrErr			= (1 << 15),
	RxIntr			= (1 << 6),
	RxEmpty			= (1 << 7),
	TxIntr			= (1 << 0),
	TxEmpty			= (1 << 2),
	PciErr			= (1 << 13),
	TxState			= (1 << 22) | (1 << 21) | (1 << 20),
	RxState			= (1 << 19) | (1 << 18) | (1 << 17),
	LinkFail		= (1 << 12),
	LinkPass		= (1 << 4),
	RxStopped		= (1 << 8),
	TxStopped		= (1 << 1),

	/* MacMode bits */
	TxEnable		= (1 << 13),
	RxEnable		= (1 << 1),
	RxTx			= TxEnable | RxEnable,
	FullDuplex		= (1 << 9),
	AcceptAllMulticast	= (1 << 7),
	AcceptAllPhys		= (1 << 6),
	BOCnt			= (1 << 5),
	MacModeClear		= (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) |
				  RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast,

	/* ROMCmd bits */
	EE_SHIFT_CLK		= 0x02,	/* EEPROM shift clock. */
	EE_CS			= 0x01,	/* EEPROM chip select. */
	EE_DATA_WRITE		= 0x04,	/* Data from the Tulip to EEPROM. */
	EE_WRITE_0		= 0x01,
	EE_WRITE_1		= 0x05,
	EE_DATA_READ		= 0x08,	/* Data from the EEPROM chip. */
	EE_ENB			= (0x4800 | EE_CS),

	/* The EEPROM commands include the always-set leading bit. */
	EE_READ_CMD		= 6,

	/* RxMissed bits */
	RxMissedOver		= (1 << 16),
	RxMissedMask		= 0xffff,

	/* SROM-related bits */
	SROMC0InfoLeaf		= 27,
	MediaBlockMask		= 0x3f,
	MediaCustomCSRs		= (1 << 6),

	/* PCIPM bits */
	PM_Sleep		= (1 << 31),
	PM_Snooze		= (1 << 30),
	PM_Mask			= PM_Sleep | PM_Snooze,

	/* SIAStatus bits */
	NWayState		= (1 << 14) | (1 << 13) | (1 << 12),
	NWayRestart		= (1 << 12),
	NonselPortActive	= (1 << 9),
	LinkFailStatus		= (1 << 2),
	NetCxnErr		= (1 << 1),
};

static const u32 de_intr_mask =
	IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty |
	LinkPass | LinkFail | PciErr;

/*
 * Set the programmable burst length to 4 longwords for all:
 * DMA errors result without these values. Cache align 16 long.
 */
static const u32 de_bus_mode = CacheAlign16 | BurstLen4 | DescSkipLen;

struct de_srom_media_block {
	u8			opts;
	u16			csr13;
	u16			csr14;
	u16			csr15;
} __packed;

struct de_srom_info_leaf {
	u16			default_media;
	u8			n_blocks;
	u8			unused;
} __packed;

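/* One hardware descriptor laid out as the 21040/21041 DMA engine reads it in
 * host memory: a status/ownership word (opts1), a control/length word (opts2),
 * then two buffer addresses.  The optional skip words correspond to the
 * DescSkipLen value programmed into BusMode above.
 */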
struct de_desc {
	__le32			opts1;
	__le32			opts2;
	__le32			addr1;
	__le32			addr2;
#if DSL
	__le32			skip[DSL];
#endif
};

struct media_info {
	u16			type;	/* DE_MEDIA_xxx */
	u16			csr13;
	u16			csr14;
	u16			csr15;
};

struct ring_info {
	struct sk_buff		*skb;
	dma_addr_t		mapping;
};

struct de_private {
	unsigned		tx_head;
	unsigned		tx_tail;
	unsigned		rx_tail;

	void			__iomem *regs;
	struct net_device	*dev;
	spinlock_t		lock;

	struct de_desc		*rx_ring;
	struct de_desc		*tx_ring;
	struct ring_info	tx_skb[DE_TX_RING_SIZE];
	struct ring_info	rx_skb[DE_RX_RING_SIZE];
	unsigned		rx_buf_sz;
	dma_addr_t		ring_dma;

	u32			msg_enable;

	struct net_device_stats net_stats;

	struct pci_dev		*pdev;

	u16			setup_frame[DE_SETUP_FRAME_WORDS];

	u32			media_type;
	u32			media_supported;
	u32			media_advertise;
	struct media_info	media[DE_MAX_MEDIA];
	struct timer_list	media_timer;

	u8			*ee_data;
	unsigned		board_idx;
	unsigned		de21040 : 1;
	unsigned		media_lock : 1;
};


static void de_set_rx_mode (struct net_device *dev);
static void de_tx (struct de_private *de);
static void de_clean_rings (struct de_private *de);
static void de_media_interrupt (struct de_private *de, u32 status);
static void de21040_media_timer (unsigned long data);
static void de21041_media_timer (unsigned long data);
static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);


static DEFINE_PCI_DEVICE_TABLE(de_pci_tbl) = {
	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ },
};
MODULE_DEVICE_TABLE(pci, de_pci_tbl);

static const char * const media_name[DE_MAX_MEDIA] = {
	"10baseT auto",
	"BNC",
	"AUI",
	"10baseT-HD",
	"10baseT-FD"
};

/* 21040 transceiver register settings:
 * TP AUTO (unused), BNC (unused), AUI, TP, TP FD */
static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, };
static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, };
static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };

/* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD */
static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
/* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */
static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };


#define dr32(reg)	ioread32(de->regs + (reg))
#define dw32(reg, val)	iowrite32((val), de->regs + (reg))


static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
			    u32 status, u32 len)
{
	if (netif_msg_rx_err (de))
		printk (KERN_DEBUG
			"%s: rx err, slot %d status 0x%x len %d\n",
			de->dev->name, rx_tail, status, len);

	if ((status & 0x38000300) != 0x0300) {
		/* Ignore earlier buffers. */
		if ((status & 0xffff) != 0x7fff) {
			if (netif_msg_rx_err(de))
				dev_warn(&de->dev->dev,
					 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
					 status);
			de->net_stats.rx_length_errors++;
		}
	} else if (status & RxError) {
		/* There was a fatal error. */
		de->net_stats.rx_errors++;	/* end of a packet.*/
		if (status & 0x0890)		de->net_stats.rx_length_errors++;
		if (status & RxErrCRC)		de->net_stats.rx_crc_errors++;
		if (status & RxErrFIFO)		de->net_stats.rx_fifo_errors++;
	}
}

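/* Receive path: walk the Rx ring until we reach a descriptor the chip still
 * owns.  Small frames (<= rx_copybreak) are copied into a freshly allocated
 * skb so the original DMA buffer can be handed straight back to the chip;
 * larger frames are passed up as-is and replaced with a newly mapped buffer.
 */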
static void de_rx (struct de_private *de)
{
	unsigned rx_tail = de->rx_tail;
	unsigned rx_work = DE_RX_RING_SIZE;
	unsigned drop = 0;
	int rc;

	while (--rx_work) {
		u32 status, len;
		dma_addr_t mapping;
		struct sk_buff *skb, *copy_skb;
		unsigned copying_skb, buflen;

		skb = de->rx_skb[rx_tail].skb;
		BUG_ON(!skb);
		rmb();
		status = le32_to_cpu(de->rx_ring[rx_tail].opts1);
		if (status & DescOwn)
			break;

		len = ((status >> 16) & 0x7ff) - 4;
		mapping = de->rx_skb[rx_tail].mapping;

		if (unlikely(drop)) {
			de->net_stats.rx_dropped++;
			goto rx_next;
		}

		if (unlikely((status & 0x38008300) != 0x0300)) {
			de_rx_err_acct(de, rx_tail, status, len);
			goto rx_next;
		}

		copying_skb = (len <= rx_copybreak);

		if (unlikely(netif_msg_rx_status(de)))
			printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d copying? %d\n",
			       de->dev->name, rx_tail, status, len,
			       copying_skb);

		buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
		copy_skb = dev_alloc_skb (buflen);
		if (unlikely(!copy_skb)) {
			de->net_stats.rx_dropped++;
			drop = 1;
			rx_work = 100;
			goto rx_next;
		}

		if (!copying_skb) {
			pci_unmap_single(de->pdev, mapping,
					 buflen, PCI_DMA_FROMDEVICE);
			skb_put(skb, len);

			mapping =
			de->rx_skb[rx_tail].mapping =
				pci_map_single(de->pdev, copy_skb->data,
					       buflen, PCI_DMA_FROMDEVICE);
			de->rx_skb[rx_tail].skb = copy_skb;
		} else {
			pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
			skb_reserve(copy_skb, RX_OFFSET);
			skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
						  len);
			pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		skb->protocol = eth_type_trans (skb, de->dev);

		de->net_stats.rx_packets++;
		de->net_stats.rx_bytes += skb->len;
		rc = netif_rx (skb);
		if (rc == NET_RX_DROP)
			drop = 1;

rx_next:
		if (rx_tail == (DE_RX_RING_SIZE - 1))
			de->rx_ring[rx_tail].opts2 =
				cpu_to_le32(RingEnd | de->rx_buf_sz);
		else
			de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
		de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
		wmb();
		de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
		rx_tail = NEXT_RX(rx_tail);
	}

	if (!rx_work)
		dev_warn(&de->dev->dev, "rx work limit reached\n");

	de->rx_tail = rx_tail;
}

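/* Interrupt handler: Rx is drained without taking de->lock, while Tx
 * completion and link-change handling run under the lock so they cannot race
 * with the transmit and media-timer paths.  A PCI bus error is reported and
 * the latched PCI status bits are written back to clear them.
 */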
static irqreturn_t de_interrupt (int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct de_private *de = netdev_priv(dev);
	u32 status;

	status = dr32(MacStatus);
	if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))
		return IRQ_NONE;

	if (netif_msg_intr(de))
		printk(KERN_DEBUG "%s: intr, status %08x mode %08x desc %u/%u/%u\n",
		       dev->name, status, dr32(MacMode),
		       de->rx_tail, de->tx_head, de->tx_tail);

	dw32(MacStatus, status);

	if (status & (RxIntr | RxEmpty)) {
		de_rx(de);
		if (status & RxEmpty)
			dw32(RxPoll, NormalRxPoll);
	}

	spin_lock(&de->lock);

	if (status & (TxIntr | TxEmpty))
		de_tx(de);

	if (status & (LinkPass | LinkFail))
		de_media_interrupt(de, status);

	spin_unlock(&de->lock);

	if (status & PciErr) {
		u16 pci_status;

		pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
		pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
		dev_err(&de->dev->dev,
			"PCI bus error, status=%08x, PCI status=%04x\n",
			status, pci_status);
	}

	return IRQ_HANDLED;
}

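/* Reclaim completed Tx descriptors: walk from tx_tail toward tx_head,
 * stopping at the first descriptor the chip still owns, unmapping buffers,
 * updating error counters, and waking the queue once enough slots are free.
 */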
static void de_tx (struct de_private *de)
{
	unsigned tx_head = de->tx_head;
	unsigned tx_tail = de->tx_tail;

	while (tx_tail != tx_head) {
		struct sk_buff *skb;
		u32 status;

		rmb();
		status = le32_to_cpu(de->tx_ring[tx_tail].opts1);
		if (status & DescOwn)
			break;

		skb = de->tx_skb[tx_tail].skb;
		BUG_ON(!skb);
		if (unlikely(skb == DE_DUMMY_SKB))
			goto next;

		if (unlikely(skb == DE_SETUP_SKB)) {
			pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
					 sizeof(de->setup_frame), PCI_DMA_TODEVICE);
			goto next;
		}

		pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
				 skb->len, PCI_DMA_TODEVICE);

		if (status & LastFrag) {
			if (status & TxError) {
				if (netif_msg_tx_err(de))
					printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
					       de->dev->name, status);
				de->net_stats.tx_errors++;
				if (status & TxOWC)
					de->net_stats.tx_window_errors++;
				if (status & TxMaxCol)
					de->net_stats.tx_aborted_errors++;
				if (status & TxLinkFail)
					de->net_stats.tx_carrier_errors++;
				if (status & TxFIFOUnder)
					de->net_stats.tx_fifo_errors++;
			} else {
				de->net_stats.tx_packets++;
				de->net_stats.tx_bytes += skb->len;
				if (netif_msg_tx_done(de))
					printk(KERN_DEBUG "%s: tx done, slot %d\n",
					       de->dev->name, tx_tail);
			}
			dev_kfree_skb_irq(skb);
		}

next:
		de->tx_skb[tx_tail].skb = NULL;

		tx_tail = NEXT_TX(tx_tail);
	}

	de->tx_tail = tx_tail;

	if (netif_queue_stopped(de->dev) && (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4)))
		netif_wake_queue(de->dev);
}

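/* Hard-start transmit: claim the next descriptor at tx_head, map the skb for
 * DMA, fill in length/flags and only then hand ownership (DescOwn) to the
 * chip, with write barriers so the chip never sees a half-written descriptor.
 */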
static netdev_tx_t de_start_xmit (struct sk_buff *skb,
				  struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	unsigned int entry, tx_free;
	u32 mapping, len, flags = FirstFrag | LastFrag;
	struct de_desc *txd;

	spin_lock_irq(&de->lock);

	tx_free = TX_BUFFS_AVAIL(de);
	if (tx_free == 0) {
		netif_stop_queue(dev);
		spin_unlock_irq(&de->lock);
		return NETDEV_TX_BUSY;
	}
	tx_free--;

	entry = de->tx_head;

	txd = &de->tx_ring[entry];

	len = skb->len;
	mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (entry == (DE_TX_RING_SIZE - 1))
		flags |= RingEnd;
	if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
		flags |= TxSwInt;
	flags |= len;
	txd->opts2 = cpu_to_le32(flags);
	txd->addr1 = cpu_to_le32(mapping);

	de->tx_skb[entry].skb = skb;
	de->tx_skb[entry].mapping = mapping;
	wmb();

	txd->opts1 = cpu_to_le32(DescOwn);
	wmb();

	de->tx_head = NEXT_TX(entry);
	if (netif_msg_tx_queued(de))
		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
		       dev->name, entry, skb->len);

	if (tx_free == 0)
		netif_stop_queue(dev);

	spin_unlock_irq(&de->lock);

	/* Trigger an immediate transmit demand. */
	dw32(TxPoll, NormalTxPoll);

	return NETDEV_TX_OK;
}

/* Set or clear the multicast filter for this adaptor.
   Note that we only use exclusion around actually queueing the
   new frame, not around filling de->setup_frame.  This is non-deterministic
   when re-entered but still correct. */

#undef set_bit_le
#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)

static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	u16 hash_table[32];
	struct netdev_hw_addr *ha;
	int i;
	u16 *eaddrs;

	memset(hash_table, 0, sizeof(hash_table));
	set_bit_le(255, hash_table);			/* Broadcast entry */
	/* This should work on big-endian machines as well. */
	netdev_for_each_mc_addr(ha, dev) {
		int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;

		set_bit_le(index, hash_table);
	}

	for (i = 0; i < 32; i++) {
		*setup_frm++ = hash_table[i];
		*setup_frm++ = hash_table[i];
	}
	setup_frm = &de->setup_frame[13*6];

	/* Fill the final entry with our physical address. */
	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}

static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u16 *eaddrs;

	/* We have <= 14 addresses so we can use the wonderful
	   16 address perfect filtering of the Tulip. */
	netdev_for_each_mc_addr(ha, dev) {
		eaddrs = (u16 *) ha->addr;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
	}
	/* Fill the unused entries with the broadcast address. */
	memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
	setup_frm = &de->setup_frame[15*6];

	/* Fill the final entry with our physical address. */
	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}


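/* Called under de->lock: push the freshly built setup frame through the Tx
 * ring as a special SetupFrame descriptor, preceded by a dummy descriptor
 * when not starting at slot 0 (to work around a chip errata), and update the
 * MacMode accept-all bits only if they actually changed.
 */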
static void __de_set_rx_mode (struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	u32 macmode;
	unsigned int entry;
	u32 mapping;
	struct de_desc *txd;
	struct de_desc *dummy_txd = NULL;

	macmode = dr32(MacMode) & ~(AcceptAllMulticast | AcceptAllPhys);

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		macmode |= AcceptAllMulticast | AcceptAllPhys;
		goto out;
	}

	if ((netdev_mc_count(dev) > 1000) || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter well -- accept all multicasts. */
		macmode |= AcceptAllMulticast;
		goto out;
	}

	/* Note that only the low-address shortword of setup_frame is valid!
	   The values are doubled for big-endian architectures. */
	if (netdev_mc_count(dev) > 14)	/* Must use a multicast hash table. */
		build_setup_frame_hash (de->setup_frame, dev);
	else
		build_setup_frame_perfect (de->setup_frame, dev);

	/*
	 * Now add this frame to the Tx list.
	 */

	entry = de->tx_head;

	/* Avoid a chip errata by prefixing a dummy entry. */
	if (entry != 0) {
		de->tx_skb[entry].skb = DE_DUMMY_SKB;

		dummy_txd = &de->tx_ring[entry];
		dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ?
				   cpu_to_le32(RingEnd) : 0;
		dummy_txd->addr1 = 0;

		/* Must set DescOwned later to avoid race with chip */

		entry = NEXT_TX(entry);
	}

	de->tx_skb[entry].skb = DE_SETUP_SKB;
	de->tx_skb[entry].mapping = mapping =
	    pci_map_single (de->pdev, de->setup_frame,
			    sizeof (de->setup_frame), PCI_DMA_TODEVICE);

	/* Put the setup frame on the Tx list. */
	txd = &de->tx_ring[entry];
	if (entry == (DE_TX_RING_SIZE - 1))
		txd->opts2 = cpu_to_le32(SetupFrame | RingEnd | sizeof (de->setup_frame));
	else
		txd->opts2 = cpu_to_le32(SetupFrame | sizeof (de->setup_frame));
	txd->addr1 = cpu_to_le32(mapping);
	wmb();

	txd->opts1 = cpu_to_le32(DescOwn);
	wmb();

	if (dummy_txd) {
		dummy_txd->opts1 = cpu_to_le32(DescOwn);
		wmb();
	}

	de->tx_head = NEXT_TX(entry);

	if (TX_BUFFS_AVAIL(de) == 0)
		netif_stop_queue(dev);

	/* Trigger an immediate transmit demand. */
	dw32(TxPoll, NormalTxPoll);

out:
	if (macmode != dr32(MacMode))
		dw32(MacMode, macmode);
}

static void de_set_rx_mode (struct net_device *dev)
{
	unsigned long flags;
	struct de_private *de = netdev_priv(dev);

	spin_lock_irqsave (&de->lock, flags);
	__de_set_rx_mode(dev);
	spin_unlock_irqrestore (&de->lock, flags);
}

static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
{
	if (unlikely(rx_missed & RxMissedOver))
		de->net_stats.rx_missed_errors += RxMissedMask;
	else
		de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask);
}

static void __de_get_stats(struct de_private *de)
{
	u32 tmp = dr32(RxMissed); /* self-clearing */

	de_rx_missed(de, tmp);
}

static struct net_device_stats *de_get_stats(struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);

	/* The chip only needs to report frames it silently dropped. */
	spin_lock_irq(&de->lock);
	if (netif_running(dev) && netif_device_present(dev))
		__de_get_stats(de);
	spin_unlock_irq(&de->lock);

	return &de->net_stats;
}

static inline int de_is_running (struct de_private *de)
{
	return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0;
}

static void de_stop_rxtx (struct de_private *de)
{
	u32 macmode;
	unsigned int i = 1300/100;

	macmode = dr32(MacMode);
	if (macmode & RxTx) {
		dw32(MacMode, macmode & ~RxTx);
		dr32(MacMode);
	}

	/* wait until in-flight frame completes.
	 * Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin)
	 * Typically expect this loop to end in < 50 us on 100BT.
	 */
	while (--i) {
		if (!de_is_running(de))
			return;
		udelay(100);
	}

	dev_warn(&de->dev->dev, "timeout expired stopping DMA\n");
}

static inline void de_start_rxtx (struct de_private *de)
{
	u32 macmode;

	macmode = dr32(MacMode);
	if ((macmode & RxTx) != RxTx) {
		dw32(MacMode, macmode | RxTx);
		dr32(MacMode);
	}
}

static void de_stop_hw (struct de_private *de)
{

	udelay(5);
	dw32(IntrMask, 0);

	de_stop_rxtx(de);

	dw32(MacStatus, dr32(MacStatus));

	udelay(10);

	de->rx_tail = 0;
	de->tx_head = de->tx_tail = 0;
}

static void de_link_up(struct de_private *de)
{
	if (!netif_carrier_ok(de->dev)) {
		netif_carrier_on(de->dev);
		if (netif_msg_link(de))
			dev_info(&de->dev->dev, "link up, media %s\n",
				 media_name[de->media_type]);
	}
}

static void de_link_down(struct de_private *de)
{
	if (netif_carrier_ok(de->dev)) {
		netif_carrier_off(de->dev);
		if (netif_msg_link(de))
			dev_info(&de->dev->dev, "link down\n");
	}
}

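/* Program the SIA for the selected medium: CSR13 is first cleared to reset
 * the PHY, CSR14/CSR15 and then CSR13 are loaded from the media table, and a
 * 10 ms settle delay is observed before MacMode (CSR6) is touched.
 */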
static void de_set_media (struct de_private *de)
{
	unsigned media = de->media_type;
	u32 macmode = dr32(MacMode);

	if (de_is_running(de))
		dev_warn(&de->dev->dev,
			 "chip is running while changing media!\n");

	if (de->de21040)
		dw32(CSR11, FULL_DUPLEX_MAGIC);
	dw32(CSR13, 0); /* Reset phy */
	dw32(CSR14, de->media[media].csr14);
	dw32(CSR15, de->media[media].csr15);
	dw32(CSR13, de->media[media].csr13);

	/* must delay 10ms before writing to other registers,
	 * especially CSR6
	 */
	mdelay(10);

	if (media == DE_MEDIA_TP_FD)
		macmode |= FullDuplex;
	else
		macmode &= ~FullDuplex;

	if (netif_msg_link(de)) {
		dev_info(&de->dev->dev, "set link %s\n", media_name[media]);
		dev_info(&de->dev->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
			 dr32(MacMode), dr32(SIAStatus),
			 dr32(CSR13), dr32(CSR14), dr32(CSR15));

		dev_info(&de->dev->dev,
			 "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
			 macmode, de->media[media].csr13,
			 de->media[media].csr14, de->media[media].csr15);
	}
	if (macmode != dr32(MacMode))
		dw32(MacMode, macmode);
}

static void de_next_media (struct de_private *de, u32 *media,
			   unsigned int n_media)
{
	unsigned int i;

	for (i = 0; i < n_media; i++) {
		if (de_ok_to_advertise(de, media[i])) {
			de->media_type = media[i];
			return;
		}
	}
}

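/* Periodic media poll for the 21040: while the link is up it simply re-arms
 * a 60 s timer; on loss of carrier it alternates between TP and AUI every
 * 5 s until the SIA reports a connection again.
 */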
static void de21040_media_timer (unsigned long data)
{
	struct de_private *de = (struct de_private *) data;
	struct net_device *dev = de->dev;
	u32 status = dr32(SIAStatus);
	unsigned int carrier;
	unsigned long flags;

	carrier = (status & NetCxnErr) ? 0 : 1;

	if (carrier) {
		if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
			goto no_link_yet;

		de->media_timer.expires = jiffies + DE_TIMER_LINK;
		add_timer(&de->media_timer);
		if (!netif_carrier_ok(dev))
			de_link_up(de);
		else
			if (netif_msg_timer(de))
				dev_info(&dev->dev, "%s link ok, status %x\n",
					 media_name[de->media_type], status);
		return;
	}

	de_link_down(de);

	if (de->media_lock)
		return;

	if (de->media_type == DE_MEDIA_AUI) {
		u32 next_state = DE_MEDIA_TP;
		de_next_media(de, &next_state, 1);
	} else {
		u32 next_state = DE_MEDIA_AUI;
		de_next_media(de, &next_state, 1);
	}

	spin_lock_irqsave(&de->lock, flags);
	de_stop_rxtx(de);
	spin_unlock_irqrestore(&de->lock, flags);
	de_set_media(de);
	de_start_rxtx(de);

no_link_yet:
	de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
	add_timer(&de->media_timer);

	if (netif_msg_timer(de))
		dev_info(&dev->dev, "no link, trying media %s, status %x\n",
			 media_name[de->media_type], status);
}

static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
{
	switch (new_media) {
	case DE_MEDIA_TP_AUTO:
		if (!(de->media_advertise & ADVERTISED_Autoneg))
			return 0;
		if (!(de->media_advertise & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full)))
			return 0;
		break;
	case DE_MEDIA_BNC:
		if (!(de->media_advertise & ADVERTISED_BNC))
			return 0;
		break;
	case DE_MEDIA_AUI:
		if (!(de->media_advertise & ADVERTISED_AUI))
			return 0;
		break;
	case DE_MEDIA_TP:
		if (!(de->media_advertise & ADVERTISED_10baseT_Half))
			return 0;
		break;
	case DE_MEDIA_TP_FD:
		if (!(de->media_advertise & ADVERTISED_10baseT_Full))
			return 0;
		break;
	}

	return 1;
}

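/* Periodic media poll for the 21041.  Unlike the 21040 path above, the SIA
 * reports activity on the non-selected port (NonselPortActive), which is used
 * as a hint when deciding which medium (TP/BNC/AUI) to try next.
 */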
static void de21041_media_timer (unsigned long data)
{
	struct de_private *de = (struct de_private *) data;
	struct net_device *dev = de->dev;
	u32 status = dr32(SIAStatus);
	unsigned int carrier;
	unsigned long flags;

	carrier = (status & NetCxnErr) ? 0 : 1;

	if (carrier) {
		if ((de->media_type == DE_MEDIA_TP_AUTO ||
		     de->media_type == DE_MEDIA_TP ||
		     de->media_type == DE_MEDIA_TP_FD) &&
		    (status & LinkFailStatus))
			goto no_link_yet;

		de->media_timer.expires = jiffies + DE_TIMER_LINK;
		add_timer(&de->media_timer);
		if (!netif_carrier_ok(dev))
			de_link_up(de);
		else
			if (netif_msg_timer(de))
				dev_info(&dev->dev,
					 "%s link ok, mode %x status %x\n",
					 media_name[de->media_type],
					 dr32(MacMode), status);
		return;
	}

	de_link_down(de);

	/* if media type locked, don't switch media */
	if (de->media_lock)
		goto set_media;

	/* if activity detected, use that as hint for new media type */
	if (status & NonselPortActive) {
		unsigned int have_media = 1;

		/* if AUI/BNC selected, then activity is on TP port */
		if (de->media_type == DE_MEDIA_AUI ||
		    de->media_type == DE_MEDIA_BNC) {
			if (de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))
				de->media_type = DE_MEDIA_TP_AUTO;
			else
				have_media = 0;
		}

		/* TP selected.  If there is only TP and BNC, then it's BNC */
		else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_BNC) &&
			 de_ok_to_advertise(de, DE_MEDIA_BNC))
			de->media_type = DE_MEDIA_BNC;

		/* TP selected.  If there is only TP and AUI, then it's AUI */
		else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_AUI) &&
			 de_ok_to_advertise(de, DE_MEDIA_AUI))
			de->media_type = DE_MEDIA_AUI;

		/* otherwise, ignore the hint */
		else
			have_media = 0;

		if (have_media)
			goto set_media;
	}

	/*
	 * Absent or ambiguous activity hint, move to next advertised
	 * media state.  If de->media_type is left unchanged, this
	 * simply resets the PHY and reloads the current media settings.
	 */
	if (de->media_type == DE_MEDIA_AUI) {
		u32 next_states[] = { DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
		de_next_media(de, next_states, ARRAY_SIZE(next_states));
	} else if (de->media_type == DE_MEDIA_BNC) {
		u32 next_states[] = { DE_MEDIA_TP_AUTO, DE_MEDIA_AUI };
		de_next_media(de, next_states, ARRAY_SIZE(next_states));
	} else {
		u32 next_states[] = { DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
		de_next_media(de, next_states, ARRAY_SIZE(next_states));
	}

set_media:
	spin_lock_irqsave(&de->lock, flags);
	de_stop_rxtx(de);
	spin_unlock_irqrestore(&de->lock, flags);
	de_set_media(de);
	de_start_rxtx(de);

no_link_yet:
	de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
	add_timer(&de->media_timer);

	if (netif_msg_timer(de))
		dev_info(&dev->dev, "no link, trying media %s, status %x\n",
			 media_name[de->media_type], status);
}

static void de_media_interrupt (struct de_private *de, u32 status)
{
	if (status & LinkPass) {
		de_link_up(de);
		mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
		return;
	}

	BUG_ON(!(status & LinkFail));

	if (netif_carrier_ok(de->dev)) {
		de_link_down(de);
		mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
	}
}

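/* Soft-reset the MAC by pulsing CmdReset in BusMode, then reprogram the bus
 * mode (burst length, cache alignment, descriptor skip length) and verify the
 * chip answers and that both DMA engines report themselves stopped.
 */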
static int de_reset_mac (struct de_private *de)
{
	u32 status, tmp;

	/*
	 * Reset MAC.  de4x5.c and tulip.c examined for "advice"
	 * in this area.
	 */

	if (dr32(BusMode) == 0xffffffff)
		return -EBUSY;

	/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
	dw32 (BusMode, CmdReset);
	mdelay (1);

	dw32 (BusMode, de_bus_mode);
	mdelay (1);

	for (tmp = 0; tmp < 5; tmp++) {
		dr32 (BusMode);
		mdelay (1);
	}

	mdelay (1);

	status = dr32(MacStatus);
	if (status & (RxState | TxState))
		return -EBUSY;
	if (status == 0xffffffff)
		return -ENODEV;
	return 0;
}

static void de_adapter_wake (struct de_private *de)
{
	u32 pmctl;

	if (de->de21040)
		return;

	pci_read_config_dword(de->pdev, PCIPM, &pmctl);
	if (pmctl & PM_Mask) {
		pmctl &= ~PM_Mask;
		pci_write_config_dword(de->pdev, PCIPM, pmctl);

		/* de4x5.c delays, so we do too */
		msleep(10);
	}
}

static void de_adapter_sleep (struct de_private *de)
{
	u32 pmctl;

	if (de->de21040)
		return;

	dw32(CSR13, 0); /* Reset phy */
	pci_read_config_dword(de->pdev, PCIPM, &pmctl);
	pmctl |= PM_Sleep;
	pci_write_config_dword(de->pdev, PCIPM, pmctl);
}

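/* Bring the chip from sleep to a fully programmed state: wake it via the
 * PCIPM config register, reset the MAC, select the media, load the Rx/Tx ring
 * base addresses, enable Rx/Tx and unmask interrupts.
 */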
static int de_init_hw (struct de_private *de)
{
	struct net_device *dev = de->dev;
	u32 macmode;
	int rc;

	de_adapter_wake(de);

	macmode = dr32(MacMode) & ~MacModeClear;

	rc = de_reset_mac(de);
	if (rc)
		return rc;

	de_set_media(de); /* reset phy */

	dw32(RxRingAddr, de->ring_dma);
	dw32(TxRingAddr, de->ring_dma + (sizeof(struct de_desc) * DE_RX_RING_SIZE));

	dw32(MacMode, RxTx | macmode);

	dr32(RxMissed); /* self-clearing */

	dw32(IntrMask, de_intr_mask);

	de_set_rx_mode(dev);

	return 0;
}

static int de_refill_rx (struct de_private *de)
{
	unsigned i;

	for (i = 0; i < DE_RX_RING_SIZE; i++) {
		struct sk_buff *skb;

		skb = dev_alloc_skb(de->rx_buf_sz);
		if (!skb)
			goto err_out;

		skb->dev = de->dev;

		de->rx_skb[i].mapping = pci_map_single(de->pdev,
			skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
		de->rx_skb[i].skb = skb;

		de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
		if (i == (DE_RX_RING_SIZE - 1))
			de->rx_ring[i].opts2 =
				cpu_to_le32(RingEnd | de->rx_buf_sz);
		else
			de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz);
		de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping);
		de->rx_ring[i].addr2 = 0;
	}

	return 0;

err_out:
	de_clean_rings(de);
	return -ENOMEM;
}

static int de_init_rings (struct de_private *de)
{
	memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
	de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);

	de->rx_tail = 0;
	de->tx_head = de->tx_tail = 0;

	return de_refill_rx (de);
}

static int de_alloc_rings (struct de_private *de)
{
	de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma);
	if (!de->rx_ring)
		return -ENOMEM;
	de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
	return de_init_rings(de);
}

static void de_clean_rings (struct de_private *de)
{
	unsigned i;

	memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE);
	de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
	wmb();
	memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
	de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
	wmb();

	for (i = 0; i < DE_RX_RING_SIZE; i++) {
		if (de->rx_skb[i].skb) {
			pci_unmap_single(de->pdev, de->rx_skb[i].mapping,
					 de->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(de->rx_skb[i].skb);
		}
	}

	for (i = 0; i < DE_TX_RING_SIZE; i++) {
		struct sk_buff *skb = de->tx_skb[i].skb;
		if ((skb) && (skb != DE_DUMMY_SKB)) {
			if (skb != DE_SETUP_SKB) {
				de->net_stats.tx_dropped++;
				pci_unmap_single(de->pdev,
					de->tx_skb[i].mapping,
					skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb(skb);
			} else {
				pci_unmap_single(de->pdev,
					de->tx_skb[i].mapping,
					sizeof(de->setup_frame),
					PCI_DMA_TODEVICE);
			}
		}
	}

	memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE);
	memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE);
}

static void de_free_rings (struct de_private *de)
{
	de_clean_rings(de);
	pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma);
	de->rx_ring = NULL;
	de->tx_ring = NULL;
}

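/* net_device open: size the Rx buffers for the current MTU, allocate and fill
 * the descriptor rings, grab the (shared) IRQ with the chip's interrupts
 * masked, then program the hardware and start the media timer.
 */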
static int de_open (struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	int rc;

	if (netif_msg_ifup(de))
		printk(KERN_DEBUG "%s: enabling interface\n", dev->name);

	de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	rc = de_alloc_rings(de);
	if (rc) {
		dev_err(&dev->dev, "ring allocation failure, err=%d\n", rc);
		return rc;
	}

	dw32(IntrMask, 0);

	rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		dev_err(&dev->dev, "IRQ %d request failure, err=%d\n",
			dev->irq, rc);
		goto err_out_free;
	}

	rc = de_init_hw(de);
	if (rc) {
		dev_err(&dev->dev, "h/w init failure, err=%d\n", rc);
		goto err_out_free_irq;
	}

	netif_start_queue(dev);
	mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);

	return 0;

err_out_free_irq:
	free_irq(dev->irq, dev);
err_out_free:
	de_free_rings(de);
	return rc;
}

static int de_close (struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	unsigned long flags;

	if (netif_msg_ifdown(de))
		printk(KERN_DEBUG "%s: disabling interface\n", dev->name);

	del_timer_sync(&de->media_timer);

	spin_lock_irqsave(&de->lock, flags);
	de_stop_hw(de);
	netif_stop_queue(dev);
	netif_carrier_off(dev);
	spin_unlock_irqrestore(&de->lock, flags);

	free_irq(dev->irq, dev);

	de_free_rings(de);
	de_adapter_sleep(de);
	return 0;
}

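/* Transmit watchdog: dump chip state, fully stop the hardware with the IRQ
 * disabled, discard whatever was left in the rings, then rebuild the rings
 * and reprogram the hardware before waking the queue again.
 */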
static void de_tx_timeout (struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);

	printk(KERN_DEBUG "%s: NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
	       dev->name, dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
	       de->rx_tail, de->tx_head, de->tx_tail);

	del_timer_sync(&de->media_timer);

	disable_irq(dev->irq);
	spin_lock_irq(&de->lock);

	de_stop_hw(de);
	netif_stop_queue(dev);
	netif_carrier_off(dev);

	spin_unlock_irq(&de->lock);
	enable_irq(dev->irq);

	/* Update the error counts. */
	__de_get_stats(de);

	synchronize_irq(dev->irq);
	de_clean_rings(de);

	de_init_rings(de);

	de_init_hw(de);

	netif_wake_queue(dev);
}

static void __de_get_regs(struct de_private *de, u8 *buf)
{
	int i;
	u32 *rbuf = (u32 *)buf;

	/* read all CSRs */
	for (i = 0; i < DE_NUM_REGS; i++)
		rbuf[i] = dr32(i * 8);

	/* handle self-clearing RxMissed counter, CSR8 */
	de_rx_missed(de, rbuf[8]);
}

static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
{
	ecmd->supported = de->media_supported;
	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->phy_address = 0;
	ecmd->advertising = de->media_advertise;

	switch (de->media_type) {
	case DE_MEDIA_AUI:
		ecmd->port = PORT_AUI;
		ecmd->speed = 5;
		break;
	case DE_MEDIA_BNC:
		ecmd->port = PORT_BNC;
		ecmd->speed = 2;
		break;
	default:
		ecmd->port = PORT_TP;
		ecmd->speed = SPEED_10;
		break;
	}

	if (dr32(MacMode) & FullDuplex)
		ecmd->duplex = DUPLEX_FULL;
	else
		ecmd->duplex = DUPLEX_HALF;

	if (de->media_lock)
		ecmd->autoneg = AUTONEG_DISABLE;
	else
		ecmd->autoneg = AUTONEG_ENABLE;

	/* ignore maxtxpkt, maxrxpkt for now */

	return 0;
}

static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
{
	u32 new_media;
	unsigned int media_lock;

	if (ecmd->speed != SPEED_10 && ecmd->speed != 5 && ecmd->speed != 2)
		return -EINVAL;
	if (de->de21040 && ecmd->speed == 2)
		return -EINVAL;
	if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
		return -EINVAL;
	if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC)
		return -EINVAL;
	if (de->de21040 && ecmd->port == PORT_BNC)
		return -EINVAL;
	if (ecmd->transceiver != XCVR_INTERNAL)
		return -EINVAL;
	if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	if (ecmd->advertising & ~de->media_supported)
		return -EINVAL;
	if (ecmd->autoneg == AUTONEG_ENABLE &&
	    (!(ecmd->advertising & ADVERTISED_Autoneg)))
		return -EINVAL;

	switch (ecmd->port) {
	case PORT_AUI:
		new_media = DE_MEDIA_AUI;
		if (!(ecmd->advertising & ADVERTISED_AUI))
			return -EINVAL;
		break;
	case PORT_BNC:
		new_media = DE_MEDIA_BNC;
		if (!(ecmd->advertising & ADVERTISED_BNC))
			return -EINVAL;
		break;
	default:
		if (ecmd->autoneg == AUTONEG_ENABLE)
			new_media = DE_MEDIA_TP_AUTO;
		else if (ecmd->duplex == DUPLEX_FULL)
			new_media = DE_MEDIA_TP_FD;
		else
			new_media = DE_MEDIA_TP;
		if (!(ecmd->advertising & ADVERTISED_TP))
			return -EINVAL;
		if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half)))
			return -EINVAL;
		break;
	}

	media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1;

	if ((new_media == de->media_type) &&
	    (media_lock == de->media_lock) &&
	    (ecmd->advertising == de->media_advertise))
		return 0; /* nothing to change */

	de_link_down(de);
	de_stop_rxtx(de);

	de->media_type = new_media;
	de->media_lock = media_lock;
	de->media_advertise = ecmd->advertising;
	de_set_media(de);

	return 0;
}

static void de_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct de_private *de = netdev_priv(dev);

	strcpy (info->driver, DRV_NAME);
	strcpy (info->version, DRV_VERSION);
	strcpy (info->bus_info, pci_name(de->pdev));
	info->eedump_len = DE_EEPROM_SIZE;
}

static int de_get_regs_len(struct net_device *dev)
{
	return DE_REGS_SIZE;
}

static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct de_private *de = netdev_priv(dev);
	int rc;

	spin_lock_irq(&de->lock);
	rc = __de_get_settings(de, ecmd);
	spin_unlock_irq(&de->lock);

	return rc;
}

static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct de_private *de = netdev_priv(dev);
	int rc;

	spin_lock_irq(&de->lock);
	rc = __de_set_settings(de, ecmd);
	spin_unlock_irq(&de->lock);

	return rc;
}

static u32 de_get_msglevel(struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);

	return de->msg_enable;
}

static void de_set_msglevel(struct net_device *dev, u32 msglvl)
{
	struct de_private *de = netdev_priv(dev);

	de->msg_enable = msglvl;
}

static int de_get_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data)
{
	struct de_private *de = netdev_priv(dev);

	if (!de->ee_data)
		return -EOPNOTSUPP;
	if ((eeprom->offset != 0) || (eeprom->magic != 0) ||
	    (eeprom->len != DE_EEPROM_SIZE))
		return -EINVAL;
	memcpy(data, de->ee_data, eeprom->len);

	return 0;
}

static int de_nway_reset(struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	u32 status;

	if (de->media_type != DE_MEDIA_TP_AUTO)
		return -EINVAL;
	if (netif_carrier_ok(de->dev))
		de_link_down(de);

	status = dr32(SIAStatus);
	dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
	if (netif_msg_link(de))
		dev_info(&de->dev->dev, "link nway restart, status %x,%x\n",
			 status, dr32(SIAStatus));
	return 0;
}

static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			void *data)
{
	struct de_private *de = netdev_priv(dev);

	regs->version = (DE_REGS_VER << 2) | de->de21040;

	spin_lock_irq(&de->lock);
	__de_get_regs(de, data);
	spin_unlock_irq(&de->lock);
}

static const struct ethtool_ops de_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	.get_drvinfo		= de_get_drvinfo,
	.get_regs_len		= de_get_regs_len,
	.get_settings		= de_get_settings,
	.set_settings		= de_set_settings,
	.get_msglevel		= de_get_msglevel,
	.set_msglevel		= de_set_msglevel,
	.get_eeprom		= de_get_eeprom,
	.nway_reset		= de_nway_reset,
	.get_regs		= de_get_regs,
};

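/* On the 21040 the MAC address is read one byte at a time through the ROMCmd
 * register, spinning until the returned value goes non-negative (data ready),
 * rather than being parsed from the SROM as on the 21041 below.
 */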
static void __devinit de21040_get_mac_address (struct de_private *de)
{
	unsigned i;

	dw32 (ROMCmd, 0);	/* Reset the pointer with a dummy write. */
	udelay(5);

	for (i = 0; i < 6; i++) {
		int value, boguscnt = 100000;
		do {
			value = dr32(ROMCmd);
			rmb();
		} while (value < 0 && --boguscnt > 0);
		de->dev->dev_addr[i] = value;
		udelay(1);
		if (boguscnt <= 0)
			pr_warning(PFX "timeout reading 21040 MAC address byte %u\n", i);
	}
}

static void __devinit de21040_get_media_info(struct de_private *de)
{
	unsigned int i;

	de->media_type = DE_MEDIA_TP;
	de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full |
			       SUPPORTED_10baseT_Half | SUPPORTED_AUI;
	de->media_advertise = de->media_supported;

	for (i = 0; i < DE_MAX_MEDIA; i++) {
		switch (i) {
		case DE_MEDIA_AUI:
		case DE_MEDIA_TP:
		case DE_MEDIA_TP_FD:
			de->media[i].type = i;
			de->media[i].csr13 = t21040_csr13[i];
			de->media[i].csr14 = t21040_csr14[i];
			de->media[i].csr15 = t21040_csr15[i];
			break;
		default:
			de->media[i].type = DE_MEDIA_INVALID;
			break;
		}
	}
}

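/* Bit-banged access to the serial EEPROM behind ROMCmd: the read command and
 * address bits are clocked out one bit per EE_SHIFT_CLK pulse, then 16 data
 * bits are clocked back in.
 */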
/* Note: this routine returns extra data bits for size detection. */
static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, int addr_len)
{
	int i;
	unsigned retval = 0;
	void __iomem *ee_addr = regs + ROMCmd;
	int read_cmd = location | (EE_READ_CMD << addr_len);

	writel(EE_ENB & ~EE_CS, ee_addr);
	writel(EE_ENB, ee_addr);

	/* Shift the read command bits out. */
	for (i = 4 + addr_len; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
		writel(EE_ENB | dataval, ee_addr);
		readl(ee_addr);
		writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
		readl(ee_addr);
		retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
	}
	writel(EE_ENB, ee_addr);
	readl(ee_addr);

	for (i = 16; i > 0; i--) {
		writel(EE_ENB | EE_SHIFT_CLK, ee_addr);
		readl(ee_addr);
		retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
		writel(EE_ENB, ee_addr);
		readl(ee_addr);
	}

	/* Terminate the EEPROM access. */
	writel(EE_ENB & ~EE_CS, ee_addr);
	return retval;
}

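/* Read the whole SROM, extract the MAC address, then walk controller 0's info
 * leaf: each media block records one medium the board supports and may supply
 * custom CSR13/14/15 values for it; anything not provided falls back to the
 * t21041_* defaults.
 */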
static void __devinit de21041_get_srom_info (struct de_private *de)
{
	unsigned i, sa_offset = 0, ofs;
	u8 ee_data[DE_EEPROM_SIZE + 6] = {};
	unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6;
	struct de_srom_info_leaf *il;
	void *bufp;

	/* download entire eeprom */
	for (i = 0; i < DE_EEPROM_WORDS; i++)
		((__le16 *)ee_data)[i] =
			cpu_to_le16(tulip_read_eeprom(de->regs, i, ee_addr_size));

	/* DEC now has a specification but early board makers
	   just put the address in the first EEPROM locations. */
	/* This does memcmp(eedata, eedata+16, 8) */

#ifndef CONFIG_MIPS_COBALT

	for (i = 0; i < 8; i ++)
		if (ee_data[i] != ee_data[16+i])
			sa_offset = 20;

#endif

	/* store MAC address */
	for (i = 0; i < 6; i ++)
		de->dev->dev_addr[i] = ee_data[i + sa_offset];

	/* get offset of controller 0 info leaf.  ignore 2nd byte. */
	ofs = ee_data[SROMC0InfoLeaf];
	if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block)))
		goto bad_srom;

	/* get pointer to info leaf */
	il = (struct de_srom_info_leaf *) &ee_data[ofs];

	/* paranoia checks */
	if (il->n_blocks == 0)
		goto bad_srom;
	if ((sizeof(ee_data) - ofs) <
	    (sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks)))
		goto bad_srom;

	/* get default media type */
	switch (get_unaligned(&il->default_media)) {
	case 0x0001:  de->media_type = DE_MEDIA_BNC; break;
	case 0x0002:  de->media_type = DE_MEDIA_AUI; break;
	case 0x0204:  de->media_type = DE_MEDIA_TP_FD; break;
	default: de->media_type = DE_MEDIA_TP_AUTO; break;
	}

	if (netif_msg_probe(de))
		pr_info("de%d: SROM leaf offset %u, default media %s\n",
			de->board_idx, ofs, media_name[de->media_type]);

	/* init SIA register values to defaults */
	for (i = 0; i < DE_MAX_MEDIA; i++) {
		de->media[i].type = DE_MEDIA_INVALID;
		de->media[i].csr13 = 0xffff;
		de->media[i].csr14 = 0xffff;
		de->media[i].csr15 = 0xffff;
	}

	/* parse media blocks to see what medias are supported,
	 * and if any custom CSR values are provided
	 */
	bufp = ((void *)il) + sizeof(*il);
	for (i = 0; i < il->n_blocks; i++) {
		struct de_srom_media_block *ib = bufp;
		unsigned idx;

		/* index based on media type in media block */
		switch(ib->opts & MediaBlockMask) {
		case 0: /* 10baseT */
			de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half
					  | SUPPORTED_Autoneg;
			idx = DE_MEDIA_TP;
			de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
			break;
		case 1: /* BNC */
			de->media_supported |= SUPPORTED_BNC;
			idx = DE_MEDIA_BNC;
			break;
		case 2: /* AUI */
			de->media_supported |= SUPPORTED_AUI;
			idx = DE_MEDIA_AUI;
			break;
		case 4: /* 10baseT-FD */
			de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full
					  | SUPPORTED_Autoneg;
			idx = DE_MEDIA_TP_FD;
			de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
			break;
		default:
			goto bad_srom;
		}

		de->media[idx].type = idx;

		if (netif_msg_probe(de))
			pr_info("de%d: media block #%u: %s",
				de->board_idx, i,
				media_name[de->media[idx].type]);

		bufp += sizeof (ib->opts);

		if (ib->opts & MediaCustomCSRs) {
			de->media[idx].csr13 = get_unaligned(&ib->csr13);
			de->media[idx].csr14 = get_unaligned(&ib->csr14);
			de->media[idx].csr15 = get_unaligned(&ib->csr15);
			bufp += sizeof(ib->csr13) + sizeof(ib->csr14) +
				sizeof(ib->csr15);

			if (netif_msg_probe(de))
				pr_cont(" (%x,%x,%x)\n",
					de->media[idx].csr13,
					de->media[idx].csr14,
					de->media[idx].csr15);

		} else if (netif_msg_probe(de))
			pr_cont("\n");

		if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
			break;
	}

	de->media_advertise = de->media_supported;

fill_defaults:
	/* fill in defaults, for cases where custom CSRs not used */
	for (i = 0; i < DE_MAX_MEDIA; i++) {
		if (de->media[i].csr13 == 0xffff)
			de->media[i].csr13 = t21041_csr13[i];
		if (de->media[i].csr14 == 0xffff) {
			/* autonegotiation is broken at least on some chip
			   revisions - rev. 0x21 works, 0x11 does not */
			if (de->pdev->revision < 0x20)
				de->media[i].csr14 = t21041_csr14_brk[i];
			else
				de->media[i].csr14 = t21041_csr14[i];
		}
		if (de->media[i].csr15 == 0xffff)
			de->media[i].csr15 = t21041_csr15[i];
	}

	de->ee_data = kmemdup(&ee_data[0], DE_EEPROM_SIZE, GFP_KERNEL);

	return;

bad_srom:
	/* for error cases, it's ok to assume we support all these */
	for (i = 0; i < DE_MAX_MEDIA; i++)
		de->media[i].type = i;
	de->media_supported =
		SUPPORTED_10baseT_Half |
		SUPPORTED_10baseT_Full |
		SUPPORTED_Autoneg |
		SUPPORTED_TP |
		SUPPORTED_AUI |
		SUPPORTED_BNC;
	goto fill_defaults;
}

90d8743d
SH
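/* Standard net_device_ops; MTU and MAC address changes use the generic
 * eth_change_mtu()/eth_mac_addr() helpers, which keep the MTU within the
 * standard Ethernet payload range. */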
1947static const struct net_device_ops de_netdev_ops = {
1948 .ndo_open = de_open,
1949 .ndo_stop = de_close,
1950 .ndo_set_multicast_list = de_set_rx_mode,
1951 .ndo_start_xmit = de_start_xmit,
1952 .ndo_get_stats = de_get_stats,
1953 .ndo_tx_timeout = de_tx_timeout,
1954 .ndo_change_mtu = eth_change_mtu,
1955 .ndo_set_mac_address = eth_mac_addr,
1956 .ndo_validate_addr = eth_validate_addr,
1957};
1958
4a1d2d81 1959static int __devinit de_init_one (struct pci_dev *pdev,
1da177e4
LT
1960 const struct pci_device_id *ent)
1961{
1962 struct net_device *dev;
1963 struct de_private *de;
1964 int rc;
1965 void __iomem *regs;
afc7097f 1966 unsigned long pciaddr;
1da177e4
LT
1967 static int board_idx = -1;
1968
1969 board_idx++;
1970
1971#ifndef MODULE
1972 if (board_idx == 0)
1973 printk("%s", version);
1974#endif
1975
1976 /* allocate a new ethernet device structure, and fill in defaults */
1977 dev = alloc_etherdev(sizeof(struct de_private));
1978 if (!dev)
1979 return -ENOMEM;
1980
90d8743d 1981 dev->netdev_ops = &de_netdev_ops;
1da177e4 1982 SET_NETDEV_DEV(dev, &pdev->dev);
1da177e4 1983 dev->ethtool_ops = &de_ethtool_ops;
1da177e4
LT
1984 dev->watchdog_timeo = TX_TIMEOUT;
1985
8f15ea42 1986 de = netdev_priv(dev);
1da177e4
LT
1987 de->de21040 = ent->driver_data == 0 ? 1 : 0;
1988 de->pdev = pdev;
1989 de->dev = dev;
1990 de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug);
1991 de->board_idx = board_idx;
1992 spin_lock_init (&de->lock);
1993 init_timer(&de->media_timer);
1994 if (de->de21040)
1995 de->media_timer.function = de21040_media_timer;
1996 else
1997 de->media_timer.function = de21041_media_timer;
1998 de->media_timer.data = (unsigned long) de;
1999
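	/* start the interface in a quiescent state: no carrier and a
	 * stopped Tx queue until it is actually opened */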
2000 netif_carrier_off(dev);
2001 netif_stop_queue(dev);
2002
2003 /* wake up device, assign resources */
2004 rc = pci_enable_device(pdev);
2005 if (rc)
2006 goto err_out_free;
2007
2008	/* reserve PCI resources so this driver has exclusive ownership of the device's regions */
2009 rc = pci_request_regions(pdev, DRV_NAME);
2010 if (rc)
2011 goto err_out_disable;
2012
2013	/* check for an invalid IRQ value (0 or 1 means no usable interrupt was assigned) */
2014 if (pdev->irq < 2) {
2015 rc = -EIO;
f639dc7d 2016 pr_err(PFX "invalid irq (%d) for pci dev %s\n",
1da177e4
LT
2017 pdev->irq, pci_name(pdev));
2018 goto err_out_res;
2019 }
2020
2021 dev->irq = pdev->irq;
2022
2023	/* obtain and check validity of the memory-mapped CSR region (PCI BAR 1) */
2024 pciaddr = pci_resource_start(pdev, 1);
2025 if (!pciaddr) {
2026 rc = -EIO;
f639dc7d 2027 pr_err(PFX "no MMIO resource for pci dev %s\n", pci_name(pdev));
1da177e4
LT
2028 goto err_out_res;
2029 }
2030 if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
2031 rc = -EIO;
f639dc7d
JP
2032 pr_err(PFX "MMIO resource (%llx) too small on pci dev %s\n",
2033 (unsigned long long)pci_resource_len(pdev, 1),
2034 pci_name(pdev));
1da177e4
LT
2035 goto err_out_res;
2036 }
2037
2038 /* remap CSR registers */
2039 regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
2040 if (!regs) {
2041 rc = -EIO;
f639dc7d
JP
2042 pr_err(PFX "Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
2043 (unsigned long long)pci_resource_len(pdev, 1),
2044 pciaddr, pci_name(pdev));
1da177e4
LT
2045 goto err_out_res;
2046 }
2047 dev->base_addr = (unsigned long) regs;
2048 de->regs = regs;
2049
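	/* wake the chip from any sleep/snooze power state before touching it;
	 * the matching de_adapter_sleep() below parks it again once probing
	 * is complete */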
2050 de_adapter_wake(de);
2051
2052 /* make sure hardware is not running */
2053 rc = de_reset_mac(de);
2054 if (rc) {
f639dc7d 2055 pr_err(PFX "Cannot reset MAC, pci dev %s\n", pci_name(pdev));
1da177e4
LT
2056 goto err_out_iomap;
2057 }
2058
2059 /* get MAC address, initialize default media type and
2060 * get list of supported media
2061 */
2062 if (de->de21040) {
2063 de21040_get_mac_address(de);
2064 de21040_get_media_info(de);
2065 } else {
2066 de21041_get_srom_info(de);
2067 }
2068
2069 /* register new network interface with kernel */
2070 rc = register_netdev(dev);
2071 if (rc)
2072 goto err_out_iomap;
2073
2074 /* print info about board and interface just registered */
f639dc7d
JP
2075 dev_info(&dev->dev, "%s at 0x%lx, %pM, IRQ %d\n",
2076 de->de21040 ? "21040" : "21041",
2077 dev->base_addr,
2078 dev->dev_addr,
2079 dev->irq);
1da177e4
LT
2080
2081 pci_set_drvdata(pdev, dev);
2082
2083 /* enable busmastering */
2084 pci_set_master(pdev);
2085
2086	/* put adapter to sleep; it is woken again when the interface is opened */
2087 de_adapter_sleep(de);
2088
2089 return 0;
2090
2091err_out_iomap:
b4558ea9 2092 kfree(de->ee_data);
1da177e4
LT
2093 iounmap(regs);
2094err_out_res:
2095 pci_release_regions(pdev);
2096err_out_disable:
2097 pci_disable_device(pdev);
2098err_out_free:
2099 free_netdev(dev);
2100 return rc;
2101}
2102
4a1d2d81 2103static void __devexit de_remove_one (struct pci_dev *pdev)
1da177e4
LT
2104{
2105 struct net_device *dev = pci_get_drvdata(pdev);
8f15ea42 2106 struct de_private *de = netdev_priv(dev);
1da177e4 2107
7e0b58f3 2108 BUG_ON(!dev);
1da177e4 2109 unregister_netdev(dev);
b4558ea9 2110 kfree(de->ee_data);
1da177e4
LT
2111 iounmap(de->regs);
2112 pci_release_regions(pdev);
2113 pci_disable_device(pdev);
2114 pci_set_drvdata(pdev, NULL);
2115 free_netdev(dev);
2116}
2117
2118#ifdef CONFIG_PM
2119
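/* Legacy PCI power-management hooks.  rtnl_lock() serializes them against
 * open/close, so the ring and hardware state cannot change underneath a
 * suspend or resume in progress. */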
05adc3b7 2120static int de_suspend (struct pci_dev *pdev, pm_message_t state)
1da177e4
LT
2121{
2122 struct net_device *dev = pci_get_drvdata (pdev);
8f15ea42 2123 struct de_private *de = netdev_priv(dev);
1da177e4
LT
2124
2125 rtnl_lock();
2126 if (netif_running (dev)) {
2127 del_timer_sync(&de->media_timer);
2128
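		/* mask the interrupt while the hardware is stopped under the
		 * lock; synchronize_irq() further down waits out any handler
		 * that was already running before the rings are freed */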
2129 disable_irq(dev->irq);
2130 spin_lock_irq(&de->lock);
2131
2132 de_stop_hw(de);
2133 netif_stop_queue(dev);
2134 netif_device_detach(dev);
2135 netif_carrier_off(dev);
2136
2137 spin_unlock_irq(&de->lock);
2138 enable_irq(dev->irq);
f3b197ac 2139
1da177e4
LT
2140 /* Update the error counts. */
2141 __de_get_stats(de);
2142
2143 synchronize_irq(dev->irq);
2144 de_clean_rings(de);
2145
2146 de_adapter_sleep(de);
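		/* with the rings freed and the chip back in its low-power
		 * state, the PCI device itself can finally be disabled */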
2147 pci_disable_device(pdev);
2148 } else {
2149 netif_device_detach(dev);
2150 }
2151 rtnl_unlock();
2152 return 0;
2153}
2154
2155static int de_resume (struct pci_dev *pdev)
2156{
2157 struct net_device *dev = pci_get_drvdata (pdev);
8f15ea42 2158 struct de_private *de = netdev_priv(dev);
9f486ae1 2159 int retval = 0;
1da177e4
LT
2160
2161 rtnl_lock();
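	/* nothing to do if the device was never detached; if the interface
	 * was down across the suspend, just re-attach it without touching
	 * the hardware */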
2162 if (netif_device_present(dev))
2163 goto out;
9f486ae1
VH
2164 if (!netif_running(dev))
2165 goto out_attach;
2166 if ((retval = pci_enable_device(pdev))) {
f639dc7d 2167 dev_err(&dev->dev, "pci_enable_device failed in resume\n");
9f486ae1 2168 goto out;
1da177e4 2169 }
b0255a02
OZ
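	/* re-enable bus mastering and rebuild the DMA rings and MAC state
	 * from scratch; nothing is assumed to have survived the power
	 * transition */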
2170 pci_set_master(pdev);
2171 de_init_rings(de);
9f486ae1
VH
2172 de_init_hw(de);
2173out_attach:
2174 netif_device_attach(dev);
1da177e4
LT
2175out:
2176 rtnl_unlock();
2177	return retval;
2178}
2179
2180#endif /* CONFIG_PM */
2181
2182static struct pci_driver de_driver = {
2183 .name = DRV_NAME,
2184 .id_table = de_pci_tbl,
2185 .probe = de_init_one,
4a1d2d81 2186 .remove = __devexit_p(de_remove_one),
1da177e4
LT
2187#ifdef CONFIG_PM
2188 .suspend = de_suspend,
2189 .resume = de_resume,
2190#endif
2191};
2192
2193static int __init de_init (void)
2194{
2195#ifdef MODULE
2196 printk("%s", version);
2197#endif
29917620 2198 return pci_register_driver(&de_driver);
1da177e4
LT
2199}
2200
2201static void __exit de_exit (void)
2202{
2203 pci_unregister_driver (&de_driver);
2204}
2205
2206module_init(de_init);
2207module_exit(de_exit);