de2104x: disable autonegotiation on broken hardware
[net-next-2.6.git] / drivers / net / tulip / de2104x.c
1da177e4
LT
1/* de2104x.c: A Linux PCI Ethernet driver for Intel/Digital 21040/1 chips. */
2/*
3 Copyright 2001,2003 Jeff Garzik <jgarzik@pobox.com>
4
5 Copyright 1994, 1995 Digital Equipment Corporation. [de4x5.c]
6 Written/copyright 1994-2001 by Donald Becker. [tulip.c]
7
8 This software may be used and distributed according to the terms of
9 the GNU General Public License (GPL), incorporated herein by reference.
10 Drivers based on or derived from this code fall under the GPL and must
11 retain the authorship, copyright and license notice. This file is not
12 a complete program and may only be used when the entire operating
13 system is licensed under the GPL.
14
15 See the file COPYING in this distribution for more information.
16
17 TODO, in rough priority order:
18 * Support forcing media type with a module parameter,
19 like dl2k.c/sundance.c
20 * Constants (module parms?) for Rx work limit
21 * Complete reset on PciErr
22 * Jumbo frames / dev->change_mtu
23 * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
24 * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
25 * Implement Tx software interrupt mitigation via
26 Tx descriptor bit
27
28 */
29
30#define DRV_NAME "de2104x"
31#define DRV_VERSION "0.7"
32#define DRV_RELDATE "Mar 17, 2004"
33
1da177e4
LT
34#include <linux/module.h>
35#include <linux/kernel.h>
36#include <linux/netdevice.h>
37#include <linux/etherdevice.h>
38#include <linux/init.h>
39#include <linux/pci.h>
40#include <linux/delay.h>
41#include <linux/ethtool.h>
42#include <linux/compiler.h>
43#include <linux/rtnetlink.h>
44#include <linux/crc32.h>
5a0e3ad6 45#include <linux/slab.h>
1da177e4
LT
46
47#include <asm/io.h>
48#include <asm/irq.h>
49#include <asm/uaccess.h>
50#include <asm/unaligned.h>
51
52/* These identify the driver base version and may not be removed. */
53static char version[] =
54KERN_INFO DRV_NAME " PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
55
56MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
57MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
58MODULE_LICENSE("GPL");
59MODULE_VERSION(DRV_VERSION);
60
61static int debug = -1;
62module_param (debug, int, 0);
63MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");
64
65/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
8e95a202
JP
66#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
67 defined(CONFIG_SPARC) || defined(__ia64__) || \
68 defined(__sh__) || defined(__mips__)
1da177e4
LT
69static int rx_copybreak = 1518;
70#else
71static int rx_copybreak = 100;
72#endif
73module_param (rx_copybreak, int, 0);
74MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied");
75
76#define PFX DRV_NAME ": "
77
78#define DE_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
79 NETIF_MSG_PROBE | \
80 NETIF_MSG_LINK | \
81 NETIF_MSG_IFDOWN | \
82 NETIF_MSG_IFUP | \
83 NETIF_MSG_RX_ERR | \
84 NETIF_MSG_TX_ERR)
85
b77e5228
RS
86/* Descriptor skip length in 32 bit longwords. */
87#ifndef CONFIG_DE2104X_DSL
88#define DSL 0
89#else
90#define DSL CONFIG_DE2104X_DSL
91#endif
92
1da177e4
LT
93#define DE_RX_RING_SIZE 64
94#define DE_TX_RING_SIZE 64
95#define DE_RING_BYTES \
96 ((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \
97 (sizeof(struct de_desc) * DE_TX_RING_SIZE))
98#define NEXT_TX(N) (((N) + 1) & (DE_TX_RING_SIZE - 1))
99#define NEXT_RX(N) (((N) + 1) & (DE_RX_RING_SIZE - 1))
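/* The macro below leaves one descriptor permanently unused so that
 * tx_head == tx_tail can only mean "ring empty", never "ring full". */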
100#define TX_BUFFS_AVAIL(CP) \
101 (((CP)->tx_tail <= (CP)->tx_head) ? \
102 (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head : \
103 (CP)->tx_tail - (CP)->tx_head - 1)
104
105#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
106#define RX_OFFSET 2 /* reserved in copied skbs so the IP header lands 32-bit aligned */
107
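/* Sentinel values stored in ring_info.skb for Tx slots that carry a setup
 * frame or an erratum-workaround dummy descriptor rather than a real packet;
 * de_tx() and de_clean_rings() check for them so these slots are handled
 * specially instead of being unmapped and freed as ordinary skbs. */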
108#define DE_SETUP_SKB ((struct sk_buff *) 1)
109#define DE_DUMMY_SKB ((struct sk_buff *) 2)
110#define DE_SETUP_FRAME_WORDS 96
111#define DE_EEPROM_WORDS 256
112#define DE_EEPROM_SIZE (DE_EEPROM_WORDS * sizeof(u16))
113#define DE_MAX_MEDIA 5
114
115#define DE_MEDIA_TP_AUTO 0
116#define DE_MEDIA_BNC 1
117#define DE_MEDIA_AUI 2
118#define DE_MEDIA_TP 3
119#define DE_MEDIA_TP_FD 4
120#define DE_MEDIA_INVALID DE_MAX_MEDIA
121#define DE_MEDIA_FIRST 0
122#define DE_MEDIA_LAST (DE_MAX_MEDIA - 1)
123#define DE_AUI_BNC (SUPPORTED_AUI | SUPPORTED_BNC)
124
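/* Media timer poll intervals: once a minute while a link is up,
 * every five seconds while still hunting for a link. */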
125#define DE_TIMER_LINK (60 * HZ)
126#define DE_TIMER_NO_LINK (5 * HZ)
127
128#define DE_NUM_REGS 16
129#define DE_REGS_SIZE (DE_NUM_REGS * sizeof(u32))
130#define DE_REGS_VER 1
131
132/* Time in jiffies before concluding the transmitter is hung. */
133#define TX_TIMEOUT (6*HZ)
134
1da177e4
LT
135/* This is a mysterious value that can be written to CSR11 in the 21040 (only)
136 to support a pre-NWay full-duplex signaling mechanism using short frames.
137 No one knows what it should be, but if left at its default value some
138 10base2(!) packets trigger a full-duplex-request interrupt. */
139#define FULL_DUPLEX_MAGIC 0x6969
140
141enum {
142 /* NIC registers */
143 BusMode = 0x00,
144 TxPoll = 0x08,
145 RxPoll = 0x10,
146 RxRingAddr = 0x18,
147 TxRingAddr = 0x20,
148 MacStatus = 0x28,
149 MacMode = 0x30,
150 IntrMask = 0x38,
151 RxMissed = 0x40,
152 ROMCmd = 0x48,
153 CSR11 = 0x58,
154 SIAStatus = 0x60,
155 CSR13 = 0x68,
156 CSR14 = 0x70,
157 CSR15 = 0x78,
158 PCIPM = 0x40,
159
160 /* BusMode bits */
161 CmdReset = (1 << 0),
162 CacheAlign16 = 0x00008000,
163 BurstLen4 = 0x00000400,
b77e5228 164 DescSkipLen = (DSL << 2),
1da177e4
LT
165
166 /* Rx/TxPoll bits */
167 NormalTxPoll = (1 << 0),
168 NormalRxPoll = (1 << 0),
169
170 /* Tx/Rx descriptor status bits */
171 DescOwn = (1 << 31),
172 RxError = (1 << 15),
173 RxErrLong = (1 << 7),
174 RxErrCRC = (1 << 1),
175 RxErrFIFO = (1 << 0),
176 RxErrRunt = (1 << 11),
177 RxErrFrame = (1 << 14),
178 RingEnd = (1 << 25),
179 FirstFrag = (1 << 29),
180 LastFrag = (1 << 30),
181 TxError = (1 << 15),
182 TxFIFOUnder = (1 << 1),
183 TxLinkFail = (1 << 2) | (1 << 10) | (1 << 11),
184 TxMaxCol = (1 << 8),
185 TxOWC = (1 << 9),
186 TxJabber = (1 << 14),
187 SetupFrame = (1 << 27),
188 TxSwInt = (1 << 31),
189
190 /* MacStatus bits */
191 IntrOK = (1 << 16),
192 IntrErr = (1 << 15),
193 RxIntr = (1 << 6),
194 RxEmpty = (1 << 7),
195 TxIntr = (1 << 0),
196 TxEmpty = (1 << 2),
197 PciErr = (1 << 13),
198 TxState = (1 << 22) | (1 << 21) | (1 << 20),
199 RxState = (1 << 19) | (1 << 18) | (1 << 17),
200 LinkFail = (1 << 12),
201 LinkPass = (1 << 4),
202 RxStopped = (1 << 8),
203 TxStopped = (1 << 1),
204
205 /* MacMode bits */
206 TxEnable = (1 << 13),
207 RxEnable = (1 << 1),
208 RxTx = TxEnable | RxEnable,
209 FullDuplex = (1 << 9),
210 AcceptAllMulticast = (1 << 7),
211 AcceptAllPhys = (1 << 6),
212 BOCnt = (1 << 5),
213 MacModeClear = (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) |
214 RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast,
215
216 /* ROMCmd bits */
217 EE_SHIFT_CLK = 0x02, /* EEPROM shift clock. */
218 EE_CS = 0x01, /* EEPROM chip select. */
219 EE_DATA_WRITE = 0x04, /* Data from the Tulip to EEPROM. */
220 EE_WRITE_0 = 0x01,
221 EE_WRITE_1 = 0x05,
222 EE_DATA_READ = 0x08, /* Data from the EEPROM chip. */
223 EE_ENB = (0x4800 | EE_CS),
224
225 /* The EEPROM commands include the always-set leading bit. */
226 EE_READ_CMD = 6,
227
228 /* RxMissed bits */
229 RxMissedOver = (1 << 16),
230 RxMissedMask = 0xffff,
231
232 /* SROM-related bits */
233 SROMC0InfoLeaf = 27,
234 MediaBlockMask = 0x3f,
235 MediaCustomCSRs = (1 << 6),
f3b197ac 236
1da177e4
LT
237 /* PCIPM bits */
238 PM_Sleep = (1 << 31),
239 PM_Snooze = (1 << 30),
240 PM_Mask = PM_Sleep | PM_Snooze,
f3b197ac 241
1da177e4
LT
242 /* SIAStatus bits */
243 NWayState = (1 << 14) | (1 << 13) | (1 << 12),
244 NWayRestart = (1 << 12),
245 NonselPortActive = (1 << 9),
246 LinkFailStatus = (1 << 2),
247 NetCxnErr = (1 << 1),
248};
249
250static const u32 de_intr_mask =
251 IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty |
252 LinkPass | LinkFail | PciErr;
253
254/*
255 * Set the programmable burst length to 4 longwords for all chips:
256 * DMA errors result without these values. Cache-align to 16 longwords.
257 */
b77e5228 258static const u32 de_bus_mode = CacheAlign16 | BurstLen4 | DescSkipLen;
1da177e4
LT
259
260struct de_srom_media_block {
261 u8 opts;
262 u16 csr13;
263 u16 csr14;
264 u16 csr15;
ba2d3587 265} __packed;
1da177e4
LT
266
267struct de_srom_info_leaf {
268 u16 default_media;
269 u8 n_blocks;
270 u8 unused;
ba2d3587 271} __packed;
1da177e4
LT
272
273struct de_desc {
c559a5bc
AV
274 __le32 opts1;
275 __le32 opts2;
276 __le32 addr1;
277 __le32 addr2;
b77e5228
RS
278#if DSL
279 __le32 skip[DSL];
280#endif
1da177e4
LT
281};
282
283struct media_info {
284 u16 type; /* DE_MEDIA_xxx */
285 u16 csr13;
286 u16 csr14;
287 u16 csr15;
288};
289
290struct ring_info {
291 struct sk_buff *skb;
292 dma_addr_t mapping;
293};
294
295struct de_private {
296 unsigned tx_head;
297 unsigned tx_tail;
298 unsigned rx_tail;
299
300 void __iomem *regs;
301 struct net_device *dev;
302 spinlock_t lock;
303
304 struct de_desc *rx_ring;
305 struct de_desc *tx_ring;
306 struct ring_info tx_skb[DE_TX_RING_SIZE];
307 struct ring_info rx_skb[DE_RX_RING_SIZE];
308 unsigned rx_buf_sz;
309 dma_addr_t ring_dma;
310
311 u32 msg_enable;
312
313 struct net_device_stats net_stats;
314
315 struct pci_dev *pdev;
316
317 u16 setup_frame[DE_SETUP_FRAME_WORDS];
318
319 u32 media_type;
320 u32 media_supported;
321 u32 media_advertise;
322 struct media_info media[DE_MAX_MEDIA];
323 struct timer_list media_timer;
324
325 u8 *ee_data;
326 unsigned board_idx;
327 unsigned de21040 : 1;
328 unsigned media_lock : 1;
329};
330
331
332static void de_set_rx_mode (struct net_device *dev);
333static void de_tx (struct de_private *de);
334static void de_clean_rings (struct de_private *de);
335static void de_media_interrupt (struct de_private *de, u32 status);
336static void de21040_media_timer (unsigned long data);
337static void de21041_media_timer (unsigned long data);
338static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
339
340
a3aa1884 341static DEFINE_PCI_DEVICE_TABLE(de_pci_tbl) = {
1da177e4
LT
342 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
343 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
344 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
345 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
346 { },
347};
348MODULE_DEVICE_TABLE(pci, de_pci_tbl);
349
350static const char * const media_name[DE_MAX_MEDIA] = {
351 "10baseT auto",
352 "BNC",
353 "AUI",
354 "10baseT-HD",
355 "10baseT-FD"
356};
357
358/* 21040 transceiver register settings:
359 * TP AUTO(unused), BNC(unused), AUI, TP, TP FD*/
360static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, };
361static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, };
362static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
363
364/* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
365static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
366static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
e0f9c4f3
OZ
367/* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */
368static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
1da177e4
LT
369static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
370
371
84cc1535
ML
372#define dr32(reg) ioread32(de->regs + (reg))
373#define dw32(reg, val) iowrite32((val), de->regs + (reg))
1da177e4
LT
374
375
376static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
377 u32 status, u32 len)
378{
379 if (netif_msg_rx_err (de))
380 printk (KERN_DEBUG
381 "%s: rx err, slot %d status 0x%x len %d\n",
382 de->dev->name, rx_tail, status, len);
383
384 if ((status & 0x38000300) != 0x0300) {
385 /* Ignore earlier buffers. */
386 if ((status & 0xffff) != 0x7fff) {
387 if (netif_msg_rx_err(de))
f639dc7d
JP
388 dev_warn(&de->dev->dev,
389 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
390 status);
1da177e4
LT
391 de->net_stats.rx_length_errors++;
392 }
393 } else if (status & RxError) {
394 /* There was a fatal error. */
395 de->net_stats.rx_errors++; /* end of a packet.*/
396 if (status & 0x0890) de->net_stats.rx_length_errors++;
397 if (status & RxErrCRC) de->net_stats.rx_crc_errors++;
398 if (status & RxErrFIFO) de->net_stats.rx_fifo_errors++;
399 }
400}
401
402static void de_rx (struct de_private *de)
403{
404 unsigned rx_tail = de->rx_tail;
405 unsigned rx_work = DE_RX_RING_SIZE;
406 unsigned drop = 0;
407 int rc;
408
46578a69 409 while (--rx_work) {
1da177e4
LT
410 u32 status, len;
411 dma_addr_t mapping;
412 struct sk_buff *skb, *copy_skb;
413 unsigned copying_skb, buflen;
414
415 skb = de->rx_skb[rx_tail].skb;
7e0b58f3 416 BUG_ON(!skb);
1da177e4
LT
417 rmb();
418 status = le32_to_cpu(de->rx_ring[rx_tail].opts1);
419 if (status & DescOwn)
420 break;
421
422 len = ((status >> 16) & 0x7ff) - 4;
423 mapping = de->rx_skb[rx_tail].mapping;
424
425 if (unlikely(drop)) {
426 de->net_stats.rx_dropped++;
427 goto rx_next;
428 }
429
430 if (unlikely((status & 0x38008300) != 0x0300)) {
431 de_rx_err_acct(de, rx_tail, status, len);
432 goto rx_next;
433 }
434
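		/* Copybreak: small frames are copied into a freshly allocated
		 * skb so the full-sized Rx buffer can be handed straight back
		 * to the chip; larger frames are passed up as-is and the ring
		 * slot gets a new buffer. */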
435 copying_skb = (len <= rx_copybreak);
436
437 if (unlikely(netif_msg_rx_status(de)))
438 printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d copying? %d\n",
439 de->dev->name, rx_tail, status, len,
440 copying_skb);
441
442 buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
443 copy_skb = dev_alloc_skb (buflen);
444 if (unlikely(!copy_skb)) {
445 de->net_stats.rx_dropped++;
446 drop = 1;
447 rx_work = 100;
448 goto rx_next;
449 }
1da177e4
LT
450
451 if (!copying_skb) {
452 pci_unmap_single(de->pdev, mapping,
453 buflen, PCI_DMA_FROMDEVICE);
454 skb_put(skb, len);
455
456 mapping =
457 de->rx_skb[rx_tail].mapping =
689be439 458 pci_map_single(de->pdev, copy_skb->data,
1da177e4
LT
459 buflen, PCI_DMA_FROMDEVICE);
460 de->rx_skb[rx_tail].skb = copy_skb;
461 } else {
462 pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
463 skb_reserve(copy_skb, RX_OFFSET);
d626f62b
ACM
464 skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
465 len);
1da177e4
LT
466 pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
467
468 /* We'll reuse the original ring buffer. */
469 skb = copy_skb;
470 }
471
472 skb->protocol = eth_type_trans (skb, de->dev);
473
474 de->net_stats.rx_packets++;
475 de->net_stats.rx_bytes += skb->len;
1da177e4
LT
476 rc = netif_rx (skb);
477 if (rc == NET_RX_DROP)
478 drop = 1;
479
480rx_next:
1da177e4
LT
481 if (rx_tail == (DE_RX_RING_SIZE - 1))
482 de->rx_ring[rx_tail].opts2 =
483 cpu_to_le32(RingEnd | de->rx_buf_sz);
484 else
485 de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
486 de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
b991d2bc
RS
487 wmb();
488 de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
1da177e4
LT
489 rx_tail = NEXT_RX(rx_tail);
490 }
491
492 if (!rx_work)
f639dc7d 493 dev_warn(&de->dev->dev, "rx work limit reached\n");
1da177e4
LT
494
495 de->rx_tail = rx_tail;
496}
497
7d12e780 498static irqreturn_t de_interrupt (int irq, void *dev_instance)
1da177e4
LT
499{
500 struct net_device *dev = dev_instance;
8f15ea42 501 struct de_private *de = netdev_priv(dev);
1da177e4
LT
502 u32 status;
503
504 status = dr32(MacStatus);
505 if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))
506 return IRQ_NONE;
507
508 if (netif_msg_intr(de))
509 printk(KERN_DEBUG "%s: intr, status %08x mode %08x desc %u/%u/%u\n",
f639dc7d
JP
510 dev->name, status, dr32(MacMode),
511 de->rx_tail, de->tx_head, de->tx_tail);
1da177e4
LT
512
513 dw32(MacStatus, status);
514
515 if (status & (RxIntr | RxEmpty)) {
516 de_rx(de);
517 if (status & RxEmpty)
518 dw32(RxPoll, NormalRxPoll);
519 }
520
521 spin_lock(&de->lock);
522
523 if (status & (TxIntr | TxEmpty))
524 de_tx(de);
525
526 if (status & (LinkPass | LinkFail))
527 de_media_interrupt(de, status);
528
529 spin_unlock(&de->lock);
530
531 if (status & PciErr) {
532 u16 pci_status;
533
534 pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
535 pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
f639dc7d
JP
536 dev_err(&de->dev->dev,
537 "PCI bus error, status=%08x, PCI status=%04x\n",
538 status, pci_status);
1da177e4
LT
539 }
540
541 return IRQ_HANDLED;
542}
543
544static void de_tx (struct de_private *de)
545{
546 unsigned tx_head = de->tx_head;
547 unsigned tx_tail = de->tx_tail;
548
549 while (tx_tail != tx_head) {
550 struct sk_buff *skb;
551 u32 status;
552
553 rmb();
554 status = le32_to_cpu(de->tx_ring[tx_tail].opts1);
555 if (status & DescOwn)
556 break;
557
558 skb = de->tx_skb[tx_tail].skb;
7e0b58f3 559 BUG_ON(!skb);
1da177e4
LT
560 if (unlikely(skb == DE_DUMMY_SKB))
561 goto next;
562
563 if (unlikely(skb == DE_SETUP_SKB)) {
564 pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
565 sizeof(de->setup_frame), PCI_DMA_TODEVICE);
566 goto next;
567 }
568
569 pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
570 skb->len, PCI_DMA_TODEVICE);
571
572 if (status & LastFrag) {
573 if (status & TxError) {
574 if (netif_msg_tx_err(de))
575 printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
576 de->dev->name, status);
577 de->net_stats.tx_errors++;
578 if (status & TxOWC)
579 de->net_stats.tx_window_errors++;
580 if (status & TxMaxCol)
581 de->net_stats.tx_aborted_errors++;
582 if (status & TxLinkFail)
583 de->net_stats.tx_carrier_errors++;
584 if (status & TxFIFOUnder)
585 de->net_stats.tx_fifo_errors++;
586 } else {
587 de->net_stats.tx_packets++;
588 de->net_stats.tx_bytes += skb->len;
589 if (netif_msg_tx_done(de))
f639dc7d
JP
590 printk(KERN_DEBUG "%s: tx done, slot %d\n",
591 de->dev->name, tx_tail);
1da177e4
LT
592 }
593 dev_kfree_skb_irq(skb);
594 }
595
596next:
597 de->tx_skb[tx_tail].skb = NULL;
598
599 tx_tail = NEXT_TX(tx_tail);
600 }
601
602 de->tx_tail = tx_tail;
603
604 if (netif_queue_stopped(de->dev) && (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4)))
605 netif_wake_queue(de->dev);
606}
607
ad096463
SH
608static netdev_tx_t de_start_xmit (struct sk_buff *skb,
609 struct net_device *dev)
1da177e4 610{
8f15ea42 611 struct de_private *de = netdev_priv(dev);
1da177e4
LT
612 unsigned int entry, tx_free;
613 u32 mapping, len, flags = FirstFrag | LastFrag;
614 struct de_desc *txd;
615
616 spin_lock_irq(&de->lock);
617
618 tx_free = TX_BUFFS_AVAIL(de);
619 if (tx_free == 0) {
620 netif_stop_queue(dev);
621 spin_unlock_irq(&de->lock);
5b548140 622 return NETDEV_TX_BUSY;
1da177e4
LT
623 }
624 tx_free--;
625
626 entry = de->tx_head;
627
628 txd = &de->tx_ring[entry];
629
630 len = skb->len;
631 mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
632 if (entry == (DE_TX_RING_SIZE - 1))
633 flags |= RingEnd;
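	/* Set the per-descriptor interrupt-request bit when the ring is full
	 * or exactly half full, so completed buffers are reclaimed promptly. */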
634 if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
635 flags |= TxSwInt;
636 flags |= len;
637 txd->opts2 = cpu_to_le32(flags);
638 txd->addr1 = cpu_to_le32(mapping);
639
640 de->tx_skb[entry].skb = skb;
641 de->tx_skb[entry].mapping = mapping;
642 wmb();
643
644 txd->opts1 = cpu_to_le32(DescOwn);
645 wmb();
646
647 de->tx_head = NEXT_TX(entry);
648 if (netif_msg_tx_queued(de))
649 printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
650 dev->name, entry, skb->len);
651
652 if (tx_free == 0)
653 netif_stop_queue(dev);
654
655 spin_unlock_irq(&de->lock);
656
657 /* Trigger an immediate transmit demand. */
658 dw32(TxPoll, NormalTxPoll);
1da177e4 659
6ed10654 660 return NETDEV_TX_OK;
1da177e4
LT
661}
662
663/* Set or clear the multicast filter for this adaptor.
664 Note that we only use exclusion around actually queueing the
665 new frame, not around filling de->setup_frame. This is non-deterministic
666 when re-entered but still correct. */
667
668#undef set_bit_le
669#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
670
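/* The 21x4x "setup frame" is a 192-byte pseudo-packet used to load the
 * address filter.  The chip reads it as longwords of which only the low
 * 16 bits matter, which is why each 16-bit value below is stored twice.
 * Hash mode packs a 512-bit multicast hash table plus the station address;
 * perfect mode packs up to 16 complete addresses. */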
671static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
672{
8f15ea42 673 struct de_private *de = netdev_priv(dev);
1da177e4 674 u16 hash_table[32];
22bedad3 675 struct netdev_hw_addr *ha;
1da177e4
LT
676 int i;
677 u16 *eaddrs;
678
679 memset(hash_table, 0, sizeof(hash_table));
680 set_bit_le(255, hash_table); /* Broadcast entry */
681 /* This should work on big-endian machines as well. */
22bedad3
JP
682 netdev_for_each_mc_addr(ha, dev) {
683 int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
1da177e4
LT
684
685 set_bit_le(index, hash_table);
4302b67e 686 }
1da177e4 687
4302b67e
JP
688 for (i = 0; i < 32; i++) {
689 *setup_frm++ = hash_table[i];
690 *setup_frm++ = hash_table[i];
1da177e4 691 }
4302b67e 692 setup_frm = &de->setup_frame[13*6];
1da177e4
LT
693
694 /* Fill the final entry with our physical address. */
695 eaddrs = (u16 *)dev->dev_addr;
696 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
697 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
698 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
699}
700
701static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
702{
8f15ea42 703 struct de_private *de = netdev_priv(dev);
22bedad3 704 struct netdev_hw_addr *ha;
1da177e4
LT
705 u16 *eaddrs;
706
707 /* We have <= 14 addresses so we can use the wonderful
708 16 address perfect filtering of the Tulip. */
22bedad3
JP
709 netdev_for_each_mc_addr(ha, dev) {
710 eaddrs = (u16 *) ha->addr;
1da177e4
LT
711 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
712 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
713 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
714 }
715 /* Fill the unused entries with the broadcast address. */
4302b67e 716 memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
1da177e4
LT
717 setup_frm = &de->setup_frame[15*6];
718
719 /* Fill the final entry with our physical address. */
720 eaddrs = (u16 *)dev->dev_addr;
721 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
722 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
723 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
724}
725
726
727static void __de_set_rx_mode (struct net_device *dev)
728{
8f15ea42 729 struct de_private *de = netdev_priv(dev);
1da177e4
LT
730 u32 macmode;
731 unsigned int entry;
732 u32 mapping;
733 struct de_desc *txd;
734 struct de_desc *dummy_txd = NULL;
735
736 macmode = dr32(MacMode) & ~(AcceptAllMulticast | AcceptAllPhys);
737
738 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
739 macmode |= AcceptAllMulticast | AcceptAllPhys;
740 goto out;
741 }
742
4cd24eaf 743 if ((netdev_mc_count(dev) > 1000) || (dev->flags & IFF_ALLMULTI)) {
1da177e4
LT
744 /* Too many to filter well -- accept all multicasts. */
745 macmode |= AcceptAllMulticast;
746 goto out;
747 }
748
749 /* Note that only the low-address shortword of setup_frame is valid!
750 The values are doubled for big-endian architectures. */
4cd24eaf 751 if (netdev_mc_count(dev) > 14) /* Must use a multicast hash table. */
1da177e4
LT
752 build_setup_frame_hash (de->setup_frame, dev);
753 else
754 build_setup_frame_perfect (de->setup_frame, dev);
755
756 /*
757 * Now add this frame to the Tx list.
758 */
759
760 entry = de->tx_head;
761
762 /* Avoid a chip erratum by prefixing a dummy entry. */
763 if (entry != 0) {
764 de->tx_skb[entry].skb = DE_DUMMY_SKB;
765
766 dummy_txd = &de->tx_ring[entry];
767 dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ?
768 cpu_to_le32(RingEnd) : 0;
769 dummy_txd->addr1 = 0;
770
771 /* Must set DescOwned later to avoid race with chip */
772
773 entry = NEXT_TX(entry);
774 }
775
776 de->tx_skb[entry].skb = DE_SETUP_SKB;
777 de->tx_skb[entry].mapping = mapping =
778 pci_map_single (de->pdev, de->setup_frame,
779 sizeof (de->setup_frame), PCI_DMA_TODEVICE);
780
781 /* Put the setup frame on the Tx list. */
782 txd = &de->tx_ring[entry];
783 if (entry == (DE_TX_RING_SIZE - 1))
784 txd->opts2 = cpu_to_le32(SetupFrame | RingEnd | sizeof (de->setup_frame));
785 else
786 txd->opts2 = cpu_to_le32(SetupFrame | sizeof (de->setup_frame));
787 txd->addr1 = cpu_to_le32(mapping);
788 wmb();
789
790 txd->opts1 = cpu_to_le32(DescOwn);
791 wmb();
792
793 if (dummy_txd) {
794 dummy_txd->opts1 = cpu_to_le32(DescOwn);
795 wmb();
796 }
797
798 de->tx_head = NEXT_TX(entry);
799
1da177e4
LT
800 if (TX_BUFFS_AVAIL(de) == 0)
801 netif_stop_queue(dev);
802
803 /* Trigger an immediate transmit demand. */
804 dw32(TxPoll, NormalTxPoll);
805
806out:
807 if (macmode != dr32(MacMode))
808 dw32(MacMode, macmode);
809}
810
811static void de_set_rx_mode (struct net_device *dev)
812{
813 unsigned long flags;
8f15ea42 814 struct de_private *de = netdev_priv(dev);
1da177e4
LT
815
816 spin_lock_irqsave (&de->lock, flags);
817 __de_set_rx_mode(dev);
818 spin_unlock_irqrestore (&de->lock, flags);
819}
820
821static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
822{
823 if (unlikely(rx_missed & RxMissedOver))
824 de->net_stats.rx_missed_errors += RxMissedMask;
825 else
826 de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask);
827}
828
829static void __de_get_stats(struct de_private *de)
830{
831 u32 tmp = dr32(RxMissed); /* self-clearing */
832
833 de_rx_missed(de, tmp);
834}
835
836static struct net_device_stats *de_get_stats(struct net_device *dev)
837{
8f15ea42 838 struct de_private *de = netdev_priv(dev);
1da177e4
LT
839
840 /* The chip only needs to report frames it silently dropped. */
841 spin_lock_irq(&de->lock);
842 if (netif_running(dev) && netif_device_present(dev))
843 __de_get_stats(de);
844 spin_unlock_irq(&de->lock);
845
846 return &de->net_stats;
847}
848
849static inline int de_is_running (struct de_private *de)
850{
851 return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0;
852}
853
854static void de_stop_rxtx (struct de_private *de)
855{
856 u32 macmode;
69cac988 857 unsigned int i = 1300/100;
1da177e4
LT
858
859 macmode = dr32(MacMode);
860 if (macmode & RxTx) {
861 dw32(MacMode, macmode & ~RxTx);
862 dr32(MacMode);
863 }
864
69cac988
GG
865 /* wait until in-flight frame completes.
866 * Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin)
867 * Typically expect this loop to end in < 50 us on 100BT.
868 */
869 while (--i) {
1da177e4
LT
870 if (!de_is_running(de))
871 return;
69cac988 872 udelay(100);
1da177e4 873 }
f3b197ac 874
f639dc7d 875 dev_warn(&de->dev->dev, "timeout expired stopping DMA\n");
1da177e4
LT
876}
877
878static inline void de_start_rxtx (struct de_private *de)
879{
880 u32 macmode;
881
882 macmode = dr32(MacMode);
883 if ((macmode & RxTx) != RxTx) {
884 dw32(MacMode, macmode | RxTx);
885 dr32(MacMode);
886 }
887}
888
889static void de_stop_hw (struct de_private *de)
890{
891
892 udelay(5);
893 dw32(IntrMask, 0);
894
895 de_stop_rxtx(de);
896
897 dw32(MacStatus, dr32(MacStatus));
898
899 udelay(10);
900
901 de->rx_tail = 0;
902 de->tx_head = de->tx_tail = 0;
903}
904
905static void de_link_up(struct de_private *de)
906{
907 if (!netif_carrier_ok(de->dev)) {
908 netif_carrier_on(de->dev);
909 if (netif_msg_link(de))
f639dc7d
JP
910 dev_info(&de->dev->dev, "link up, media %s\n",
911 media_name[de->media_type]);
1da177e4
LT
912 }
913}
914
915static void de_link_down(struct de_private *de)
916{
917 if (netif_carrier_ok(de->dev)) {
918 netif_carrier_off(de->dev);
919 if (netif_msg_link(de))
f639dc7d 920 dev_info(&de->dev->dev, "link down\n");
1da177e4
LT
921 }
922}
923
924static void de_set_media (struct de_private *de)
925{
926 unsigned media = de->media_type;
927 u32 macmode = dr32(MacMode);
928
f25f0f8d 929 if (de_is_running(de))
f639dc7d
JP
930 dev_warn(&de->dev->dev,
931 "chip is running while changing media!\n");
1da177e4
LT
932
933 if (de->de21040)
934 dw32(CSR11, FULL_DUPLEX_MAGIC);
935 dw32(CSR13, 0); /* Reset phy */
936 dw32(CSR14, de->media[media].csr14);
937 dw32(CSR15, de->media[media].csr15);
938 dw32(CSR13, de->media[media].csr13);
939
940 /* must delay 10ms before writing to other registers,
941 * especially CSR6
942 */
943 mdelay(10);
944
945 if (media == DE_MEDIA_TP_FD)
946 macmode |= FullDuplex;
947 else
948 macmode &= ~FullDuplex;
f3b197ac 949
1da177e4 950 if (netif_msg_link(de)) {
f639dc7d
JP
951 dev_info(&de->dev->dev, "set link %s\n", media_name[media]);
952 dev_info(&de->dev->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
953 dr32(MacMode), dr32(SIAStatus),
954 dr32(CSR13), dr32(CSR14), dr32(CSR15));
955
956 dev_info(&de->dev->dev,
957 "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
958 macmode, de->media[media].csr13,
959 de->media[media].csr14, de->media[media].csr15);
1da177e4
LT
960 }
961 if (macmode != dr32(MacMode))
962 dw32(MacMode, macmode);
963}
964
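/* Switch to the first media type in the candidate list that we are still
 * allowed to advertise; if none qualifies, keep the current media type. */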
965static void de_next_media (struct de_private *de, u32 *media,
966 unsigned int n_media)
967{
968 unsigned int i;
969
970 for (i = 0; i < n_media; i++) {
971 if (de_ok_to_advertise(de, media[i])) {
972 de->media_type = media[i];
973 return;
974 }
975 }
976}
977
978static void de21040_media_timer (unsigned long data)
979{
980 struct de_private *de = (struct de_private *) data;
981 struct net_device *dev = de->dev;
982 u32 status = dr32(SIAStatus);
983 unsigned int carrier;
984 unsigned long flags;
f3b197ac 985
1da177e4 986 carrier = (status & NetCxnErr) ? 0 : 1;
f3b197ac 987
1da177e4
LT
988 if (carrier) {
989 if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
990 goto no_link_yet;
991
992 de->media_timer.expires = jiffies + DE_TIMER_LINK;
993 add_timer(&de->media_timer);
994 if (!netif_carrier_ok(dev))
995 de_link_up(de);
996 else
997 if (netif_msg_timer(de))
f639dc7d
JP
998 dev_info(&dev->dev, "%s link ok, status %x\n",
999 media_name[de->media_type], status);
1da177e4
LT
1000 return;
1001 }
1002
f3b197ac 1003 de_link_down(de);
1da177e4
LT
1004
1005 if (de->media_lock)
1006 return;
1007
1008 if (de->media_type == DE_MEDIA_AUI) {
1009 u32 next_state = DE_MEDIA_TP;
1010 de_next_media(de, &next_state, 1);
1011 } else {
1012 u32 next_state = DE_MEDIA_AUI;
1013 de_next_media(de, &next_state, 1);
1014 }
1015
1016 spin_lock_irqsave(&de->lock, flags);
1017 de_stop_rxtx(de);
1018 spin_unlock_irqrestore(&de->lock, flags);
1019 de_set_media(de);
1020 de_start_rxtx(de);
1021
1022no_link_yet:
1023 de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
1024 add_timer(&de->media_timer);
1025
1026 if (netif_msg_timer(de))
f639dc7d
JP
1027 dev_info(&dev->dev, "no link, trying media %s, status %x\n",
1028 media_name[de->media_type], status);
1da177e4
LT
1029}
1030
1031static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
1032{
1033 switch (new_media) {
1034 case DE_MEDIA_TP_AUTO:
1035 if (!(de->media_advertise & ADVERTISED_Autoneg))
1036 return 0;
1037 if (!(de->media_advertise & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full)))
1038 return 0;
1039 break;
1040 case DE_MEDIA_BNC:
1041 if (!(de->media_advertise & ADVERTISED_BNC))
1042 return 0;
1043 break;
1044 case DE_MEDIA_AUI:
1045 if (!(de->media_advertise & ADVERTISED_AUI))
1046 return 0;
1047 break;
1048 case DE_MEDIA_TP:
1049 if (!(de->media_advertise & ADVERTISED_10baseT_Half))
1050 return 0;
1051 break;
1052 case DE_MEDIA_TP_FD:
1053 if (!(de->media_advertise & ADVERTISED_10baseT_Full))
1054 return 0;
1055 break;
1056 }
f3b197ac 1057
1da177e4
LT
1058 return 1;
1059}
1060
1061static void de21041_media_timer (unsigned long data)
1062{
1063 struct de_private *de = (struct de_private *) data;
1064 struct net_device *dev = de->dev;
1065 u32 status = dr32(SIAStatus);
1066 unsigned int carrier;
1067 unsigned long flags;
f3b197ac 1068
1da177e4 1069 carrier = (status & NetCxnErr) ? 0 : 1;
f3b197ac 1070
1da177e4
LT
1071 if (carrier) {
1072 if ((de->media_type == DE_MEDIA_TP_AUTO ||
1073 de->media_type == DE_MEDIA_TP ||
1074 de->media_type == DE_MEDIA_TP_FD) &&
1075 (status & LinkFailStatus))
1076 goto no_link_yet;
1077
1078 de->media_timer.expires = jiffies + DE_TIMER_LINK;
1079 add_timer(&de->media_timer);
1080 if (!netif_carrier_ok(dev))
1081 de_link_up(de);
1082 else
1083 if (netif_msg_timer(de))
f639dc7d
JP
1084 dev_info(&dev->dev,
1085 "%s link ok, mode %x status %x\n",
1086 media_name[de->media_type],
1087 dr32(MacMode), status);
1da177e4
LT
1088 return;
1089 }
1090
f3b197ac 1091 de_link_down(de);
1da177e4
LT
1092
1093 /* if media type locked, don't switch media */
1094 if (de->media_lock)
1095 goto set_media;
1096
1097 /* if activity detected, use that as hint for new media type */
1098 if (status & NonselPortActive) {
1099 unsigned int have_media = 1;
1100
1101 /* if AUI/BNC selected, then activity is on TP port */
1102 if (de->media_type == DE_MEDIA_AUI ||
1103 de->media_type == DE_MEDIA_BNC) {
1104 if (de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))
1105 de->media_type = DE_MEDIA_TP_AUTO;
1106 else
1107 have_media = 0;
1108 }
1109
1110 /* TP selected. If there is only TP and BNC, then it's BNC */
1111 else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_BNC) &&
1112 de_ok_to_advertise(de, DE_MEDIA_BNC))
1113 de->media_type = DE_MEDIA_BNC;
1114
1115 /* TP selected. If there is only TP and AUI, then it's AUI */
1116 else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_AUI) &&
1117 de_ok_to_advertise(de, DE_MEDIA_AUI))
1118 de->media_type = DE_MEDIA_AUI;
1119
1120 /* otherwise, ignore the hint */
1121 else
1122 have_media = 0;
1123
1124 if (have_media)
1125 goto set_media;
1126 }
1127
1128 /*
1129 * Absent or ambiguous activity hint, move to next advertised
1130 * media state. If de->media_type is left unchanged, this
1131 * simply resets the PHY and reloads the current media settings.
1132 */
1133 if (de->media_type == DE_MEDIA_AUI) {
1134 u32 next_states[] = { DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
1135 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1136 } else if (de->media_type == DE_MEDIA_BNC) {
1137 u32 next_states[] = { DE_MEDIA_TP_AUTO, DE_MEDIA_AUI };
1138 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1139 } else {
1140 u32 next_states[] = { DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
1141 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1142 }
f3b197ac 1143
1da177e4
LT
1144set_media:
1145 spin_lock_irqsave(&de->lock, flags);
1146 de_stop_rxtx(de);
1147 spin_unlock_irqrestore(&de->lock, flags);
1148 de_set_media(de);
1149 de_start_rxtx(de);
1150
1151no_link_yet:
1152 de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
1153 add_timer(&de->media_timer);
1154
1155 if (netif_msg_timer(de))
f639dc7d
JP
1156 dev_info(&dev->dev, "no link, trying media %s, status %x\n",
1157 media_name[de->media_type], status);
1da177e4
LT
1158}
1159
1160static void de_media_interrupt (struct de_private *de, u32 status)
1161{
1162 if (status & LinkPass) {
1163 de_link_up(de);
1164 mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
1165 return;
1166 }
f3b197ac 1167
7e0b58f3 1168 BUG_ON(!(status & LinkFail));
1da177e4
LT
1169
1170 if (netif_carrier_ok(de->dev)) {
1171 de_link_down(de);
1172 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1173 }
1174}
1175
1176static int de_reset_mac (struct de_private *de)
1177{
1178 u32 status, tmp;
1179
1180 /*
1181 * Reset MAC. de4x5.c and tulip.c examined for "advice"
1182 * in this area.
1183 */
1184
1185 if (dr32(BusMode) == 0xffffffff)
1186 return -EBUSY;
1187
1188 /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
1189 dw32 (BusMode, CmdReset);
1190 mdelay (1);
1191
1192 dw32 (BusMode, de_bus_mode);
1193 mdelay (1);
1194
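	/* A handful of dummy reads, spaced 1 ms apart, give the reset time
	 * to settle before MacStatus is checked below. */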
1195 for (tmp = 0; tmp < 5; tmp++) {
1196 dr32 (BusMode);
1197 mdelay (1);
1198 }
1199
1200 mdelay (1);
1201
1202 status = dr32(MacStatus);
1203 if (status & (RxState | TxState))
1204 return -EBUSY;
1205 if (status == 0xffffffff)
1206 return -ENODEV;
1207 return 0;
1208}
1209
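/* The 21041 keeps its sleep/snooze power-management bits in a vendor PCI
 * config register at offset 0x40 (CFDD on these chips); de_adapter_wake()
 * clears them so the chip wakes up and its CSRs can be accessed. */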
1210static void de_adapter_wake (struct de_private *de)
1211{
1212 u32 pmctl;
1213
1214 if (de->de21040)
1215 return;
1216
1217 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1218 if (pmctl & PM_Mask) {
1219 pmctl &= ~PM_Mask;
1220 pci_write_config_dword(de->pdev, PCIPM, pmctl);
1221
1222 /* de4x5.c delays, so we do too */
1223 msleep(10);
1224 }
1225}
1226
1227static void de_adapter_sleep (struct de_private *de)
1228{
1229 u32 pmctl;
1230
1231 if (de->de21040)
1232 return;
1233
1234 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1235 pmctl |= PM_Sleep;
1236 pci_write_config_dword(de->pdev, PCIPM, pmctl);
1237}
1238
1239static int de_init_hw (struct de_private *de)
1240{
1241 struct net_device *dev = de->dev;
1242 u32 macmode;
1243 int rc;
1244
1245 de_adapter_wake(de);
f3b197ac 1246
1da177e4
LT
1247 macmode = dr32(MacMode) & ~MacModeClear;
1248
1249 rc = de_reset_mac(de);
1250 if (rc)
1251 return rc;
1252
1253 de_set_media(de); /* reset phy */
1254
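	/* Rx and Tx descriptors live in one DMA allocation: the Tx ring
	 * starts immediately after the DE_RX_RING_SIZE Rx descriptors
	 * (see de_alloc_rings). */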
1255 dw32(RxRingAddr, de->ring_dma);
1256 dw32(TxRingAddr, de->ring_dma + (sizeof(struct de_desc) * DE_RX_RING_SIZE));
1257
1258 dw32(MacMode, RxTx | macmode);
1259
1260 dr32(RxMissed); /* self-clearing */
1261
1262 dw32(IntrMask, de_intr_mask);
1263
1264 de_set_rx_mode(dev);
1265
1266 return 0;
1267}
1268
1269static int de_refill_rx (struct de_private *de)
1270{
1271 unsigned i;
1272
1273 for (i = 0; i < DE_RX_RING_SIZE; i++) {
1274 struct sk_buff *skb;
1275
1276 skb = dev_alloc_skb(de->rx_buf_sz);
1277 if (!skb)
1278 goto err_out;
1279
1280 skb->dev = de->dev;
1281
1282 de->rx_skb[i].mapping = pci_map_single(de->pdev,
689be439 1283 skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1da177e4
LT
1284 de->rx_skb[i].skb = skb;
1285
1286 de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
1287 if (i == (DE_RX_RING_SIZE - 1))
1288 de->rx_ring[i].opts2 =
1289 cpu_to_le32(RingEnd | de->rx_buf_sz);
1290 else
1291 de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz);
1292 de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping);
1293 de->rx_ring[i].addr2 = 0;
1294 }
1295
1296 return 0;
1297
1298err_out:
1299 de_clean_rings(de);
1300 return -ENOMEM;
1301}
1302
1303static int de_init_rings (struct de_private *de)
1304{
1305 memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1306 de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1307
1308 de->rx_tail = 0;
1309 de->tx_head = de->tx_tail = 0;
1310
1311 return de_refill_rx (de);
1312}
1313
1314static int de_alloc_rings (struct de_private *de)
1315{
1316 de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma);
1317 if (!de->rx_ring)
1318 return -ENOMEM;
1319 de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
1320 return de_init_rings(de);
1321}
1322
1323static void de_clean_rings (struct de_private *de)
1324{
1325 unsigned i;
1326
1327 memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE);
1328 de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1329 wmb();
1330 memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1331 de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1332 wmb();
1333
1334 for (i = 0; i < DE_RX_RING_SIZE; i++) {
1335 if (de->rx_skb[i].skb) {
1336 pci_unmap_single(de->pdev, de->rx_skb[i].mapping,
1337 de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1338 dev_kfree_skb(de->rx_skb[i].skb);
1339 }
1340 }
1341
1342 for (i = 0; i < DE_TX_RING_SIZE; i++) {
1343 struct sk_buff *skb = de->tx_skb[i].skb;
1344 if ((skb) && (skb != DE_DUMMY_SKB)) {
1345 if (skb != DE_SETUP_SKB) {
1da177e4
LT
1346 de->net_stats.tx_dropped++;
1347 pci_unmap_single(de->pdev,
1348 de->tx_skb[i].mapping,
1349 skb->len, PCI_DMA_TODEVICE);
5185c7c2 1350 dev_kfree_skb(skb);
1da177e4
LT
1351 } else {
1352 pci_unmap_single(de->pdev,
1353 de->tx_skb[i].mapping,
1354 sizeof(de->setup_frame),
1355 PCI_DMA_TODEVICE);
1356 }
1357 }
1358 }
1359
1360 memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE);
1361 memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE);
1362}
1363
1364static void de_free_rings (struct de_private *de)
1365{
1366 de_clean_rings(de);
1367 pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma);
1368 de->rx_ring = NULL;
1369 de->tx_ring = NULL;
1370}
1371
1372static int de_open (struct net_device *dev)
1373{
8f15ea42 1374 struct de_private *de = netdev_priv(dev);
1da177e4 1375 int rc;
1da177e4
LT
1376
1377 if (netif_msg_ifup(de))
1378 printk(KERN_DEBUG "%s: enabling interface\n", dev->name);
1379
1380 de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1381
1382 rc = de_alloc_rings(de);
1383 if (rc) {
f639dc7d 1384 dev_err(&dev->dev, "ring allocation failure, err=%d\n", rc);
1da177e4
LT
1385 return rc;
1386 }
1387
3f735b76 1388 dw32(IntrMask, 0);
1da177e4 1389
1fb9df5d 1390 rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev);
1da177e4 1391 if (rc) {
f639dc7d
JP
1392 dev_err(&dev->dev, "IRQ %d request failure, err=%d\n",
1393 dev->irq, rc);
3f735b76
FR
1394 goto err_out_free;
1395 }
1396
1397 rc = de_init_hw(de);
1398 if (rc) {
f639dc7d 1399 dev_err(&dev->dev, "h/w init failure, err=%d\n", rc);
3f735b76 1400 goto err_out_free_irq;
1da177e4
LT
1401 }
1402
1403 netif_start_queue(dev);
1404 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1405
1406 return 0;
1407
3f735b76
FR
1408err_out_free_irq:
1409 free_irq(dev->irq, dev);
1da177e4
LT
1410err_out_free:
1411 de_free_rings(de);
1412 return rc;
1413}
1414
1415static int de_close (struct net_device *dev)
1416{
8f15ea42 1417 struct de_private *de = netdev_priv(dev);
1da177e4
LT
1418 unsigned long flags;
1419
1420 if (netif_msg_ifdown(de))
1421 printk(KERN_DEBUG "%s: disabling interface\n", dev->name);
1422
1423 del_timer_sync(&de->media_timer);
1424
1425 spin_lock_irqsave(&de->lock, flags);
1426 de_stop_hw(de);
1427 netif_stop_queue(dev);
1428 netif_carrier_off(dev);
1429 spin_unlock_irqrestore(&de->lock, flags);
f3b197ac 1430
1da177e4
LT
1431 free_irq(dev->irq, dev);
1432
1433 de_free_rings(de);
1434 de_adapter_sleep(de);
1da177e4
LT
1435 return 0;
1436}
1437
1438static void de_tx_timeout (struct net_device *dev)
1439{
8f15ea42 1440 struct de_private *de = netdev_priv(dev);
1da177e4
LT
1441
1442 printk(KERN_DEBUG "%s: NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
1443 dev->name, dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
1444 de->rx_tail, de->tx_head, de->tx_tail);
1445
1446 del_timer_sync(&de->media_timer);
1447
1448 disable_irq(dev->irq);
1449 spin_lock_irq(&de->lock);
1450
1451 de_stop_hw(de);
1452 netif_stop_queue(dev);
1453 netif_carrier_off(dev);
1454
1455 spin_unlock_irq(&de->lock);
1456 enable_irq(dev->irq);
f3b197ac 1457
1da177e4
LT
1458 /* Update the error counts. */
1459 __de_get_stats(de);
1460
1461 synchronize_irq(dev->irq);
1462 de_clean_rings(de);
1463
39bf4295
FR
1464 de_init_rings(de);
1465
1da177e4 1466 de_init_hw(de);
f3b197ac 1467
1da177e4
LT
1468 netif_wake_queue(dev);
1469}
1470
1471static void __de_get_regs(struct de_private *de, u8 *buf)
1472{
1473 int i;
1474 u32 *rbuf = (u32 *)buf;
f3b197ac 1475
1da177e4
LT
1476 /* read all CSRs */
1477 for (i = 0; i < DE_NUM_REGS; i++)
1478 rbuf[i] = dr32(i * 8);
1479
1480 /* handle self-clearing RxMissed counter, CSR8 */
1481 de_rx_missed(de, rbuf[8]);
1482}
1483
1484static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1485{
1486 ecmd->supported = de->media_supported;
1487 ecmd->transceiver = XCVR_INTERNAL;
1488 ecmd->phy_address = 0;
1489 ecmd->advertising = de->media_advertise;
f3b197ac 1490
1da177e4
LT
1491 switch (de->media_type) {
1492 case DE_MEDIA_AUI:
1493 ecmd->port = PORT_AUI;
1494 ecmd->speed = 5;
1495 break;
1496 case DE_MEDIA_BNC:
1497 ecmd->port = PORT_BNC;
1498 ecmd->speed = 2;
1499 break;
1500 default:
1501 ecmd->port = PORT_TP;
1502 ecmd->speed = SPEED_10;
1503 break;
1504 }
f3b197ac 1505
1da177e4
LT
1506 if (dr32(MacMode) & FullDuplex)
1507 ecmd->duplex = DUPLEX_FULL;
1508 else
1509 ecmd->duplex = DUPLEX_HALF;
1510
1511 if (de->media_lock)
1512 ecmd->autoneg = AUTONEG_DISABLE;
1513 else
1514 ecmd->autoneg = AUTONEG_ENABLE;
1515
1516 /* ignore maxtxpkt, maxrxpkt for now */
1517
1518 return 0;
1519}
1520
1521static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1522{
1523 u32 new_media;
1524 unsigned int media_lock;
1525
1526 if (ecmd->speed != SPEED_10 && ecmd->speed != 5 && ecmd->speed != 2)
1527 return -EINVAL;
1528 if (de->de21040 && ecmd->speed == 2)
1529 return -EINVAL;
1530 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
1531 return -EINVAL;
1532 if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC)
1533 return -EINVAL;
1534 if (de->de21040 && ecmd->port == PORT_BNC)
1535 return -EINVAL;
1536 if (ecmd->transceiver != XCVR_INTERNAL)
1537 return -EINVAL;
1538 if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
1539 return -EINVAL;
1540 if (ecmd->advertising & ~de->media_supported)
1541 return -EINVAL;
1542 if (ecmd->autoneg == AUTONEG_ENABLE &&
1543 (!(ecmd->advertising & ADVERTISED_Autoneg)))
1544 return -EINVAL;
f3b197ac 1545
1da177e4
LT
1546 switch (ecmd->port) {
1547 case PORT_AUI:
1548 new_media = DE_MEDIA_AUI;
1549 if (!(ecmd->advertising & ADVERTISED_AUI))
1550 return -EINVAL;
1551 break;
1552 case PORT_BNC:
1553 new_media = DE_MEDIA_BNC;
1554 if (!(ecmd->advertising & ADVERTISED_BNC))
1555 return -EINVAL;
1556 break;
1557 default:
1558 if (ecmd->autoneg == AUTONEG_ENABLE)
1559 new_media = DE_MEDIA_TP_AUTO;
1560 else if (ecmd->duplex == DUPLEX_FULL)
1561 new_media = DE_MEDIA_TP_FD;
1562 else
1563 new_media = DE_MEDIA_TP;
1564 if (!(ecmd->advertising & ADVERTISED_TP))
1565 return -EINVAL;
1566 if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half)))
1567 return -EINVAL;
1568 break;
1569 }
f3b197ac 1570
1da177e4 1571 media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1;
f3b197ac 1572
1da177e4
LT
1573 if ((new_media == de->media_type) &&
1574 (media_lock == de->media_lock) &&
1575 (ecmd->advertising == de->media_advertise))
1576 return 0; /* nothing to change */
f3b197ac 1577
1da177e4
LT
1578 de_link_down(de);
1579 de_stop_rxtx(de);
f3b197ac 1580
1da177e4
LT
1581 de->media_type = new_media;
1582 de->media_lock = media_lock;
1583 de->media_advertise = ecmd->advertising;
1584 de_set_media(de);
f3b197ac 1585
1da177e4
LT
1586 return 0;
1587}
1588
1589static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
1590{
8f15ea42 1591 struct de_private *de = netdev_priv(dev);
1da177e4
LT
1592
1593 strcpy (info->driver, DRV_NAME);
1594 strcpy (info->version, DRV_VERSION);
1595 strcpy (info->bus_info, pci_name(de->pdev));
1596 info->eedump_len = DE_EEPROM_SIZE;
1597}
1598
1599static int de_get_regs_len(struct net_device *dev)
1600{
1601 return DE_REGS_SIZE;
1602}
1603
1604static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1605{
8f15ea42 1606 struct de_private *de = netdev_priv(dev);
1da177e4
LT
1607 int rc;
1608
1609 spin_lock_irq(&de->lock);
1610 rc = __de_get_settings(de, ecmd);
1611 spin_unlock_irq(&de->lock);
1612
1613 return rc;
1614}
1615
1616static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1617{
8f15ea42 1618 struct de_private *de = netdev_priv(dev);
1da177e4
LT
1619 int rc;
1620
1621 spin_lock_irq(&de->lock);
1622 rc = __de_set_settings(de, ecmd);
1623 spin_unlock_irq(&de->lock);
1624
1625 return rc;
1626}
1627
1628static u32 de_get_msglevel(struct net_device *dev)
1629{
8f15ea42 1630 struct de_private *de = netdev_priv(dev);
1da177e4
LT
1631
1632 return de->msg_enable;
1633}
1634
1635static void de_set_msglevel(struct net_device *dev, u32 msglvl)
1636{
8f15ea42 1637 struct de_private *de = netdev_priv(dev);
1da177e4
LT
1638
1639 de->msg_enable = msglvl;
1640}
1641
1642static int de_get_eeprom(struct net_device *dev,
1643 struct ethtool_eeprom *eeprom, u8 *data)
1644{
8f15ea42 1645 struct de_private *de = netdev_priv(dev);
1da177e4
LT
1646
1647 if (!de->ee_data)
1648 return -EOPNOTSUPP;
1649 if ((eeprom->offset != 0) || (eeprom->magic != 0) ||
1650 (eeprom->len != DE_EEPROM_SIZE))
1651 return -EINVAL;
1652 memcpy(data, de->ee_data, eeprom->len);
1653
1654 return 0;
1655}
1656
1657static int de_nway_reset(struct net_device *dev)
1658{
8f15ea42 1659 struct de_private *de = netdev_priv(dev);
1da177e4
LT
1660 u32 status;
1661
1662 if (de->media_type != DE_MEDIA_TP_AUTO)
1663 return -EINVAL;
1664 if (netif_carrier_ok(de->dev))
1665 de_link_down(de);
1666
1667 status = dr32(SIAStatus);
1668 dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
1669 if (netif_msg_link(de))
f639dc7d
JP
1670 dev_info(&de->dev->dev, "link nway restart, status %x,%x\n",
1671 status, dr32(SIAStatus));
1da177e4
LT
1672 return 0;
1673}
1674
1675static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1676 void *data)
1677{
8f15ea42 1678 struct de_private *de = netdev_priv(dev);
1da177e4
LT
1679
1680 regs->version = (DE_REGS_VER << 2) | de->de21040;
1681
1682 spin_lock_irq(&de->lock);
1683 __de_get_regs(de, data);
1684 spin_unlock_irq(&de->lock);
1685}
1686
7282d491 1687static const struct ethtool_ops de_ethtool_ops = {
1da177e4 1688 .get_link = ethtool_op_get_link,
1da177e4
LT
1689 .get_drvinfo = de_get_drvinfo,
1690 .get_regs_len = de_get_regs_len,
1691 .get_settings = de_get_settings,
1692 .set_settings = de_set_settings,
1693 .get_msglevel = de_get_msglevel,
1694 .set_msglevel = de_set_msglevel,
1695 .get_eeprom = de_get_eeprom,
1696 .nway_reset = de_nway_reset,
1697 .get_regs = de_get_regs,
1698};
1699
4c44fd00 1700static void __devinit de21040_get_mac_address (struct de_private *de)
1da177e4
LT
1701{
1702 unsigned i;
1703
1704 dw32 (ROMCmd, 0); /* Reset the pointer with a dummy write. */
bc0da3fc 1705 udelay(5);
1da177e4
LT
1706
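	/* Each ROMCmd read returns one address byte; the loop below relies
	 * on bit 31 staying set (the value reading as negative) until the
	 * byte is valid. */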
1707 for (i = 0; i < 6; i++) {
1708 int value, boguscnt = 100000;
ec1d1ebb 1709 do {
1da177e4 1710 value = dr32(ROMCmd);
84cc1535 1711 rmb();
ec1d1ebb 1712 } while (value < 0 && --boguscnt > 0);
1da177e4
LT
1713 de->dev->dev_addr[i] = value;
1714 udelay(1);
1715 if (boguscnt <= 0)
f639dc7d 1716 pr_warning(PFX "timeout reading 21040 MAC address byte %u\n", i);
1da177e4
LT
1717 }
1718}
1719
4c44fd00 1720static void __devinit de21040_get_media_info(struct de_private *de)
1da177e4
LT
1721{
1722 unsigned int i;
1723
1724 de->media_type = DE_MEDIA_TP;
1725 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full |
1726 SUPPORTED_10baseT_Half | SUPPORTED_AUI;
1727 de->media_advertise = de->media_supported;
1728
1729 for (i = 0; i < DE_MAX_MEDIA; i++) {
1730 switch (i) {
1731 case DE_MEDIA_AUI:
1732 case DE_MEDIA_TP:
1733 case DE_MEDIA_TP_FD:
1734 de->media[i].type = i;
1735 de->media[i].csr13 = t21040_csr13[i];
1736 de->media[i].csr14 = t21040_csr14[i];
1737 de->media[i].csr15 = t21040_csr15[i];
1738 break;
1739 default:
1740 de->media[i].type = DE_MEDIA_INVALID;
1741 break;
1742 }
1743 }
1744}
1745
1746/* Note: this routine returns extra data bits for size detection. */
4a1d2d81 1747static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, int addr_len)
1da177e4
LT
1748{
1749 int i;
1750 unsigned retval = 0;
1751 void __iomem *ee_addr = regs + ROMCmd;
1752 int read_cmd = location | (EE_READ_CMD << addr_len);
1753
1754 writel(EE_ENB & ~EE_CS, ee_addr);
1755 writel(EE_ENB, ee_addr);
1756
1757 /* Shift the read command bits out. */
1758 for (i = 4 + addr_len; i >= 0; i--) {
1759 short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1760 writel(EE_ENB | dataval, ee_addr);
1761 readl(ee_addr);
1762 writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1763 readl(ee_addr);
1764 retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
1765 }
1766 writel(EE_ENB, ee_addr);
1767 readl(ee_addr);
1768
1769 for (i = 16; i > 0; i--) {
1770 writel(EE_ENB | EE_SHIFT_CLK, ee_addr);
1771 readl(ee_addr);
1772 retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
1773 writel(EE_ENB, ee_addr);
1774 readl(ee_addr);
1775 }
1776
1777 /* Terminate the EEPROM access. */
1778 writel(EE_ENB & ~EE_CS, ee_addr);
1779 return retval;
1780}
1781
4c44fd00 1782static void __devinit de21041_get_srom_info (struct de_private *de)
1da177e4
LT
1783{
1784 unsigned i, sa_offset = 0, ofs;
1785 u8 ee_data[DE_EEPROM_SIZE + 6] = {};
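	/* Probe the serial EEPROM's address width: issue a read with 8
	 * address bits and use the extra data bit returned by
	 * tulip_read_eeprom() to choose between 8- and 6-bit addressing. */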
1786 unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6;
1787 struct de_srom_info_leaf *il;
1788 void *bufp;
1789
1790 /* download entire eeprom */
1791 for (i = 0; i < DE_EEPROM_WORDS; i++)
c559a5bc
AV
1792 ((__le16 *)ee_data)[i] =
1793 cpu_to_le16(tulip_read_eeprom(de->regs, i, ee_addr_size));
1da177e4
LT
1794
1795 /* DEC now has a specification but early board makers
1796 just put the address in the first EEPROM locations. */
1797 /* This does memcmp(eedata, eedata+16, 8) */
bc053d45
RB
1798
1799#ifndef CONFIG_MIPS_COBALT
1800
1da177e4
LT
1801 for (i = 0; i < 8; i ++)
1802 if (ee_data[i] != ee_data[16+i])
1803 sa_offset = 20;
1804
bc053d45
RB
1805#endif
1806
1da177e4
LT
1807 /* store MAC address */
1808 for (i = 0; i < 6; i ++)
1809 de->dev->dev_addr[i] = ee_data[i + sa_offset];
1810
1811 /* get offset of controller 0 info leaf. ignore 2nd byte. */
1812 ofs = ee_data[SROMC0InfoLeaf];
1813 if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block)))
1814 goto bad_srom;
1815
1816 /* get pointer to info leaf */
1817 il = (struct de_srom_info_leaf *) &ee_data[ofs];
1818
1819 /* paranoia checks */
1820 if (il->n_blocks == 0)
1821 goto bad_srom;
1822 if ((sizeof(ee_data) - ofs) <
1823 (sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks)))
1824 goto bad_srom;
1825
1826 /* get default media type */
445854f4 1827 switch (get_unaligned(&il->default_media)) {
1da177e4
LT
1828 case 0x0001: de->media_type = DE_MEDIA_BNC; break;
1829 case 0x0002: de->media_type = DE_MEDIA_AUI; break;
1830 case 0x0204: de->media_type = DE_MEDIA_TP_FD; break;
1831 default: de->media_type = DE_MEDIA_TP_AUTO; break;
1832 }
f3b197ac 1833
1da177e4 1834 if (netif_msg_probe(de))
f639dc7d
JP
1835 pr_info("de%d: SROM leaf offset %u, default media %s\n",
1836 de->board_idx, ofs, media_name[de->media_type]);
1da177e4
LT
1837
1838 /* init SIA register values to defaults */
1839 for (i = 0; i < DE_MAX_MEDIA; i++) {
1840 de->media[i].type = DE_MEDIA_INVALID;
1841 de->media[i].csr13 = 0xffff;
1842 de->media[i].csr14 = 0xffff;
1843 de->media[i].csr15 = 0xffff;
1844 }
1845
1846 /* parse media blocks to see which media are supported,
1847 * and whether any custom CSR values are provided
1848 */
1849 bufp = ((void *)il) + sizeof(*il);
1850 for (i = 0; i < il->n_blocks; i++) {
1851 struct de_srom_media_block *ib = bufp;
1852 unsigned idx;
1853
1854 /* index based on media type in media block */
1855 switch(ib->opts & MediaBlockMask) {
1856 case 0: /* 10baseT */
1857 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half
1858 | SUPPORTED_Autoneg;
1859 idx = DE_MEDIA_TP;
1860 de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
1861 break;
1862 case 1: /* BNC */
1863 de->media_supported |= SUPPORTED_BNC;
1864 idx = DE_MEDIA_BNC;
1865 break;
1866 case 2: /* AUI */
1867 de->media_supported |= SUPPORTED_AUI;
1868 idx = DE_MEDIA_AUI;
1869 break;
1870 case 4: /* 10baseT-FD */
1871 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full
1872 | SUPPORTED_Autoneg;
1873 idx = DE_MEDIA_TP_FD;
1874 de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
1875 break;
1876 default:
1877 goto bad_srom;
1878 }
1879
1880 de->media[idx].type = idx;
1881
1882 if (netif_msg_probe(de))
f639dc7d
JP
1883 pr_info("de%d: media block #%u: %s",
1884 de->board_idx, i,
1885 media_name[de->media[idx].type]);
1da177e4
LT
1886
1887 bufp += sizeof (ib->opts);
1888
1889 if (ib->opts & MediaCustomCSRs) {
445854f4
HH
1890 de->media[idx].csr13 = get_unaligned(&ib->csr13);
1891 de->media[idx].csr14 = get_unaligned(&ib->csr14);
1892 de->media[idx].csr15 = get_unaligned(&ib->csr15);
1da177e4
LT
1893 bufp += sizeof(ib->csr13) + sizeof(ib->csr14) +
1894 sizeof(ib->csr15);
1895
1896 if (netif_msg_probe(de))
f639dc7d
JP
1897 pr_cont(" (%x,%x,%x)\n",
1898 de->media[idx].csr13,
1899 de->media[idx].csr14,
1900 de->media[idx].csr15);
f3b197ac 1901
1da177e4 1902 } else if (netif_msg_probe(de))
f639dc7d 1903 pr_cont("\n");
1da177e4
LT
1904
1905 if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
1906 break;
1907 }
1908
1909 de->media_advertise = de->media_supported;
1910
1911fill_defaults:
1912 /* fill in defaults, for cases where custom CSRs are not used */
1913 for (i = 0; i < DE_MAX_MEDIA; i++) {
1914 if (de->media[i].csr13 == 0xffff)
1915 de->media[i].csr13 = t21041_csr13[i];
e0f9c4f3
OZ
1916 if (de->media[i].csr14 == 0xffff) {
1917 /* autonegotiation is broken at least on some chip
1918 revisions - rev. 0x21 works, 0x11 does not */
1919 if (de->pdev->revision < 0x20)
1920 de->media[i].csr14 = t21041_csr14_brk[i];
1921 else
1922 de->media[i].csr14 = t21041_csr14[i];
1923 }
1da177e4
LT
1924 if (de->media[i].csr15 == 0xffff)
1925 de->media[i].csr15 = t21041_csr15[i];
1926 }
1927
c3a9392e 1928 de->ee_data = kmemdup(&ee_data[0], DE_EEPROM_SIZE, GFP_KERNEL);
1da177e4
LT
1929
1930 return;
1931
1932bad_srom:
1933 /* for error cases, it's ok to assume we support all these */
1934 for (i = 0; i < DE_MAX_MEDIA; i++)
1935 de->media[i].type = i;
1936 de->media_supported =
1937 SUPPORTED_10baseT_Half |
1938 SUPPORTED_10baseT_Full |
1939 SUPPORTED_Autoneg |
1940 SUPPORTED_TP |
1941 SUPPORTED_AUI |
1942 SUPPORTED_BNC;
1943 goto fill_defaults;
1944}
1945
90d8743d
SH
1946static const struct net_device_ops de_netdev_ops = {
1947 .ndo_open = de_open,
1948 .ndo_stop = de_close,
1949 .ndo_set_multicast_list = de_set_rx_mode,
1950 .ndo_start_xmit = de_start_xmit,
1951 .ndo_get_stats = de_get_stats,
1952 .ndo_tx_timeout = de_tx_timeout,
1953 .ndo_change_mtu = eth_change_mtu,
1954 .ndo_set_mac_address = eth_mac_addr,
1955 .ndo_validate_addr = eth_validate_addr,
1956};
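/* only the open/stop/xmit/rx-mode/stats/timeout hooks are device specific;
 * MTU and MAC-address handling fall back to the generic eth_change_mtu,
 * eth_mac_addr and eth_validate_addr helpers */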
1957
4a1d2d81 1958static int __devinit de_init_one (struct pci_dev *pdev,
1da177e4
LT
1959 const struct pci_device_id *ent)
1960{
1961 struct net_device *dev;
1962 struct de_private *de;
1963 int rc;
1964 void __iomem *regs;
afc7097f 1965 unsigned long pciaddr;
1da177e4
LT
1966 static int board_idx = -1;
1967
1968 board_idx++;
1969
1970#ifndef MODULE
1971 if (board_idx == 0)
1972 printk("%s", version);
1973#endif
1974
1975 /* allocate a new ethernet device structure, and fill in defaults */
1976 dev = alloc_etherdev(sizeof(struct de_private));
1977 if (!dev)
1978 return -ENOMEM;
1979
90d8743d 1980 dev->netdev_ops = &de_netdev_ops;
1da177e4 1981 SET_NETDEV_DEV(dev, &pdev->dev);
1da177e4 1982 dev->ethtool_ops = &de_ethtool_ops;
1da177e4
LT
1983 dev->watchdog_timeo = TX_TIMEOUT;
1984
8f15ea42 1985 de = netdev_priv(dev);
1da177e4
LT
1986 de->de21040 = ent->driver_data == 0 ? 1 : 0;
1987 de->pdev = pdev;
1988 de->dev = dev;
1989 de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug);
1990 de->board_idx = board_idx;
1991 spin_lock_init (&de->lock);
1992 init_timer(&de->media_timer);
1993 if (de->de21040)
1994 de->media_timer.function = de21040_media_timer;
1995 else
1996 de->media_timer.function = de21041_media_timer;
1997 de->media_timer.data = (unsigned long) de;
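	/* the media timer periodically re-checks the link and, on the 21041,
	 * drives selection among the supported media; the two chip variants
	 * use the separate handlers installed above */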
1998
1999 netif_carrier_off(dev);
2000 netif_stop_queue(dev);
2001
2002 /* wake up device, assign resources */
2003 rc = pci_enable_device(pdev);
2004 if (rc)
2005 goto err_out_free;
2006
 2007	/* reserve PCI regions so no other driver can claim this device */
2008 rc = pci_request_regions(pdev, DRV_NAME);
2009 if (rc)
2010 goto err_out_disable;
2011
2012 /* check for invalid IRQ value */
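	/* (IRQ 0 and 1 are never routed to PCI devices, so anything below 2
	 *  means no usable interrupt line was assigned) */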
2013 if (pdev->irq < 2) {
2014 rc = -EIO;
f639dc7d 2015 pr_err(PFX "invalid irq (%d) for pci dev %s\n",
1da177e4
LT
2016 pdev->irq, pci_name(pdev));
2017 goto err_out_res;
2018 }
2019
2020 dev->irq = pdev->irq;
2021
 2022	/* obtain and check validity of the PCI MMIO (CSR) address */
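	/* (BAR 1 is the chip's memory-mapped CSR window; BAR 0 is the
	 *  I/O-mapped alternative, which this driver does not use) */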
2023 pciaddr = pci_resource_start(pdev, 1);
2024 if (!pciaddr) {
2025 rc = -EIO;
f639dc7d 2026 pr_err(PFX "no MMIO resource for pci dev %s\n", pci_name(pdev));
1da177e4
LT
2027 goto err_out_res;
2028 }
2029 if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
2030 rc = -EIO;
f639dc7d
JP
2031 pr_err(PFX "MMIO resource (%llx) too small on pci dev %s\n",
2032 (unsigned long long)pci_resource_len(pdev, 1),
2033 pci_name(pdev));
1da177e4
LT
2034 goto err_out_res;
2035 }
2036
2037 /* remap CSR registers */
2038 regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
2039 if (!regs) {
2040 rc = -EIO;
f639dc7d
JP
2041 pr_err(PFX "Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
2042 (unsigned long long)pci_resource_len(pdev, 1),
2043 pciaddr, pci_name(pdev));
1da177e4
LT
2044 goto err_out_res;
2045 }
2046 dev->base_addr = (unsigned long) regs;
2047 de->regs = regs;
2048
2049 de_adapter_wake(de);
2050
2051 /* make sure hardware is not running */
2052 rc = de_reset_mac(de);
2053 if (rc) {
f639dc7d 2054 pr_err(PFX "Cannot reset MAC, pci dev %s\n", pci_name(pdev));
1da177e4
LT
2055 goto err_out_iomap;
2056 }
2057
2058 /* get MAC address, initialize default media type and
2059 * get list of supported media
2060 */
2061 if (de->de21040) {
2062 de21040_get_mac_address(de);
2063 de21040_get_media_info(de);
2064 } else {
2065 de21041_get_srom_info(de);
2066 }
2067
2068 /* register new network interface with kernel */
2069 rc = register_netdev(dev);
2070 if (rc)
2071 goto err_out_iomap;
2072
2073 /* print info about board and interface just registered */
f639dc7d
JP
2074 dev_info(&dev->dev, "%s at 0x%lx, %pM, IRQ %d\n",
2075 de->de21040 ? "21040" : "21041",
2076 dev->base_addr,
2077 dev->dev_addr,
2078 dev->irq);
1da177e4
LT
2079
2080 pci_set_drvdata(pdev, dev);
2081
2082 /* enable busmastering */
2083 pci_set_master(pdev);
2084
2085 /* put adapter to sleep */
2086 de_adapter_sleep(de);
2087
2088 return 0;
2089
2090err_out_iomap:
b4558ea9 2091 kfree(de->ee_data);
1da177e4
LT
2092 iounmap(regs);
2093err_out_res:
2094 pci_release_regions(pdev);
2095err_out_disable:
2096 pci_disable_device(pdev);
2097err_out_free:
2098 free_netdev(dev);
2099 return rc;
2100}
2101
4a1d2d81 2102static void __devexit de_remove_one (struct pci_dev *pdev)
1da177e4
LT
2103{
2104 struct net_device *dev = pci_get_drvdata(pdev);
8f15ea42 2105 struct de_private *de = netdev_priv(dev);
1da177e4 2106
7e0b58f3 2107 BUG_ON(!dev);
1da177e4 2108 unregister_netdev(dev);
b4558ea9 2109 kfree(de->ee_data);
1da177e4
LT
2110 iounmap(de->regs);
2111 pci_release_regions(pdev);
2112 pci_disable_device(pdev);
2113 pci_set_drvdata(pdev, NULL);
2114 free_netdev(dev);
2115}
2116
2117#ifdef CONFIG_PM
2118
05adc3b7 2119static int de_suspend (struct pci_dev *pdev, pm_message_t state)
1da177e4
LT
2120{
2121 struct net_device *dev = pci_get_drvdata (pdev);
8f15ea42 2122 struct de_private *de = netdev_priv(dev);
1da177e4
LT
2123
2124 rtnl_lock();
2125 if (netif_running (dev)) {
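		/* quiesce in order: stop the media timer, mask the IRQ, stop
		 * the hardware under the lock and detach the interface, then
		 * collect stats, free the rings and power the chip down */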
2126 del_timer_sync(&de->media_timer);
2127
2128 disable_irq(dev->irq);
2129 spin_lock_irq(&de->lock);
2130
2131 de_stop_hw(de);
2132 netif_stop_queue(dev);
2133 netif_device_detach(dev);
2134 netif_carrier_off(dev);
2135
2136 spin_unlock_irq(&de->lock);
2137 enable_irq(dev->irq);
f3b197ac 2138
1da177e4
LT
2139 /* Update the error counts. */
2140 __de_get_stats(de);
2141
2142 synchronize_irq(dev->irq);
2143 de_clean_rings(de);
2144
2145 de_adapter_sleep(de);
2146 pci_disable_device(pdev);
2147 } else {
2148 netif_device_detach(dev);
2149 }
2150 rtnl_unlock();
2151 return 0;
2152}
2153
2154static int de_resume (struct pci_dev *pdev)
2155{
2156 struct net_device *dev = pci_get_drvdata (pdev);
8f15ea42 2157 struct de_private *de = netdev_priv(dev);
9f486ae1 2158 int retval = 0;
1da177e4
LT
2159
2160 rtnl_lock();
2161 if (netif_device_present(dev))
2162 goto out;
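	/* the interface was never detached, so there is nothing to restore */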
9f486ae1
VH
2163 if (!netif_running(dev))
2164 goto out_attach;
2165 if ((retval = pci_enable_device(pdev))) {
f639dc7d 2166 dev_err(&dev->dev, "pci_enable_device failed in resume\n");
9f486ae1 2167 goto out;
1da177e4 2168 }
9f486ae1
VH
2169 de_init_hw(de);
2170out_attach:
2171 netif_device_attach(dev);
1da177e4
LT
2172out:
2173 rtnl_unlock();
 2174	return retval;
2175}
2176
2177#endif /* CONFIG_PM */
2178
2179static struct pci_driver de_driver = {
2180 .name = DRV_NAME,
2181 .id_table = de_pci_tbl,
2182 .probe = de_init_one,
4a1d2d81 2183 .remove = __devexit_p(de_remove_one),
1da177e4
LT
2184#ifdef CONFIG_PM
2185 .suspend = de_suspend,
2186 .resume = de_resume,
2187#endif
2188};
2189
2190static int __init de_init (void)
2191{
2192#ifdef MODULE
2193 printk("%s", version);
2194#endif
29917620 2195 return pci_register_driver(&de_driver);
1da177e4
LT
2196}
2197
2198static void __exit de_exit (void)
2199{
2200 pci_unregister_driver (&de_driver);
2201}
2202
2203module_init(de_init);
2204module_exit(de_exit);