1/* de2104x.c: A Linux PCI Ethernet driver for Intel/Digital 21040/1 chips. */
2/*
3 Copyright 2001,2003 Jeff Garzik <jgarzik@pobox.com>
4
5 Copyright 1994, 1995 Digital Equipment Corporation. [de4x5.c]
6 Written/copyright 1994-2001 by Donald Becker. [tulip.c]
7
8 This software may be used and distributed according to the terms of
9 the GNU General Public License (GPL), incorporated herein by reference.
10 Drivers based on or derived from this code fall under the GPL and must
11 retain the authorship, copyright and license notice. This file is not
12 a complete program and may only be used when the entire operating
13 system is licensed under the GPL.
14
15 See the file COPYING in this distribution for more information.
16
17 TODO, in rough priority order:
18 * Support forcing media type with a module parameter,
19 like dl2k.c/sundance.c
20 * Constants (module parms?) for Rx work limit
21 * Complete reset on PciErr
22 * Jumbo frames / dev->change_mtu
23 * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
24 * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
25 * Implement Tx software interrupt mitigation via
26 Tx descriptor bit
27
28 */
29
30#define DRV_NAME "de2104x"
31#define DRV_VERSION "0.7"
32#define DRV_RELDATE "Mar 17, 2004"
33
34#include <linux/module.h>
35#include <linux/kernel.h>
36#include <linux/netdevice.h>
37#include <linux/etherdevice.h>
38#include <linux/init.h>
39#include <linux/pci.h>
40#include <linux/delay.h>
41#include <linux/ethtool.h>
42#include <linux/compiler.h>
43#include <linux/rtnetlink.h>
44#include <linux/crc32.h>
45#include <linux/slab.h>
46
47#include <asm/io.h>
48#include <asm/irq.h>
49#include <asm/uaccess.h>
50#include <asm/unaligned.h>
51
52/* These identify the driver base version and may not be removed. */
53static char version[] =
54KERN_INFO DRV_NAME " PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
55
56MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
57MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
58MODULE_LICENSE("GPL");
59MODULE_VERSION(DRV_VERSION);
60
61static int debug = -1;
62module_param (debug, int, 0);
63MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");
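/* Note: a negative value (the default) selects DE_DEF_MSG_ENABLE below;
 * any other value is used directly as the netif_msg_* bitmap. */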
64
65/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
66#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
67 defined(CONFIG_SPARC) || defined(__ia64__) || \
68 defined(__sh__) || defined(__mips__)
69static int rx_copybreak = 1518;
70#else
71static int rx_copybreak = 100;
72#endif
73module_param (rx_copybreak, int, 0);
74MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied");
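/* Received frames no longer than rx_copybreak are copied into a freshly
 * allocated small skb and the full-size ring buffer is reused; longer frames
 * are passed up as-is and a new full-size buffer is mapped in their place
 * (see de_rx()). */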
75
76#define PFX DRV_NAME ": "
77
78#define DE_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
79 NETIF_MSG_PROBE | \
80 NETIF_MSG_LINK | \
81 NETIF_MSG_IFDOWN | \
82 NETIF_MSG_IFUP | \
83 NETIF_MSG_RX_ERR | \
84 NETIF_MSG_TX_ERR)
85
86/* Descriptor skip length in 32 bit longwords. */
87#ifndef CONFIG_DE2104X_DSL
88#define DSL 0
89#else
90#define DSL CONFIG_DE2104X_DSL
91#endif
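/* A non-zero DSL pads each descriptor with DSL unused longwords (the skip[]
 * field of struct de_desc below); the same value is programmed into the bus
 * mode register via DescSkipLen so the chip steps over the padding when
 * walking the rings. */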
92
93#define DE_RX_RING_SIZE 64
94#define DE_TX_RING_SIZE 64
95#define DE_RING_BYTES \
96 ((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \
97 (sizeof(struct de_desc) * DE_TX_RING_SIZE))
98#define NEXT_TX(N) (((N) + 1) & (DE_TX_RING_SIZE - 1))
99#define NEXT_RX(N) (((N) + 1) & (DE_RX_RING_SIZE - 1))
100#define TX_BUFFS_AVAIL(CP) \
101 (((CP)->tx_tail <= (CP)->tx_head) ? \
102 (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head : \
103 (CP)->tx_tail - (CP)->tx_head - 1)
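/* One Tx descriptor is always left unused, so tx_head == tx_tail can only
 * mean "ring empty" and is never ambiguous with a full ring. */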
104
105#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
106#define RX_OFFSET 2
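/* Reserving RX_OFFSET (2) bytes ahead of the 14-byte Ethernet header keeps
 * the IP header 32-bit aligned in copied receive buffers (see de_rx()). */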
107
108#define DE_SETUP_SKB ((struct sk_buff *) 1)
109#define DE_DUMMY_SKB ((struct sk_buff *) 2)
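/* DE_SETUP_SKB and DE_DUMMY_SKB are sentinel values stored in tx_skb[].skb to
 * mark slots carrying a setup frame or a dummy descriptor rather than a real
 * sk_buff, so de_tx() and de_clean_rings() know not to free them as packets. */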
110#define DE_SETUP_FRAME_WORDS 96
111#define DE_EEPROM_WORDS 256
112#define DE_EEPROM_SIZE (DE_EEPROM_WORDS * sizeof(u16))
113#define DE_MAX_MEDIA 5
114
115#define DE_MEDIA_TP_AUTO 0
116#define DE_MEDIA_BNC 1
117#define DE_MEDIA_AUI 2
118#define DE_MEDIA_TP 3
119#define DE_MEDIA_TP_FD 4
120#define DE_MEDIA_INVALID DE_MAX_MEDIA
121#define DE_MEDIA_FIRST 0
122#define DE_MEDIA_LAST (DE_MAX_MEDIA - 1)
123#define DE_AUI_BNC (SUPPORTED_AUI | SUPPORTED_BNC)
124
125#define DE_TIMER_LINK (60 * HZ)
126#define DE_TIMER_NO_LINK (5 * HZ)
127
128#define DE_NUM_REGS 16
129#define DE_REGS_SIZE (DE_NUM_REGS * sizeof(u32))
130#define DE_REGS_VER 1
131
132/* Time in jiffies before concluding the transmitter is hung. */
133#define TX_TIMEOUT (6*HZ)
134
135/* This is a mysterious value that can be written to CSR11 in the 21040 (only)
136 to support a pre-NWay full-duplex signaling mechanism using short frames.
137 No one knows what it should be, but if left at its default value some
138 10base2(!) packets trigger a full-duplex-request interrupt. */
139#define FULL_DUPLEX_MAGIC 0x6969
140
141enum {
142 /* NIC registers */
143 BusMode = 0x00,
144 TxPoll = 0x08,
145 RxPoll = 0x10,
146 RxRingAddr = 0x18,
147 TxRingAddr = 0x20,
148 MacStatus = 0x28,
149 MacMode = 0x30,
150 IntrMask = 0x38,
151 RxMissed = 0x40,
152 ROMCmd = 0x48,
153 CSR11 = 0x58,
154 SIAStatus = 0x60,
155 CSR13 = 0x68,
156 CSR14 = 0x70,
157 CSR15 = 0x78,
158 PCIPM = 0x40,
159
160 /* BusMode bits */
161 CmdReset = (1 << 0),
162 CacheAlign16 = 0x00008000,
163 BurstLen4 = 0x00000400,
164 DescSkipLen = (DSL << 2),
165
166 /* Rx/TxPoll bits */
167 NormalTxPoll = (1 << 0),
168 NormalRxPoll = (1 << 0),
169
170 /* Tx/Rx descriptor status bits */
171 DescOwn = (1 << 31),
172 RxError = (1 << 15),
173 RxErrLong = (1 << 7),
174 RxErrCRC = (1 << 1),
175 RxErrFIFO = (1 << 0),
176 RxErrRunt = (1 << 11),
177 RxErrFrame = (1 << 14),
178 RingEnd = (1 << 25),
179 FirstFrag = (1 << 29),
180 LastFrag = (1 << 30),
181 TxError = (1 << 15),
182 TxFIFOUnder = (1 << 1),
183 TxLinkFail = (1 << 2) | (1 << 10) | (1 << 11),
184 TxMaxCol = (1 << 8),
185 TxOWC = (1 << 9),
186 TxJabber = (1 << 14),
187 SetupFrame = (1 << 27),
188 TxSwInt = (1 << 31),
189
190 /* MacStatus bits */
191 IntrOK = (1 << 16),
192 IntrErr = (1 << 15),
193 RxIntr = (1 << 6),
194 RxEmpty = (1 << 7),
195 TxIntr = (1 << 0),
196 TxEmpty = (1 << 2),
197 PciErr = (1 << 13),
198 TxState = (1 << 22) | (1 << 21) | (1 << 20),
199 RxState = (1 << 19) | (1 << 18) | (1 << 17),
200 LinkFail = (1 << 12),
201 LinkPass = (1 << 4),
202 RxStopped = (1 << 8),
203 TxStopped = (1 << 1),
204
205 /* MacMode bits */
206 TxEnable = (1 << 13),
207 RxEnable = (1 << 1),
208 RxTx = TxEnable | RxEnable,
209 FullDuplex = (1 << 9),
210 AcceptAllMulticast = (1 << 7),
211 AcceptAllPhys = (1 << 6),
212 BOCnt = (1 << 5),
213 MacModeClear = (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) |
214 RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast,
215
216 /* ROMCmd bits */
217 EE_SHIFT_CLK = 0x02, /* EEPROM shift clock. */
218 EE_CS = 0x01, /* EEPROM chip select. */
219 EE_DATA_WRITE = 0x04, /* Data from the Tulip to EEPROM. */
220 EE_WRITE_0 = 0x01,
221 EE_WRITE_1 = 0x05,
222 EE_DATA_READ = 0x08, /* Data from the EEPROM chip. */
223 EE_ENB = (0x4800 | EE_CS),
224
225 /* The EEPROM commands include the always-set leading bit. */
226 EE_READ_CMD = 6,
227
228 /* RxMissed bits */
229 RxMissedOver = (1 << 16),
230 RxMissedMask = 0xffff,
231
232 /* SROM-related bits */
233 SROMC0InfoLeaf = 27,
234 MediaBlockMask = 0x3f,
235 MediaCustomCSRs = (1 << 6),
236
237 /* PCIPM bits */
238 PM_Sleep = (1 << 31),
239 PM_Snooze = (1 << 30),
240 PM_Mask = PM_Sleep | PM_Snooze,
241
242 /* SIAStatus bits */
243 NWayState = (1 << 14) | (1 << 13) | (1 << 12),
244 NWayRestart = (1 << 12),
245 NonselPortActive = (1 << 9),
246 SelPortActive = (1 << 8),
247 LinkFailStatus = (1 << 2),
248 NetCxnErr = (1 << 1),
249};
250
251static const u32 de_intr_mask =
252 IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty |
253 LinkPass | LinkFail | PciErr;
254
255/*
256 * Set the programmable burst length to 4 longwords for all chips:
257 * DMA errors result without these values. Cache-align to 16 longwords.
258 */
259static const u32 de_bus_mode = CacheAlign16 | BurstLen4 | DescSkipLen;
260
261struct de_srom_media_block {
262 u8 opts;
263 u16 csr13;
264 u16 csr14;
265 u16 csr15;
266} __packed;
267
268struct de_srom_info_leaf {
269 u16 default_media;
270 u8 n_blocks;
271 u8 unused;
272} __packed;
273
274struct de_desc {
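 /* opts1: ownership bit (DescOwn) plus completion status; opts2: control
  * flags and buffer length; addr1/addr2: DMA addresses of up to two data
  * buffers (this driver only uses addr1). */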
275 __le32 opts1;
276 __le32 opts2;
277 __le32 addr1;
278 __le32 addr2;
279#if DSL
280 __le32 skip[DSL];
281#endif
282};
283
284struct media_info {
285 u16 type; /* DE_MEDIA_xxx */
286 u16 csr13;
287 u16 csr14;
288 u16 csr15;
289};
290
291struct ring_info {
292 struct sk_buff *skb;
293 dma_addr_t mapping;
294};
295
296struct de_private {
297 unsigned tx_head;
298 unsigned tx_tail;
299 unsigned rx_tail;
300
301 void __iomem *regs;
302 struct net_device *dev;
303 spinlock_t lock;
304
305 struct de_desc *rx_ring;
306 struct de_desc *tx_ring;
307 struct ring_info tx_skb[DE_TX_RING_SIZE];
308 struct ring_info rx_skb[DE_RX_RING_SIZE];
309 unsigned rx_buf_sz;
310 dma_addr_t ring_dma;
311
312 u32 msg_enable;
313
314 struct net_device_stats net_stats;
315
316 struct pci_dev *pdev;
317
318 u16 setup_frame[DE_SETUP_FRAME_WORDS];
319
320 u32 media_type;
321 u32 media_supported;
322 u32 media_advertise;
323 struct media_info media[DE_MAX_MEDIA];
324 struct timer_list media_timer;
325
326 u8 *ee_data;
327 unsigned board_idx;
328 unsigned de21040 : 1;
329 unsigned media_lock : 1;
330};
331
332
333static void de_set_rx_mode (struct net_device *dev);
334static void de_tx (struct de_private *de);
335static void de_clean_rings (struct de_private *de);
336static void de_media_interrupt (struct de_private *de, u32 status);
337static void de21040_media_timer (unsigned long data);
338static void de21041_media_timer (unsigned long data);
339static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
340
341
342static DEFINE_PCI_DEVICE_TABLE(de_pci_tbl) = {
343 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
344 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
345 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
346 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
347 { },
348};
349MODULE_DEVICE_TABLE(pci, de_pci_tbl);
350
351static const char * const media_name[DE_MAX_MEDIA] = {
352 "10baseT auto",
353 "BNC",
354 "AUI",
355 "10baseT-HD",
356 "10baseT-FD"
357};
358
359/* 21040 transceiver register settings:
360 * TP AUTO(unused), BNC(unused), AUI, TP, TP FD*/
361static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, };
362static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, };
363static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
364
365/* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
366static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
367static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
368/* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */
369static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
370static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
371
372
373#define dr32(reg) ioread32(de->regs + (reg))
374#define dw32(reg, val) iowrite32((val), de->regs + (reg))
375
376
377static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
378 u32 status, u32 len)
379{
380 if (netif_msg_rx_err (de))
381 printk (KERN_DEBUG
382 "%s: rx err, slot %d status 0x%x len %d\n",
383 de->dev->name, rx_tail, status, len);
384
385 if ((status & 0x38000300) != 0x0300) {
386 /* Ignore earlier buffers. */
387 if ((status & 0xffff) != 0x7fff) {
388 if (netif_msg_rx_err(de))
389 dev_warn(&de->dev->dev,
390 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
391 status);
392 de->net_stats.rx_length_errors++;
393 }
394 } else if (status & RxError) {
395 /* There was a fatal error. */
396 de->net_stats.rx_errors++; /* end of a packet.*/
397 if (status & 0x0890) de->net_stats.rx_length_errors++;
398 if (status & RxErrCRC) de->net_stats.rx_crc_errors++;
399 if (status & RxErrFIFO) de->net_stats.rx_fifo_errors++;
400 }
401}
402
403static void de_rx (struct de_private *de)
404{
405 unsigned rx_tail = de->rx_tail;
406 unsigned rx_work = DE_RX_RING_SIZE;
407 unsigned drop = 0;
408 int rc;
409
410 while (--rx_work) {
411 u32 status, len;
412 dma_addr_t mapping;
413 struct sk_buff *skb, *copy_skb;
414 unsigned copying_skb, buflen;
415
416 skb = de->rx_skb[rx_tail].skb;
417 BUG_ON(!skb);
418 rmb();
419 status = le32_to_cpu(de->rx_ring[rx_tail].opts1);
420 if (status & DescOwn)
421 break;
422
423 len = ((status >> 16) & 0x7ff) - 4;
424 mapping = de->rx_skb[rx_tail].mapping;
425
426 if (unlikely(drop)) {
427 de->net_stats.rx_dropped++;
428 goto rx_next;
429 }
430
431 if (unlikely((status & 0x38008300) != 0x0300)) {
432 de_rx_err_acct(de, rx_tail, status, len);
433 goto rx_next;
434 }
435
436 copying_skb = (len <= rx_copybreak);
437
438 if (unlikely(netif_msg_rx_status(de)))
439 printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d copying? %d\n",
440 de->dev->name, rx_tail, status, len,
441 copying_skb);
442
443 buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
444 copy_skb = dev_alloc_skb (buflen);
445 if (unlikely(!copy_skb)) {
446 de->net_stats.rx_dropped++;
447 drop = 1;
448 rx_work = 100;
449 goto rx_next;
450 }
451
452 if (!copying_skb) {
453 pci_unmap_single(de->pdev, mapping,
454 buflen, PCI_DMA_FROMDEVICE);
455 skb_put(skb, len);
456
457 mapping =
458 de->rx_skb[rx_tail].mapping =
459 pci_map_single(de->pdev, copy_skb->data,
460 buflen, PCI_DMA_FROMDEVICE);
461 de->rx_skb[rx_tail].skb = copy_skb;
462 } else {
463 pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
464 skb_reserve(copy_skb, RX_OFFSET);
465 skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
466 len);
467 pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
468
469 /* We'll reuse the original ring buffer. */
470 skb = copy_skb;
471 }
472
473 skb->protocol = eth_type_trans (skb, de->dev);
474
475 de->net_stats.rx_packets++;
476 de->net_stats.rx_bytes += skb->len;
477 rc = netif_rx (skb);
478 if (rc == NET_RX_DROP)
479 drop = 1;
480
481rx_next:
482 if (rx_tail == (DE_RX_RING_SIZE - 1))
483 de->rx_ring[rx_tail].opts2 =
484 cpu_to_le32(RingEnd | de->rx_buf_sz);
485 else
486 de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
487 de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
488 wmb();
489 de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
490 rx_tail = NEXT_RX(rx_tail);
491 }
492
493 if (!rx_work)
494 dev_warn(&de->dev->dev, "rx work limit reached\n");
495
496 de->rx_tail = rx_tail;
497}
498
499static irqreturn_t de_interrupt (int irq, void *dev_instance)
500{
501 struct net_device *dev = dev_instance;
502 struct de_private *de = netdev_priv(dev);
503 u32 status;
504
505 status = dr32(MacStatus);
506 if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))
507 return IRQ_NONE;
508
509 if (netif_msg_intr(de))
510 printk(KERN_DEBUG "%s: intr, status %08x mode %08x desc %u/%u/%u\n",
511 dev->name, status, dr32(MacMode),
512 de->rx_tail, de->tx_head, de->tx_tail);
513
514 dw32(MacStatus, status);
515
516 if (status & (RxIntr | RxEmpty)) {
517 de_rx(de);
518 if (status & RxEmpty)
519 dw32(RxPoll, NormalRxPoll);
520 }
521
522 spin_lock(&de->lock);
523
524 if (status & (TxIntr | TxEmpty))
525 de_tx(de);
526
527 if (status & (LinkPass | LinkFail))
528 de_media_interrupt(de, status);
529
530 spin_unlock(&de->lock);
531
532 if (status & PciErr) {
533 u16 pci_status;
534
535 pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
536 pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
537 dev_err(&de->dev->dev,
538 "PCI bus error, status=%08x, PCI status=%04x\n",
539 status, pci_status);
540 }
541
542 return IRQ_HANDLED;
543}
544
545static void de_tx (struct de_private *de)
546{
547 unsigned tx_head = de->tx_head;
548 unsigned tx_tail = de->tx_tail;
549
550 while (tx_tail != tx_head) {
551 struct sk_buff *skb;
552 u32 status;
553
554 rmb();
555 status = le32_to_cpu(de->tx_ring[tx_tail].opts1);
556 if (status & DescOwn)
557 break;
558
559 skb = de->tx_skb[tx_tail].skb;
560 BUG_ON(!skb);
561 if (unlikely(skb == DE_DUMMY_SKB))
562 goto next;
563
564 if (unlikely(skb == DE_SETUP_SKB)) {
565 pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
566 sizeof(de->setup_frame), PCI_DMA_TODEVICE);
567 goto next;
568 }
569
570 pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
571 skb->len, PCI_DMA_TODEVICE);
572
573 if (status & LastFrag) {
574 if (status & TxError) {
575 if (netif_msg_tx_err(de))
576 printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
577 de->dev->name, status);
578 de->net_stats.tx_errors++;
579 if (status & TxOWC)
580 de->net_stats.tx_window_errors++;
581 if (status & TxMaxCol)
582 de->net_stats.tx_aborted_errors++;
583 if (status & TxLinkFail)
584 de->net_stats.tx_carrier_errors++;
585 if (status & TxFIFOUnder)
586 de->net_stats.tx_fifo_errors++;
587 } else {
588 de->net_stats.tx_packets++;
589 de->net_stats.tx_bytes += skb->len;
590 if (netif_msg_tx_done(de))
591 printk(KERN_DEBUG "%s: tx done, slot %d\n",
592 de->dev->name, tx_tail);
593 }
594 dev_kfree_skb_irq(skb);
595 }
596
597next:
598 de->tx_skb[tx_tail].skb = NULL;
599
600 tx_tail = NEXT_TX(tx_tail);
601 }
602
603 de->tx_tail = tx_tail;
604
605 if (netif_queue_stopped(de->dev) && (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4)))
606 netif_wake_queue(de->dev);
607}
608
609static netdev_tx_t de_start_xmit (struct sk_buff *skb,
610 struct net_device *dev)
611{
612 struct de_private *de = netdev_priv(dev);
613 unsigned int entry, tx_free;
614 u32 mapping, len, flags = FirstFrag | LastFrag;
615 struct de_desc *txd;
616
617 spin_lock_irq(&de->lock);
618
619 tx_free = TX_BUFFS_AVAIL(de);
620 if (tx_free == 0) {
621 netif_stop_queue(dev);
622 spin_unlock_irq(&de->lock);
623 return NETDEV_TX_BUSY;
624 }
625 tx_free--;
626
627 entry = de->tx_head;
628
629 txd = &de->tx_ring[entry];
630
631 len = skb->len;
632 mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
633 if (entry == (DE_TX_RING_SIZE - 1))
634 flags |= RingEnd;
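 /* Request a Tx-complete interrupt on this frame when the ring has just
  * filled or is half used, so finished descriptors are reclaimed without
  * interrupting on every packet. */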
635 if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
636 flags |= TxSwInt;
637 flags |= len;
638 txd->opts2 = cpu_to_le32(flags);
639 txd->addr1 = cpu_to_le32(mapping);
640
641 de->tx_skb[entry].skb = skb;
642 de->tx_skb[entry].mapping = mapping;
643 wmb();
644
645 txd->opts1 = cpu_to_le32(DescOwn);
646 wmb();
647
648 de->tx_head = NEXT_TX(entry);
649 if (netif_msg_tx_queued(de))
650 printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
651 dev->name, entry, skb->len);
652
653 if (tx_free == 0)
654 netif_stop_queue(dev);
655
656 spin_unlock_irq(&de->lock);
657
658 /* Trigger an immediate transmit demand. */
659 dw32(TxPoll, NormalTxPoll);
660
661 return NETDEV_TX_OK;
662}
663
664/* Set or clear the multicast filter for this adaptor.
665 Note that we only use exclusion around actually queueing the
666 new frame, not around filling de->setup_frame. This is non-deterministic
667 when re-entered but still correct. */
668
669#undef set_bit_le
670#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
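/* set_bit_le() sets bit i of a little-endian bit array; it is used below to
 * build the 512-bit multicast hash filter from CRC-derived indices. */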
671
672static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
673{
674 struct de_private *de = netdev_priv(dev);
675 u16 hash_table[32];
676 struct netdev_hw_addr *ha;
677 int i;
678 u16 *eaddrs;
679
680 memset(hash_table, 0, sizeof(hash_table));
681 set_bit_le(255, hash_table); /* Broadcast entry */
682 /* This should work on big-endian machines as well. */
683 netdev_for_each_mc_addr(ha, dev) {
684 int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
685
686 set_bit_le(index, hash_table);
687 }
688
689 for (i = 0; i < 32; i++) {
690 *setup_frm++ = hash_table[i];
691 *setup_frm++ = hash_table[i];
1da177e4 692 }
693 setup_frm = &de->setup_frame[13*6];
694
695 /* Fill the final entry with our physical address. */
696 eaddrs = (u16 *)dev->dev_addr;
697 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
698 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
699 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
700}
701
702static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
703{
704 struct de_private *de = netdev_priv(dev);
705 struct netdev_hw_addr *ha;
706 u16 *eaddrs;
707
708 /* We have <= 14 addresses so we can use the wonderful
709 16 address perfect filtering of the Tulip. */
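 /* Each of the 16 filter entries is 12 bytes: the three 16-bit words of an
  * address are each stored twice, since only the low shortword of every
  * 32-bit longword in the setup frame is significant (see the note in
  * __de_set_rx_mode below). */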
710 netdev_for_each_mc_addr(ha, dev) {
711 eaddrs = (u16 *) ha->addr;
712 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
713 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
714 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
715 }
716 /* Fill the unused entries with the broadcast address. */
717 memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
718 setup_frm = &de->setup_frame[15*6];
719
720 /* Fill the final entry with our physical address. */
721 eaddrs = (u16 *)dev->dev_addr;
722 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
723 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
724 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
725}
726
727
728static void __de_set_rx_mode (struct net_device *dev)
729{
730 struct de_private *de = netdev_priv(dev);
731 u32 macmode;
732 unsigned int entry;
733 u32 mapping;
734 struct de_desc *txd;
735 struct de_desc *dummy_txd = NULL;
736
737 macmode = dr32(MacMode) & ~(AcceptAllMulticast | AcceptAllPhys);
738
739 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
740 macmode |= AcceptAllMulticast | AcceptAllPhys;
741 goto out;
742 }
743
744 if ((netdev_mc_count(dev) > 1000) || (dev->flags & IFF_ALLMULTI)) {
745 /* Too many to filter well -- accept all multicasts. */
746 macmode |= AcceptAllMulticast;
747 goto out;
748 }
749
750 /* Note that only the low-address shortword of setup_frame is valid!
751 The values are doubled for big-endian architectures. */
752 if (netdev_mc_count(dev) > 14) /* Must use a multicast hash table. */
753 build_setup_frame_hash (de->setup_frame, dev);
754 else
755 build_setup_frame_perfect (de->setup_frame, dev);
756
757 /*
758 * Now add this frame to the Tx list.
759 */
760
761 entry = de->tx_head;
762
763 /* Avoid a chip erratum by prefixing a dummy entry. */
764 if (entry != 0) {
765 de->tx_skb[entry].skb = DE_DUMMY_SKB;
766
767 dummy_txd = &de->tx_ring[entry];
768 dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ?
769 cpu_to_le32(RingEnd) : 0;
770 dummy_txd->addr1 = 0;
771
772 /* Must set DescOwned later to avoid race with chip */
773
774 entry = NEXT_TX(entry);
775 }
776
777 de->tx_skb[entry].skb = DE_SETUP_SKB;
778 de->tx_skb[entry].mapping = mapping =
779 pci_map_single (de->pdev, de->setup_frame,
780 sizeof (de->setup_frame), PCI_DMA_TODEVICE);
781
782 /* Put the setup frame on the Tx list. */
783 txd = &de->tx_ring[entry];
784 if (entry == (DE_TX_RING_SIZE - 1))
785 txd->opts2 = cpu_to_le32(SetupFrame | RingEnd | sizeof (de->setup_frame));
786 else
787 txd->opts2 = cpu_to_le32(SetupFrame | sizeof (de->setup_frame));
788 txd->addr1 = cpu_to_le32(mapping);
789 wmb();
790
791 txd->opts1 = cpu_to_le32(DescOwn);
792 wmb();
793
794 if (dummy_txd) {
795 dummy_txd->opts1 = cpu_to_le32(DescOwn);
796 wmb();
797 }
798
799 de->tx_head = NEXT_TX(entry);
800
801 if (TX_BUFFS_AVAIL(de) == 0)
802 netif_stop_queue(dev);
803
804 /* Trigger an immediate transmit demand. */
805 dw32(TxPoll, NormalTxPoll);
806
807out:
808 if (macmode != dr32(MacMode))
809 dw32(MacMode, macmode);
810}
811
812static void de_set_rx_mode (struct net_device *dev)
813{
814 unsigned long flags;
815 struct de_private *de = netdev_priv(dev);
816
817 spin_lock_irqsave (&de->lock, flags);
818 __de_set_rx_mode(dev);
819 spin_unlock_irqrestore (&de->lock, flags);
820}
821
822static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
823{
824 if (unlikely(rx_missed & RxMissedOver))
825 de->net_stats.rx_missed_errors += RxMissedMask;
826 else
827 de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask);
828}
829
830static void __de_get_stats(struct de_private *de)
831{
832 u32 tmp = dr32(RxMissed); /* self-clearing */
833
834 de_rx_missed(de, tmp);
835}
836
837static struct net_device_stats *de_get_stats(struct net_device *dev)
838{
839 struct de_private *de = netdev_priv(dev);
840
841 /* The chip only needs to report frames it silently dropped. */
842 spin_lock_irq(&de->lock);
843 if (netif_running(dev) && netif_device_present(dev))
844 __de_get_stats(de);
845 spin_unlock_irq(&de->lock);
846
847 return &de->net_stats;
848}
849
850static inline int de_is_running (struct de_private *de)
851{
852 return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0;
853}
854
855static void de_stop_rxtx (struct de_private *de)
856{
857 u32 macmode;
858 unsigned int i = 1300/100;
859
860 macmode = dr32(MacMode);
861 if (macmode & RxTx) {
862 dw32(MacMode, macmode & ~RxTx);
863 dr32(MacMode);
864 }
865
866 /* wait until in-flight frame completes.
867 * Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin)
868 * Typically expect this loop to end in < 50 us on 100BT.
869 */
870 while (--i) {
871 if (!de_is_running(de))
872 return;
873 udelay(100);
874 }
875
876 dev_warn(&de->dev->dev, "timeout expired stopping DMA\n");
877}
878
879static inline void de_start_rxtx (struct de_private *de)
880{
881 u32 macmode;
882
883 macmode = dr32(MacMode);
884 if ((macmode & RxTx) != RxTx) {
885 dw32(MacMode, macmode | RxTx);
886 dr32(MacMode);
887 }
888}
889
890static void de_stop_hw (struct de_private *de)
891{
892
893 udelay(5);
894 dw32(IntrMask, 0);
895
896 de_stop_rxtx(de);
897
898 dw32(MacStatus, dr32(MacStatus));
899
900 udelay(10);
901
902 de->rx_tail = 0;
903 de->tx_head = de->tx_tail = 0;
904}
905
906static void de_link_up(struct de_private *de)
907{
908 if (!netif_carrier_ok(de->dev)) {
909 netif_carrier_on(de->dev);
910 if (netif_msg_link(de))
911 dev_info(&de->dev->dev, "link up, media %s\n",
912 media_name[de->media_type]);
913 }
914}
915
916static void de_link_down(struct de_private *de)
917{
918 if (netif_carrier_ok(de->dev)) {
919 netif_carrier_off(de->dev);
920 if (netif_msg_link(de))
921 dev_info(&de->dev->dev, "link down\n");
922 }
923}
924
925static void de_set_media (struct de_private *de)
926{
927 unsigned media = de->media_type;
928 u32 macmode = dr32(MacMode);
929
930 if (de_is_running(de))
931 dev_warn(&de->dev->dev,
932 "chip is running while changing media!\n");
933
934 if (de->de21040)
935 dw32(CSR11, FULL_DUPLEX_MAGIC);
936 dw32(CSR13, 0); /* Reset phy */
937 dw32(CSR14, de->media[media].csr14);
938 dw32(CSR15, de->media[media].csr15);
939 dw32(CSR13, de->media[media].csr13);
940
941 /* must delay 10ms before writing to other registers,
942 * especially CSR6
943 */
944 mdelay(10);
945
946 if (media == DE_MEDIA_TP_FD)
947 macmode |= FullDuplex;
948 else
949 macmode &= ~FullDuplex;
950
951 if (netif_msg_link(de)) {
952 dev_info(&de->dev->dev, "set link %s\n", media_name[media]);
953 dev_info(&de->dev->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
954 dr32(MacMode), dr32(SIAStatus),
955 dr32(CSR13), dr32(CSR14), dr32(CSR15));
956
957 dev_info(&de->dev->dev,
958 "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
959 macmode, de->media[media].csr13,
960 de->media[media].csr14, de->media[media].csr15);
961 }
962 if (macmode != dr32(MacMode))
963 dw32(MacMode, macmode);
964}
965
966static void de_next_media (struct de_private *de, u32 *media,
967 unsigned int n_media)
968{
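 /* Switch to the first candidate medium we are allowed to advertise; if
  * none of the candidates qualifies, leave de->media_type unchanged. */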
969 unsigned int i;
970
971 for (i = 0; i < n_media; i++) {
972 if (de_ok_to_advertise(de, media[i])) {
973 de->media_type = media[i];
974 return;
975 }
976 }
977}
978
979static void de21040_media_timer (unsigned long data)
980{
981 struct de_private *de = (struct de_private *) data;
982 struct net_device *dev = de->dev;
983 u32 status = dr32(SIAStatus);
984 unsigned int carrier;
985 unsigned long flags;
986
987 carrier = (status & NetCxnErr) ? 0 : 1;
988
989 if (carrier) {
990 if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
991 goto no_link_yet;
992
993 de->media_timer.expires = jiffies + DE_TIMER_LINK;
994 add_timer(&de->media_timer);
995 if (!netif_carrier_ok(dev))
996 de_link_up(de);
997 else
998 if (netif_msg_timer(de))
999 dev_info(&dev->dev, "%s link ok, status %x\n",
1000 media_name[de->media_type], status);
1001 return;
1002 }
1003
1004 de_link_down(de);
1005
1006 if (de->media_lock)
1007 return;
1008
1009 if (de->media_type == DE_MEDIA_AUI) {
1010 u32 next_state = DE_MEDIA_TP;
1011 de_next_media(de, &next_state, 1);
1012 } else {
1013 u32 next_state = DE_MEDIA_AUI;
1014 de_next_media(de, &next_state, 1);
1015 }
1016
1017 spin_lock_irqsave(&de->lock, flags);
1018 de_stop_rxtx(de);
1019 spin_unlock_irqrestore(&de->lock, flags);
1020 de_set_media(de);
1021 de_start_rxtx(de);
1022
1023no_link_yet:
1024 de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
1025 add_timer(&de->media_timer);
1026
1027 if (netif_msg_timer(de))
1028 dev_info(&dev->dev, "no link, trying media %s, status %x\n",
1029 media_name[de->media_type], status);
1030}
1031
1032static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
1033{
1034 switch (new_media) {
1035 case DE_MEDIA_TP_AUTO:
1036 if (!(de->media_advertise & ADVERTISED_Autoneg))
1037 return 0;
1038 if (!(de->media_advertise & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full)))
1039 return 0;
1040 break;
1041 case DE_MEDIA_BNC:
1042 if (!(de->media_advertise & ADVERTISED_BNC))
1043 return 0;
1044 break;
1045 case DE_MEDIA_AUI:
1046 if (!(de->media_advertise & ADVERTISED_AUI))
1047 return 0;
1048 break;
1049 case DE_MEDIA_TP:
1050 if (!(de->media_advertise & ADVERTISED_10baseT_Half))
1051 return 0;
1052 break;
1053 case DE_MEDIA_TP_FD:
1054 if (!(de->media_advertise & ADVERTISED_10baseT_Full))
1055 return 0;
1056 break;
1057 }
1058
1059 return 1;
1060}
1061
1062static void de21041_media_timer (unsigned long data)
1063{
1064 struct de_private *de = (struct de_private *) data;
1065 struct net_device *dev = de->dev;
1066 u32 status = dr32(SIAStatus);
1067 unsigned int carrier;
1068 unsigned long flags;
1069
1070 /* clear port active bits */
1071 dw32(SIAStatus, NonselPortActive | SelPortActive);
1072
1073 carrier = (status & NetCxnErr) ? 0 : 1;
1074
1075 if (carrier) {
1076 if ((de->media_type == DE_MEDIA_TP_AUTO ||
1077 de->media_type == DE_MEDIA_TP ||
1078 de->media_type == DE_MEDIA_TP_FD) &&
1079 (status & LinkFailStatus))
1080 goto no_link_yet;
1081
1082 de->media_timer.expires = jiffies + DE_TIMER_LINK;
1083 add_timer(&de->media_timer);
1084 if (!netif_carrier_ok(dev))
1085 de_link_up(de);
1086 else
1087 if (netif_msg_timer(de))
1088 dev_info(&dev->dev,
1089 "%s link ok, mode %x status %x\n",
1090 media_name[de->media_type],
1091 dr32(MacMode), status);
1092 return;
1093 }
1094
1095 de_link_down(de);
1096
1097 /* if media type locked, don't switch media */
1098 if (de->media_lock)
1099 goto set_media;
1100
1101 /* if activity detected, use that as hint for new media type */
1102 if (status & NonselPortActive) {
1103 unsigned int have_media = 1;
1104
1105 /* if AUI/BNC selected, then activity is on TP port */
1106 if (de->media_type == DE_MEDIA_AUI ||
1107 de->media_type == DE_MEDIA_BNC) {
1108 if (de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))
1109 de->media_type = DE_MEDIA_TP_AUTO;
1110 else
1111 have_media = 0;
1112 }
1113
1114 /* TP selected. If there is only TP and BNC, then it's BNC */
1115 else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_BNC) &&
1116 de_ok_to_advertise(de, DE_MEDIA_BNC))
1117 de->media_type = DE_MEDIA_BNC;
1118
1119 /* TP selected. If there is only TP and AUI, then it's AUI */
1120 else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_AUI) &&
1121 de_ok_to_advertise(de, DE_MEDIA_AUI))
1122 de->media_type = DE_MEDIA_AUI;
1123
1124 /* otherwise, ignore the hint */
1125 else
1126 have_media = 0;
1127
1128 if (have_media)
1129 goto set_media;
1130 }
1131
1132 /*
1133 * Absent or ambiguous activity hint, move to next advertised
1134 * media state. If de->media_type is left unchanged, this
1135 * simply resets the PHY and reloads the current media settings.
1136 */
1137 if (de->media_type == DE_MEDIA_AUI) {
1138 u32 next_states[] = { DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
1139 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1140 } else if (de->media_type == DE_MEDIA_BNC) {
1141 u32 next_states[] = { DE_MEDIA_TP_AUTO, DE_MEDIA_AUI };
1142 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1143 } else {
1144 u32 next_states[] = { DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
1145 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1146 }
1147
1148set_media:
1149 spin_lock_irqsave(&de->lock, flags);
1150 de_stop_rxtx(de);
1151 spin_unlock_irqrestore(&de->lock, flags);
1152 de_set_media(de);
1153 de_start_rxtx(de);
1154
1155no_link_yet:
1156 de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
1157 add_timer(&de->media_timer);
1158
1159 if (netif_msg_timer(de))
1160 dev_info(&dev->dev, "no link, trying media %s, status %x\n",
1161 media_name[de->media_type], status);
1162}
1163
1164static void de_media_interrupt (struct de_private *de, u32 status)
1165{
1166 if (status & LinkPass) {
1167 /* Ignore if current media is AUI or BNC and we can't use TP */
1168 if ((de->media_type == DE_MEDIA_AUI ||
1169 de->media_type == DE_MEDIA_BNC) &&
1170 (de->media_lock ||
1171 !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO)))
1172 return;
1173 /* If current media is not TP, change it to TP */
1174 if ((de->media_type == DE_MEDIA_AUI ||
1175 de->media_type == DE_MEDIA_BNC)) {
1176 de->media_type = DE_MEDIA_TP_AUTO;
1177 de_stop_rxtx(de);
1178 de_set_media(de);
1179 de_start_rxtx(de);
1180 }
1181 de_link_up(de);
1182 mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
1183 return;
1184 }
1185
1186 BUG_ON(!(status & LinkFail));
1187 /* Mark the link as down only if current media is TP */
1188 if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI &&
1189 de->media_type != DE_MEDIA_BNC) {
1190 de_link_down(de);
1191 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1192 }
1193}
1194
1195static int de_reset_mac (struct de_private *de)
1196{
1197 u32 status, tmp;
1198
1199 /*
1200 * Reset MAC. de4x5.c and tulip.c examined for "advice"
1201 * in this area.
1202 */
1203
1204 if (dr32(BusMode) == 0xffffffff)
1205 return -EBUSY;
1206
1207 /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
1208 dw32 (BusMode, CmdReset);
1209 mdelay (1);
1210
1211 dw32 (BusMode, de_bus_mode);
1212 mdelay (1);
1213
1214 for (tmp = 0; tmp < 5; tmp++) {
1215 dr32 (BusMode);
1216 mdelay (1);
1217 }
1218
1219 mdelay (1);
1220
1221 status = dr32(MacStatus);
1222 if (status & (RxState | TxState))
1223 return -EBUSY;
1224 if (status == 0xffffffff)
1225 return -ENODEV;
1226 return 0;
1227}
1228
1229static void de_adapter_wake (struct de_private *de)
1230{
1231 u32 pmctl;
1232
1233 if (de->de21040)
1234 return;
1235
1236 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1237 if (pmctl & PM_Mask) {
1238 pmctl &= ~PM_Mask;
1239 pci_write_config_dword(de->pdev, PCIPM, pmctl);
1240
1241 /* de4x5.c delays, so we do too */
1242 msleep(10);
1243 }
1244}
1245
1246static void de_adapter_sleep (struct de_private *de)
1247{
1248 u32 pmctl;
1249
1250 if (de->de21040)
1251 return;
1252
1253 dw32(CSR13, 0); /* Reset phy */
1254 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1255 pmctl |= PM_Sleep;
1256 pci_write_config_dword(de->pdev, PCIPM, pmctl);
1257}
1258
1259static int de_init_hw (struct de_private *de)
1260{
1261 struct net_device *dev = de->dev;
1262 u32 macmode;
1263 int rc;
1264
1265 de_adapter_wake(de);
1266
1267 macmode = dr32(MacMode) & ~MacModeClear;
1268
1269 rc = de_reset_mac(de);
1270 if (rc)
1271 return rc;
1272
1273 de_set_media(de); /* reset phy */
1274
1275 dw32(RxRingAddr, de->ring_dma);
1276 dw32(TxRingAddr, de->ring_dma + (sizeof(struct de_desc) * DE_RX_RING_SIZE));
1277
1278 dw32(MacMode, RxTx | macmode);
1279
1280 dr32(RxMissed); /* self-clearing */
1281
1282 dw32(IntrMask, de_intr_mask);
1283
1284 de_set_rx_mode(dev);
1285
1286 return 0;
1287}
1288
1289static int de_refill_rx (struct de_private *de)
1290{
1291 unsigned i;
1292
1293 for (i = 0; i < DE_RX_RING_SIZE; i++) {
1294 struct sk_buff *skb;
1295
1296 skb = dev_alloc_skb(de->rx_buf_sz);
1297 if (!skb)
1298 goto err_out;
1299
1300 skb->dev = de->dev;
1301
1302 de->rx_skb[i].mapping = pci_map_single(de->pdev,
1303 skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1304 de->rx_skb[i].skb = skb;
1305
1306 de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
1307 if (i == (DE_RX_RING_SIZE - 1))
1308 de->rx_ring[i].opts2 =
1309 cpu_to_le32(RingEnd | de->rx_buf_sz);
1310 else
1311 de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz);
1312 de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping);
1313 de->rx_ring[i].addr2 = 0;
1314 }
1315
1316 return 0;
1317
1318err_out:
1319 de_clean_rings(de);
1320 return -ENOMEM;
1321}
1322
1323static int de_init_rings (struct de_private *de)
1324{
1325 memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1326 de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1327
1328 de->rx_tail = 0;
1329 de->tx_head = de->tx_tail = 0;
1330
1331 return de_refill_rx (de);
1332}
1333
1334static int de_alloc_rings (struct de_private *de)
1335{
1336 de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma);
1337 if (!de->rx_ring)
1338 return -ENOMEM;
1339 de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
1340 return de_init_rings(de);
1341}
1342
1343static void de_clean_rings (struct de_private *de)
1344{
1345 unsigned i;
1346
1347 memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE);
1348 de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1349 wmb();
1350 memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1351 de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1352 wmb();
1353
1354 for (i = 0; i < DE_RX_RING_SIZE; i++) {
1355 if (de->rx_skb[i].skb) {
1356 pci_unmap_single(de->pdev, de->rx_skb[i].mapping,
1357 de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1358 dev_kfree_skb(de->rx_skb[i].skb);
1359 }
1360 }
1361
1362 for (i = 0; i < DE_TX_RING_SIZE; i++) {
1363 struct sk_buff *skb = de->tx_skb[i].skb;
1364 if ((skb) && (skb != DE_DUMMY_SKB)) {
1365 if (skb != DE_SETUP_SKB) {
1366 de->net_stats.tx_dropped++;
1367 pci_unmap_single(de->pdev,
1368 de->tx_skb[i].mapping,
1369 skb->len, PCI_DMA_TODEVICE);
1370 dev_kfree_skb(skb);
1371 } else {
1372 pci_unmap_single(de->pdev,
1373 de->tx_skb[i].mapping,
1374 sizeof(de->setup_frame),
1375 PCI_DMA_TODEVICE);
1376 }
1377 }
1378 }
1379
1380 memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE);
1381 memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE);
1382}
1383
1384static void de_free_rings (struct de_private *de)
1385{
1386 de_clean_rings(de);
1387 pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma);
1388 de->rx_ring = NULL;
1389 de->tx_ring = NULL;
1390}
1391
1392static int de_open (struct net_device *dev)
1393{
1394 struct de_private *de = netdev_priv(dev);
1395 int rc;
1396
1397 if (netif_msg_ifup(de))
1398 printk(KERN_DEBUG "%s: enabling interface\n", dev->name);
1399
1400 de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1401
1402 rc = de_alloc_rings(de);
1403 if (rc) {
1404 dev_err(&dev->dev, "ring allocation failure, err=%d\n", rc);
1405 return rc;
1406 }
1407
1408 dw32(IntrMask, 0);
1409
1410 rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev);
1411 if (rc) {
1412 dev_err(&dev->dev, "IRQ %d request failure, err=%d\n",
1413 dev->irq, rc);
1414 goto err_out_free;
1415 }
1416
1417 rc = de_init_hw(de);
1418 if (rc) {
1419 dev_err(&dev->dev, "h/w init failure, err=%d\n", rc);
1420 goto err_out_free_irq;
1421 }
1422
1423 netif_start_queue(dev);
1424 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1425
1426 return 0;
1427
1428err_out_free_irq:
1429 free_irq(dev->irq, dev);
1430err_out_free:
1431 de_free_rings(de);
1432 return rc;
1433}
1434
1435static int de_close (struct net_device *dev)
1436{
1437 struct de_private *de = netdev_priv(dev);
1438 unsigned long flags;
1439
1440 if (netif_msg_ifdown(de))
1441 printk(KERN_DEBUG "%s: disabling interface\n", dev->name);
1442
1443 del_timer_sync(&de->media_timer);
1444
1445 spin_lock_irqsave(&de->lock, flags);
1446 de_stop_hw(de);
1447 netif_stop_queue(dev);
1448 netif_carrier_off(dev);
1449 spin_unlock_irqrestore(&de->lock, flags);
1450
1451 free_irq(dev->irq, dev);
1452
1453 de_free_rings(de);
1454 de_adapter_sleep(de);
1455 return 0;
1456}
1457
1458static void de_tx_timeout (struct net_device *dev)
1459{
1460 struct de_private *de = netdev_priv(dev);
1461
1462 printk(KERN_DEBUG "%s: NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
1463 dev->name, dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
1464 de->rx_tail, de->tx_head, de->tx_tail);
1465
1466 del_timer_sync(&de->media_timer);
1467
1468 disable_irq(dev->irq);
1469 spin_lock_irq(&de->lock);
1470
1471 de_stop_hw(de);
1472 netif_stop_queue(dev);
1473 netif_carrier_off(dev);
1474
1475 spin_unlock_irq(&de->lock);
1476 enable_irq(dev->irq);
1477
1478 /* Update the error counts. */
1479 __de_get_stats(de);
1480
1481 synchronize_irq(dev->irq);
1482 de_clean_rings(de);
1483
1484 de_init_rings(de);
1485
1486 de_init_hw(de);
1487
1488 netif_wake_queue(dev);
1489}
1490
1491static void __de_get_regs(struct de_private *de, u8 *buf)
1492{
1493 int i;
1494 u32 *rbuf = (u32 *)buf;
1495
1496 /* read all CSRs */
1497 for (i = 0; i < DE_NUM_REGS; i++)
1498 rbuf[i] = dr32(i * 8);
1499
1500 /* handle self-clearing RxMissed counter, CSR8 */
1501 de_rx_missed(de, rbuf[8]);
1502}
1503
1504static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1505{
1506 ecmd->supported = de->media_supported;
1507 ecmd->transceiver = XCVR_INTERNAL;
1508 ecmd->phy_address = 0;
1509 ecmd->advertising = de->media_advertise;
1510
1511 switch (de->media_type) {
1512 case DE_MEDIA_AUI:
1513 ecmd->port = PORT_AUI;
1514 ecmd->speed = 5;
1515 break;
1516 case DE_MEDIA_BNC:
1517 ecmd->port = PORT_BNC;
1518 ecmd->speed = 2;
1519 break;
1520 default:
1521 ecmd->port = PORT_TP;
1522 ecmd->speed = SPEED_10;
1523 break;
1524 }
1525
1526 if (dr32(MacMode) & FullDuplex)
1527 ecmd->duplex = DUPLEX_FULL;
1528 else
1529 ecmd->duplex = DUPLEX_HALF;
1530
1531 if (de->media_lock)
1532 ecmd->autoneg = AUTONEG_DISABLE;
1533 else
1534 ecmd->autoneg = AUTONEG_ENABLE;
1535
1536 /* ignore maxtxpkt, maxrxpkt for now */
1537
1538 return 0;
1539}
1540
1541static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1542{
1543 u32 new_media;
1544 unsigned int media_lock;
1545
1546 if (ecmd->speed != SPEED_10 && ecmd->speed != 5 && ecmd->speed != 2)
1547 return -EINVAL;
1548 if (de->de21040 && ecmd->speed == 2)
1549 return -EINVAL;
1550 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
1551 return -EINVAL;
1552 if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC)
1553 return -EINVAL;
1554 if (de->de21040 && ecmd->port == PORT_BNC)
1555 return -EINVAL;
1556 if (ecmd->transceiver != XCVR_INTERNAL)
1557 return -EINVAL;
1558 if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
1559 return -EINVAL;
1560 if (ecmd->advertising & ~de->media_supported)
1561 return -EINVAL;
1562 if (ecmd->autoneg == AUTONEG_ENABLE &&
1563 (!(ecmd->advertising & ADVERTISED_Autoneg)))
1564 return -EINVAL;
1565
1566 switch (ecmd->port) {
1567 case PORT_AUI:
1568 new_media = DE_MEDIA_AUI;
1569 if (!(ecmd->advertising & ADVERTISED_AUI))
1570 return -EINVAL;
1571 break;
1572 case PORT_BNC:
1573 new_media = DE_MEDIA_BNC;
1574 if (!(ecmd->advertising & ADVERTISED_BNC))
1575 return -EINVAL;
1576 break;
1577 default:
1578 if (ecmd->autoneg == AUTONEG_ENABLE)
1579 new_media = DE_MEDIA_TP_AUTO;
1580 else if (ecmd->duplex == DUPLEX_FULL)
1581 new_media = DE_MEDIA_TP_FD;
1582 else
1583 new_media = DE_MEDIA_TP;
1584 if (!(ecmd->advertising & ADVERTISED_TP))
1585 return -EINVAL;
1586 if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half)))
1587 return -EINVAL;
1588 break;
1589 }
1590
1591 media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1;
1592
1593 if ((new_media == de->media_type) &&
1594 (media_lock == de->media_lock) &&
1595 (ecmd->advertising == de->media_advertise))
1596 return 0; /* nothing to change */
1597
1598 de_link_down(de);
1599 de_stop_rxtx(de);
1600
1601 de->media_type = new_media;
1602 de->media_lock = media_lock;
1603 de->media_advertise = ecmd->advertising;
1604 de_set_media(de);
1605
1606 return 0;
1607}
1608
1609static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
1610{
1611 struct de_private *de = netdev_priv(dev);
1612
1613 strcpy (info->driver, DRV_NAME);
1614 strcpy (info->version, DRV_VERSION);
1615 strcpy (info->bus_info, pci_name(de->pdev));
1616 info->eedump_len = DE_EEPROM_SIZE;
1617}
1618
1619static int de_get_regs_len(struct net_device *dev)
1620{
1621 return DE_REGS_SIZE;
1622}
1623
1624static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1625{
1626 struct de_private *de = netdev_priv(dev);
1627 int rc;
1628
1629 spin_lock_irq(&de->lock);
1630 rc = __de_get_settings(de, ecmd);
1631 spin_unlock_irq(&de->lock);
1632
1633 return rc;
1634}
1635
1636static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1637{
1638 struct de_private *de = netdev_priv(dev);
1639 int rc;
1640
1641 spin_lock_irq(&de->lock);
1642 rc = __de_set_settings(de, ecmd);
1643 spin_unlock_irq(&de->lock);
1644
1645 return rc;
1646}
1647
1648static u32 de_get_msglevel(struct net_device *dev)
1649{
1650 struct de_private *de = netdev_priv(dev);
1651
1652 return de->msg_enable;
1653}
1654
1655static void de_set_msglevel(struct net_device *dev, u32 msglvl)
1656{
1657 struct de_private *de = netdev_priv(dev);
1658
1659 de->msg_enable = msglvl;
1660}
1661
1662static int de_get_eeprom(struct net_device *dev,
1663 struct ethtool_eeprom *eeprom, u8 *data)
1664{
1665 struct de_private *de = netdev_priv(dev);
1666
1667 if (!de->ee_data)
1668 return -EOPNOTSUPP;
1669 if ((eeprom->offset != 0) || (eeprom->magic != 0) ||
1670 (eeprom->len != DE_EEPROM_SIZE))
1671 return -EINVAL;
1672 memcpy(data, de->ee_data, eeprom->len);
1673
1674 return 0;
1675}
1676
1677static int de_nway_reset(struct net_device *dev)
1678{
1679 struct de_private *de = netdev_priv(dev);
1680 u32 status;
1681
1682 if (de->media_type != DE_MEDIA_TP_AUTO)
1683 return -EINVAL;
1684 if (netif_carrier_ok(de->dev))
1685 de_link_down(de);
1686
1687 status = dr32(SIAStatus);
1688 dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
1689 if (netif_msg_link(de))
1690 dev_info(&de->dev->dev, "link nway restart, status %x,%x\n",
1691 status, dr32(SIAStatus));
1692 return 0;
1693}
1694
1695static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1696 void *data)
1697{
1698 struct de_private *de = netdev_priv(dev);
1699
1700 regs->version = (DE_REGS_VER << 2) | de->de21040;
1701
1702 spin_lock_irq(&de->lock);
1703 __de_get_regs(de, data);
1704 spin_unlock_irq(&de->lock);
1705}
1706
1707static const struct ethtool_ops de_ethtool_ops = {
1708 .get_link = ethtool_op_get_link,
1709 .get_drvinfo = de_get_drvinfo,
1710 .get_regs_len = de_get_regs_len,
1711 .get_settings = de_get_settings,
1712 .set_settings = de_set_settings,
1713 .get_msglevel = de_get_msglevel,
1714 .set_msglevel = de_set_msglevel,
1715 .get_eeprom = de_get_eeprom,
1716 .nway_reset = de_nway_reset,
1717 .get_regs = de_get_regs,
1718};
1719
1720static void __devinit de21040_get_mac_address (struct de_private *de)
1721{
1722 unsigned i;
1723
1724 dw32 (ROMCmd, 0); /* Reset the pointer with a dummy write. */
1725 udelay(5);
1726
1727 for (i = 0; i < 6; i++) {
1728 int value, boguscnt = 100000;
1729 do {
1730 value = dr32(ROMCmd);
1731 rmb();
1732 } while (value < 0 && --boguscnt > 0);
1733 de->dev->dev_addr[i] = value;
1734 udelay(1);
1735 if (boguscnt <= 0)
1736 pr_warning(PFX "timeout reading 21040 MAC address byte %u\n", i);
1737 }
1738}
1739
1740static void __devinit de21040_get_media_info(struct de_private *de)
1741{
1742 unsigned int i;
1743
1744 de->media_type = DE_MEDIA_TP;
1745 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full |
1746 SUPPORTED_10baseT_Half | SUPPORTED_AUI;
1747 de->media_advertise = de->media_supported;
1748
1749 for (i = 0; i < DE_MAX_MEDIA; i++) {
1750 switch (i) {
1751 case DE_MEDIA_AUI:
1752 case DE_MEDIA_TP:
1753 case DE_MEDIA_TP_FD:
1754 de->media[i].type = i;
1755 de->media[i].csr13 = t21040_csr13[i];
1756 de->media[i].csr14 = t21040_csr14[i];
1757 de->media[i].csr15 = t21040_csr15[i];
1758 break;
1759 default:
1760 de->media[i].type = DE_MEDIA_INVALID;
1761 break;
1762 }
1763 }
1764}
1765
1766/* Note: this routine returns extra data bits for size detection. */
1767static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, int addr_len)
1768{
1769 int i;
1770 unsigned retval = 0;
1771 void __iomem *ee_addr = regs + ROMCmd;
1772 int read_cmd = location | (EE_READ_CMD << addr_len);
1773
1774 writel(EE_ENB & ~EE_CS, ee_addr);
1775 writel(EE_ENB, ee_addr);
1776
1777 /* Shift the read command bits out. */
1778 for (i = 4 + addr_len; i >= 0; i--) {
1779 short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1780 writel(EE_ENB | dataval, ee_addr);
1781 readl(ee_addr);
1782 writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1783 readl(ee_addr);
1784 retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
1785 }
1786 writel(EE_ENB, ee_addr);
1787 readl(ee_addr);
1788
1789 for (i = 16; i > 0; i--) {
1790 writel(EE_ENB | EE_SHIFT_CLK, ee_addr);
1791 readl(ee_addr);
1792 retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
1793 writel(EE_ENB, ee_addr);
1794 readl(ee_addr);
1795 }
1796
1797 /* Terminate the EEPROM access. */
1798 writel(EE_ENB & ~EE_CS, ee_addr);
1799 return retval;
1800}
1801
1802static void __devinit de21041_get_srom_info (struct de_private *de)
1803{
1804 unsigned i, sa_offset = 0, ofs;
1805 u8 ee_data[DE_EEPROM_SIZE + 6] = {};
1806 unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6;
1807 struct de_srom_info_leaf *il;
1808 void *bufp;
1809
1810 /* download entire eeprom */
1811 for (i = 0; i < DE_EEPROM_WORDS; i++)
1812 ((__le16 *)ee_data)[i] =
1813 cpu_to_le16(tulip_read_eeprom(de->regs, i, ee_addr_size));
1814
1815 /* DEC now has a specification but early board makers
1816 just put the address in the first EEPROM locations. */
1817 /* This does memcmp(eedata, eedata+16, 8) */
1818
1819#ifndef CONFIG_MIPS_COBALT
1820
1821 for (i = 0; i < 8; i ++)
1822 if (ee_data[i] != ee_data[16+i])
1823 sa_offset = 20;
1824
1825#endif
1826
1827 /* store MAC address */
1828 for (i = 0; i < 6; i ++)
1829 de->dev->dev_addr[i] = ee_data[i + sa_offset];
1830
1831 /* get offset of controller 0 info leaf. ignore 2nd byte. */
1832 ofs = ee_data[SROMC0InfoLeaf];
1833 if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block)))
1834 goto bad_srom;
1835
1836 /* get pointer to info leaf */
1837 il = (struct de_srom_info_leaf *) &ee_data[ofs];
1838
1839 /* paranoia checks */
1840 if (il->n_blocks == 0)
1841 goto bad_srom;
1842 if ((sizeof(ee_data) - ofs) <
1843 (sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks)))
1844 goto bad_srom;
1845
1846 /* get default media type */
1847 switch (get_unaligned(&il->default_media)) {
1848 case 0x0001: de->media_type = DE_MEDIA_BNC; break;
1849 case 0x0002: de->media_type = DE_MEDIA_AUI; break;
1850 case 0x0204: de->media_type = DE_MEDIA_TP_FD; break;
1851 default: de->media_type = DE_MEDIA_TP_AUTO; break;
1852 }
1853
1854 if (netif_msg_probe(de))
1855 pr_info("de%d: SROM leaf offset %u, default media %s\n",
1856 de->board_idx, ofs, media_name[de->media_type]);
1857
1858 /* init SIA register values to defaults */
1859 for (i = 0; i < DE_MAX_MEDIA; i++) {
1860 de->media[i].type = DE_MEDIA_INVALID;
1861 de->media[i].csr13 = 0xffff;
1862 de->media[i].csr14 = 0xffff;
1863 de->media[i].csr15 = 0xffff;
1864 }
1865
1866 /* parse media blocks to see what media types are supported,
1867 * and if any custom CSR values are provided
1868 */
1869 bufp = ((void *)il) + sizeof(*il);
1870 for (i = 0; i < il->n_blocks; i++) {
1871 struct de_srom_media_block *ib = bufp;
1872 unsigned idx;
1873
1874 /* index based on media type in media block */
1875 switch(ib->opts & MediaBlockMask) {
1876 case 0: /* 10baseT */
1877 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half
1878 | SUPPORTED_Autoneg;
1879 idx = DE_MEDIA_TP;
1880 de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
1881 break;
1882 case 1: /* BNC */
1883 de->media_supported |= SUPPORTED_BNC;
1884 idx = DE_MEDIA_BNC;
1885 break;
1886 case 2: /* AUI */
1887 de->media_supported |= SUPPORTED_AUI;
1888 idx = DE_MEDIA_AUI;
1889 break;
1890 case 4: /* 10baseT-FD */
1891 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full
1892 | SUPPORTED_Autoneg;
1893 idx = DE_MEDIA_TP_FD;
1894 de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
1895 break;
1896 default:
1897 goto bad_srom;
1898 }
1899
1900 de->media[idx].type = idx;
1901
1902 if (netif_msg_probe(de))
1903 pr_info("de%d: media block #%u: %s",
1904 de->board_idx, i,
1905 media_name[de->media[idx].type]);
1906
1907 bufp += sizeof (ib->opts);
1908
1909 if (ib->opts & MediaCustomCSRs) {
1910 de->media[idx].csr13 = get_unaligned(&ib->csr13);
1911 de->media[idx].csr14 = get_unaligned(&ib->csr14);
1912 de->media[idx].csr15 = get_unaligned(&ib->csr15);
1913 bufp += sizeof(ib->csr13) + sizeof(ib->csr14) +
1914 sizeof(ib->csr15);
1915
1916 if (netif_msg_probe(de))
1917 pr_cont(" (%x,%x,%x)\n",
1918 de->media[idx].csr13,
1919 de->media[idx].csr14,
1920 de->media[idx].csr15);
1921
1922 } else if (netif_msg_probe(de))
1923 pr_cont("\n");
1924
		if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
			break;
	}

	de->media_advertise = de->media_supported;

fill_defaults:
	/* fill in defaults, for cases where custom CSRs not used */
	for (i = 0; i < DE_MAX_MEDIA; i++) {
		if (de->media[i].csr13 == 0xffff)
			de->media[i].csr13 = t21041_csr13[i];
		if (de->media[i].csr14 == 0xffff) {
			/* autonegotiation is broken at least on some chip
			   revisions - rev. 0x21 works, 0x11 does not */
			if (de->pdev->revision < 0x20)
				de->media[i].csr14 = t21041_csr14_brk[i];
			else
				de->media[i].csr14 = t21041_csr14[i];
		}
		if (de->media[i].csr15 == 0xffff)
			de->media[i].csr15 = t21041_csr15[i];
	}

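	/* keep a private copy of the raw SROM contents so the driver can
	 * hand it back later (e.g. for the ethtool EEPROM read path)
	 */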
	de->ee_data = kmemdup(&ee_data[0], DE_EEPROM_SIZE, GFP_KERNEL);

	return;

bad_srom:
	/* for error cases, it's ok to assume we support all these */
	for (i = 0; i < DE_MAX_MEDIA; i++)
		de->media[i].type = i;
	de->media_supported =
		SUPPORTED_10baseT_Half |
		SUPPORTED_10baseT_Full |
		SUPPORTED_Autoneg |
		SUPPORTED_TP |
		SUPPORTED_AUI |
		SUPPORTED_BNC;
	goto fill_defaults;
}

static const struct net_device_ops de_netdev_ops = {
	.ndo_open		= de_open,
	.ndo_stop		= de_close,
	.ndo_set_multicast_list	= de_set_rx_mode,
	.ndo_start_xmit		= de_start_xmit,
	.ndo_get_stats		= de_get_stats,
	.ndo_tx_timeout		= de_tx_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int __devinit de_init_one (struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct de_private *de;
	int rc;
	void __iomem *regs;
	unsigned long pciaddr;
	static int board_idx = -1;

	board_idx++;

#ifndef MODULE
	if (board_idx == 0)
		printk("%s", version);
#endif

	/* allocate a new ethernet device structure, and fill in defaults */
	dev = alloc_etherdev(sizeof(struct de_private));
	if (!dev)
		return -ENOMEM;

	dev->netdev_ops = &de_netdev_ops;
	SET_NETDEV_DEV(dev, &pdev->dev);
	dev->ethtool_ops = &de_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	de = netdev_priv(dev);
	de->de21040 = ent->driver_data == 0 ? 1 : 0;
	de->pdev = pdev;
	de->dev = dev;
	de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug);
	de->board_idx = board_idx;
	spin_lock_init (&de->lock);
	init_timer(&de->media_timer);
	if (de->de21040)
		de->media_timer.function = de21040_media_timer;
	else
		de->media_timer.function = de21041_media_timer;
	de->media_timer.data = (unsigned long) de;

	netif_carrier_off(dev);
	netif_stop_queue(dev);

	/* wake up device, assign resources */
	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out_free;

	/* reserve PCI resources to ensure driver atomicity */
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_disable;

	/* check for invalid IRQ value */
	if (pdev->irq < 2) {
		rc = -EIO;
		pr_err(PFX "invalid irq (%d) for pci dev %s\n",
		       pdev->irq, pci_name(pdev));
		goto err_out_res;
	}

	dev->irq = pdev->irq;

	/* obtain and check validity of PCI I/O address */
	pciaddr = pci_resource_start(pdev, 1);
	if (!pciaddr) {
		rc = -EIO;
		pr_err(PFX "no MMIO resource for pci dev %s\n", pci_name(pdev));
		goto err_out_res;
	}
	if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
		rc = -EIO;
		pr_err(PFX "MMIO resource (%llx) too small on pci dev %s\n",
		       (unsigned long long)pci_resource_len(pdev, 1),
		       pci_name(pdev));
		goto err_out_res;
	}

	/* remap CSR registers */
	regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
	if (!regs) {
		rc = -EIO;
		pr_err(PFX "Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
		       (unsigned long long)pci_resource_len(pdev, 1),
		       pciaddr, pci_name(pdev));
		goto err_out_res;
	}
	dev->base_addr = (unsigned long) regs;
	de->regs = regs;

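	/* bring the chip out of any sleep/snooze power-saving state before
	 * its registers are touched
	 */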
	de_adapter_wake(de);

	/* make sure hardware is not running */
	rc = de_reset_mac(de);
	if (rc) {
		pr_err(PFX "Cannot reset MAC, pci dev %s\n", pci_name(pdev));
		goto err_out_iomap;
	}

	/* get MAC address, initialize default media type and
	 * get list of supported media
	 */
	if (de->de21040) {
		de21040_get_mac_address(de);
		de21040_get_media_info(de);
	} else {
		de21041_get_srom_info(de);
	}

	/* register new network interface with kernel */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_iomap;

	/* print info about board and interface just registered */
	dev_info(&dev->dev, "%s at 0x%lx, %pM, IRQ %d\n",
		 de->de21040 ? "21040" : "21041",
		 dev->base_addr,
		 dev->dev_addr,
		 dev->irq);

	pci_set_drvdata(pdev, dev);

	/* enable busmastering */
	pci_set_master(pdev);

	/* put adapter to sleep */
	de_adapter_sleep(de);

	return 0;

err_out_iomap:
	kfree(de->ee_data);
	iounmap(regs);
err_out_res:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out_free:
	free_netdev(dev);
	return rc;
}

static void __devexit de_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct de_private *de = netdev_priv(dev);

	BUG_ON(!dev);
	unregister_netdev(dev);
	kfree(de->ee_data);
	iounmap(de->regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
}

#ifdef CONFIG_PM

static int de_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct de_private *de = netdev_priv(dev);

	rtnl_lock();
	if (netif_running (dev)) {
		del_timer_sync(&de->media_timer);

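		/* quiesce the chip: mask its interrupt, then stop DMA and
		 * detach the interface under the driver lock
		 */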
		disable_irq(dev->irq);
		spin_lock_irq(&de->lock);

		de_stop_hw(de);
		netif_stop_queue(dev);
		netif_device_detach(dev);
		netif_carrier_off(dev);

		spin_unlock_irq(&de->lock);
		enable_irq(dev->irq);

		/* Update the error counts. */
		__de_get_stats(de);

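		/* wait for any interrupt handler still running on another
		 * CPU to finish before tearing down the descriptor rings
		 */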
		synchronize_irq(dev->irq);
		de_clean_rings(de);

		de_adapter_sleep(de);
		pci_disable_device(pdev);
	} else {
		netif_device_detach(dev);
	}
	rtnl_unlock();
	return 0;
}

static int de_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct de_private *de = netdev_priv(dev);
	int retval = 0;

	rtnl_lock();
	if (netif_device_present(dev))
		goto out;
	if (!netif_running(dev))
		goto out_attach;
	if ((retval = pci_enable_device(pdev))) {
		dev_err(&dev->dev, "pci_enable_device failed in resume\n");
		goto out;
	}
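	/* the chip was fully stopped in suspend, so rebuild the descriptor
	 * rings and reprogram the hardware from scratch
	 */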
	pci_set_master(pdev);
	de_init_rings(de);
	de_init_hw(de);
out_attach:
	netif_device_attach(dev);
out:
	rtnl_unlock();
	return retval;
}

#endif /* CONFIG_PM */

static struct pci_driver de_driver = {
	.name		= DRV_NAME,
	.id_table	= de_pci_tbl,
	.probe		= de_init_one,
	.remove		= __devexit_p(de_remove_one),
#ifdef CONFIG_PM
	.suspend	= de_suspend,
	.resume		= de_resume,
#endif
};

static int __init de_init (void)
{
#ifdef MODULE
	printk("%s", version);
#endif
	return pci_register_driver(&de_driver);
}

static void __exit de_exit (void)
{
	pci_unregister_driver (&de_driver);
}

module_init(de_init);
module_exit(de_exit);