1/*
2 A Davicom DM9102/DM9102A/DM9102A+DM9801/DM9102A+DM9802 NIC fast
3 ethernet driver for Linux.
4 Copyright (C) 1997 Sten Wang
5
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License
8 as published by the Free Software Foundation; either version 2
9 of the License, or (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 DAVICOM Web-Site: www.davicom.com.tw
17
18 Author: Sten Wang, 886-3-5798797-8517, E-mail: sten_wang@davicom.com.tw
19 Maintainer: Tobias Ringstrom <tori@unhappy.mine.nu>
20
21 (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
22
23 Marcelo Tosatti <marcelo@conectiva.com.br> :
24 Made it compile in 2.3 (device to net_device)
25
26 Alan Cox <alan@redhat.com> :
27 Cleaned up for kernel merge.
28 Removed the back compatibility support
29 Reformatted, fixing spelling etc as I went
30 Removed IRQ 0-15 assumption
31
32 Jeff Garzik <jgarzik@pobox.com> :
33 Updated to use new PCI driver API.
34 Resource usage cleanups.
35 Report driver version to user.
36
37 Tobias Ringstrom <tori@unhappy.mine.nu> :
38 Cleaned up and added SMP safety. Thanks go to Jeff Garzik,
39 Andrew Morton and Frank Davis for the SMP safety fixes.
40
41 Vojtech Pavlik <vojtech@suse.cz> :
42 Cleaned up pointer arithmetics.
43 Fixed a lot of 64bit issues.
44 Cleaned up printk()s a bit.
45 Fixed some obvious big endian problems.
46
47 Tobias Ringstrom <tori@unhappy.mine.nu> :
48 Use time_after for jiffies calculation. Added ethtool
49 support. Updated PCI resource allocation. Do not
50 forget to unmap PCI mapped skbs.
51
52 Alan Cox <alan@redhat.com>
53 Added new PCI identifiers provided by Clear Zhang at ALi
54 for their 1563 ethernet device.
55
56 TODO
57
58 Implement pci_driver::suspend() and pci_driver::resume()
59 power management methods.
60
61 Check on 64 bit boxes.
62 Check and fix on big endian boxes.
63
64 Test and make sure PCI latency is now correct for all cases.
65*/
66
67#define DRV_NAME "dmfe"
68#define DRV_VERSION "1.36.4"
69#define DRV_RELDATE "2002-01-17"
70
71#include <linux/module.h>
72#include <linux/kernel.h>
73#include <linux/string.h>
74#include <linux/timer.h>
75#include <linux/ptrace.h>
76#include <linux/errno.h>
77#include <linux/ioport.h>
78#include <linux/slab.h>
79#include <linux/interrupt.h>
80#include <linux/pci.h>
81#include <linux/dma-mapping.h>
82#include <linux/init.h>
83#include <linux/netdevice.h>
84#include <linux/etherdevice.h>
85#include <linux/ethtool.h>
86#include <linux/skbuff.h>
87#include <linux/delay.h>
88#include <linux/spinlock.h>
89#include <linux/crc32.h>
90#include <linux/bitops.h>
91
92#include <asm/processor.h>
93#include <asm/io.h>
94#include <asm/dma.h>
95#include <asm/uaccess.h>
96#include <asm/irq.h>
97
98
99/* Board/System/Debug information/definition ---------------- */
100#define PCI_DM9132_ID 0x91321282 /* Davicom DM9132 ID */
101#define PCI_DM9102_ID 0x91021282 /* Davicom DM9102 ID */
102#define PCI_DM9100_ID 0x91001282 /* Davicom DM9100 ID */
103#define PCI_DM9009_ID 0x90091282 /* Davicom DM9009 ID */
104
105#define DM9102_IO_SIZE 0x80
106#define DM9102A_IO_SIZE 0x100
107#define TX_MAX_SEND_CNT 0x1 /* Maximum tx packet per time */
108#define TX_DESC_CNT 0x10 /* Allocated Tx descriptors */
109#define RX_DESC_CNT 0x20 /* Allocated Rx descriptors */
110#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2) /* Max TX packet count */
111#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3) /* TX wakeup count */
112#define DESC_ALL_CNT (TX_DESC_CNT + RX_DESC_CNT)
113#define TX_BUF_ALLOC 0x600
114#define RX_ALLOC_SIZE 0x620
115#define DM910X_RESET 1
116#define CR0_DEFAULT 0x00E00000 /* TX & RX burst mode */
117#define CR6_DEFAULT 0x00080000 /* HD */
118#define CR7_DEFAULT 0x180c1
119#define CR15_DEFAULT 0x06 /* TxJabber RxWatchdog */
120#define TDES0_ERR_MASK 0x4302 /* TXJT, LC, EC, FUE */
121#define MAX_PACKET_SIZE 1514
122#define DMFE_MAX_MULTICAST 14
123#define RX_COPY_SIZE 100
124#define MAX_CHECK_PACKET 0x8000
125#define DM9801_NOISE_FLOOR 8
126#define DM9802_NOISE_FLOOR 5
127
128#define DMFE_10MHF 0
129#define DMFE_100MHF 1
130#define DMFE_10MFD 4
131#define DMFE_100MFD 5
132#define DMFE_AUTO 8
133#define DMFE_1M_HPNA 0x10
134
135#define DMFE_TXTH_72 0x400000 /* TX TH 72 byte */
136#define DMFE_TXTH_96 0x404000 /* TX TH 96 byte */
137#define DMFE_TXTH_128 0x0000 /* TX TH 128 byte */
138#define DMFE_TXTH_256 0x4000 /* TX TH 256 byte */
139#define DMFE_TXTH_512 0x8000 /* TX TH 512 byte */
140#define DMFE_TXTH_1K 0xC000 /* TX TH 1K byte */
141
142#define DMFE_TIMER_WUT (jiffies + HZ * 1)/* timer wakeup time : 1 second */
143#define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s */
144#define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s */
145
146#define DMFE_DBUG(dbug_now, msg, value) \
147 do { \
148 if (dmfe_debug || (dbug_now)) \
149 printk(KERN_ERR DRV_NAME ": %s %lx\n",\
150 (msg), (long) (value)); \
151 } while (0)
152
153#define SHOW_MEDIA_TYPE(mode) \
154 printk (KERN_INFO DRV_NAME ": Change Speed to %sMbps %s duplex\n", \
155 (mode & 1) ? "100":"10", (mode & 4) ? "full":"half");
156
157
158/* CR9 definition: SROM/MII */
159#define CR9_SROM_READ 0x4800
160#define CR9_SRCS 0x1
161#define CR9_SRCLK 0x2
162#define CR9_CRDOUT 0x8
163#define SROM_DATA_0 0x0
164#define SROM_DATA_1 0x4
165#define PHY_DATA_1 0x20000
166#define PHY_DATA_0 0x00000
167#define MDCLKH 0x10000
168
169#define PHY_POWER_DOWN 0x800
170
171#define SROM_V41_CODE 0x14
172
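/* Bit-bang one SROM data bit on CR9: drive the data line, pulse SRCLK high, then drop it again */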
173#define SROM_CLK_WRITE(data, ioaddr) \
174 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
175 udelay(5); \
176 outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \
177 udelay(5); \
178 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
179 udelay(5);
180
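/* DM9132 and DM9102A (revision >= 0x02000030) decode a 256-byte I/O window; older DM9102 parts decode only 128 bytes */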
181#define __CHK_IO_SIZE(pci_id, dev_rev) \
182 (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x02000030) ) ? \
183 DM9102A_IO_SIZE: DM9102_IO_SIZE)
184
185#define CHK_IO_SIZE(pci_dev, dev_rev) \
186 (__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, dev_rev))
187
188/* Sten Check */
189#define DEVICE net_device
190
191/* Structure/enum declaration ------------------------------- */
192struct tx_desc {
193 u32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
194 char *tx_buf_ptr; /* Data for us */
195 struct tx_desc *next_tx_desc;
196} __attribute__(( aligned(32) ));
197
198struct rx_desc {
199 u32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
200 struct sk_buff *rx_skb_ptr; /* Data for us */
201 struct rx_desc *next_rx_desc;
202} __attribute__(( aligned(32) ));
203
204struct dmfe_board_info {
205 u32 chip_id; /* Chip vendor/Device ID */
206 u32 chip_revision; /* Chip revision */
207 struct DEVICE *next_dev; /* next device */
208 struct pci_dev *pdev; /* PCI device */
209 spinlock_t lock;
210
211 long ioaddr; /* I/O base address */
212 u32 cr0_data;
213 u32 cr5_data;
214 u32 cr6_data;
215 u32 cr7_data;
216 u32 cr15_data;
217
218 /* pointer for memory physical address */
219 dma_addr_t buf_pool_dma_ptr; /* Tx buffer pool memory */
220 dma_addr_t buf_pool_dma_start; /* Tx buffer pool align dword */
221 dma_addr_t desc_pool_dma_ptr; /* descriptor pool memory */
222 dma_addr_t first_tx_desc_dma;
223 dma_addr_t first_rx_desc_dma;
224
225 /* descriptor pointer */
226 unsigned char *buf_pool_ptr; /* Tx buffer pool memory */
227 unsigned char *buf_pool_start; /* Tx buffer pool align dword */
228 unsigned char *desc_pool_ptr; /* descriptor pool memory */
229 struct tx_desc *first_tx_desc;
230 struct tx_desc *tx_insert_ptr;
231 struct tx_desc *tx_remove_ptr;
232 struct rx_desc *first_rx_desc;
233 struct rx_desc *rx_insert_ptr;
234 struct rx_desc *rx_ready_ptr; /* packet come pointer */
235 unsigned long tx_packet_cnt; /* transmitted packet count */
236 unsigned long tx_queue_cnt; /* wait to send packet count */
237 unsigned long rx_avail_cnt; /* available rx descriptor count */
238 unsigned long interval_rx_cnt; /* rx packet count a callback time */
239
240 u16 HPNA_command; /* For HPNA register 16 */
241 u16 HPNA_timer; /* For HPNA remote device check */
242 u16 dbug_cnt;
243 u16 NIC_capability; /* NIC media capability */
244 u16 PHY_reg4; /* Saved Phyxcer register 4 value */
245
246 u8 HPNA_present; /* 0:none, 1:DM9801, 2:DM9802 */
247 u8 chip_type; /* Keep DM9102A chip type */
248 u8 media_mode; /* user specify media mode */
249 u8 op_mode; /* real work media mode */
250 u8 phy_addr;
251 u8 link_failed; /* Ever link failed */
252 u8 wait_reset; /* Hardware failed, need to reset */
253 u8 dm910x_chk_mode; /* Operating mode check */
254 u8 first_in_callback; /* Flag to record state */
255 struct timer_list timer;
256
257 /* System defined statistic counter */
258 struct net_device_stats stats;
259
260 /* Driver defined statistic counter */
261 unsigned long tx_fifo_underrun;
262 unsigned long tx_loss_carrier;
263 unsigned long tx_no_carrier;
264 unsigned long tx_late_collision;
265 unsigned long tx_excessive_collision;
266 unsigned long tx_jabber_timeout;
267 unsigned long reset_count;
268 unsigned long reset_cr8;
269 unsigned long reset_fatal;
270 unsigned long reset_TXtimeout;
271
272 /* NIC SROM data */
273 unsigned char srom[128];
274};
275
276enum dmfe_offsets {
277 DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
278 DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
279 DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
280 DCR15 = 0x78
281};
282
283enum dmfe_CR6_bits {
284 CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
285 CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
286 CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
287};
288
289/* Global variable declaration ----------------------------- */
290static int __devinitdata printed_version;
291static char version[] __devinitdata =
292 KERN_INFO DRV_NAME ": Davicom DM9xxx net driver, version "
293 DRV_VERSION " (" DRV_RELDATE ")\n";
294
295static int dmfe_debug;
296static unsigned char dmfe_media_mode = DMFE_AUTO;
297static u32 dmfe_cr6_user_set;
298
299/* For module input parameter */
300static int debug;
301static u32 cr6set;
302static unsigned char mode = 8;
303static u8 chkmode = 1;
304static u8 HPNA_mode; /* Default: Low Power/High Speed */
305static u8 HPNA_rx_cmd; /* Default: Disable Rx remote command */
306static u8 HPNA_tx_cmd; /* Default: Don't issue remote command */
307static u8 HPNA_NoiseFloor; /* Default: HPNA NoiseFloor */
308static u8 SF_mode; /* Special Function: 1:VLAN, 2:RX Flow Control
309 4: TX pause packet */
310
311
312/* function declaration ------------------------------------- */
313static int dmfe_open(struct DEVICE *);
314static int dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
315static int dmfe_stop(struct DEVICE *);
316static struct net_device_stats * dmfe_get_stats(struct DEVICE *);
317static void dmfe_set_filter_mode(struct DEVICE *);
318static const struct ethtool_ops netdev_ethtool_ops;
319static u16 read_srom_word(long, int);
320static irqreturn_t dmfe_interrupt(int, void *);
321#ifdef CONFIG_NET_POLL_CONTROLLER
322static void poll_dmfe (struct net_device *dev);
323#endif
324static void dmfe_descriptor_init(struct dmfe_board_info *, unsigned long);
325static void allocate_rx_buffer(struct dmfe_board_info *);
326static void update_cr6(u32, unsigned long);
327static void send_filter_frame(struct DEVICE * ,int);
328static void dm9132_id_table(struct DEVICE * ,int);
329static u16 phy_read(unsigned long, u8, u8, u32);
330static void phy_write(unsigned long, u8, u8, u16, u32);
331static void phy_write_1bit(unsigned long, u32);
332static u16 phy_read_1bit(unsigned long);
333static u8 dmfe_sense_speed(struct dmfe_board_info *);
334static void dmfe_process_mode(struct dmfe_board_info *);
335static void dmfe_timer(unsigned long);
336static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
337static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *);
338static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *);
339static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
340static void dmfe_dynamic_reset(struct DEVICE *);
341static void dmfe_free_rxbuffer(struct dmfe_board_info *);
342static void dmfe_init_dm910x(struct DEVICE *);
343static void dmfe_parse_srom(struct dmfe_board_info *);
344static void dmfe_program_DM9801(struct dmfe_board_info *, int);
345static void dmfe_program_DM9802(struct dmfe_board_info *);
346static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
347static void dmfe_set_phyxcer(struct dmfe_board_info *);
348
349/* DM910X network board routine ---------------------------- */
350
351/*
352 * Search DM910X board, allocate space and register it
353 */
354
355static int __devinit dmfe_init_one (struct pci_dev *pdev,
356 const struct pci_device_id *ent)
357{
358 struct dmfe_board_info *db; /* board information structure */
359 struct net_device *dev;
360 u32 dev_rev, pci_pmr;
361 int i, err;
362
363 DMFE_DBUG(0, "dmfe_init_one()", 0);
364
365 if (!printed_version++)
366 printk(version);
367
368 /* Init network device */
369 dev = alloc_etherdev(sizeof(*db));
370 if (dev == NULL)
371 return -ENOMEM;
372 SET_MODULE_OWNER(dev);
373 SET_NETDEV_DEV(dev, &pdev->dev);
374
375 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
376 printk(KERN_WARNING DRV_NAME
377 ": 32-bit PCI DMA not available.\n");
378 err = -ENODEV;
379 goto err_out_free;
380 }
381
382 /* Enable Master/IO access, Disable memory access */
383 err = pci_enable_device(pdev);
384 if (err)
385 goto err_out_free;
386
387 if (!pci_resource_start(pdev, 0)) {
388 printk(KERN_ERR DRV_NAME ": I/O base is zero\n");
389 err = -ENODEV;
390 goto err_out_disable;
391 }
392
393 /* Read Chip revision */
394 pci_read_config_dword(pdev, PCI_REVISION_ID, &dev_rev);
395
396 if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev, dev_rev)) ) {
397 printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n");
398 err = -ENODEV;
399 goto err_out_disable;
400 }
401
402#if 0 /* pci_{enable_device,set_master} sets minimum latency for us now */
403
404 /* Set Latency Timer 80h */
405 /* FIXME: setting values > 32 breaks some SiS 559x stuff.
406 Need a PCI quirk.. */
407
408 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
409#endif
410
411 if (pci_request_regions(pdev, DRV_NAME)) {
412 printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
413 err = -ENODEV;
414 goto err_out_disable;
415 }
416
417 /* Init system & device */
418 db = netdev_priv(dev);
419
420 /* Allocate Tx/Rx descriptor memory */
421 db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
422 DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
423
424 db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
425 TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
426
427 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
428 db->first_tx_desc_dma = db->desc_pool_dma_ptr;
429 db->buf_pool_start = db->buf_pool_ptr;
430 db->buf_pool_dma_start = db->buf_pool_dma_ptr;
431
432 db->chip_id = ent->driver_data;
433 db->ioaddr = pci_resource_start(pdev, 0);
434 db->chip_revision = dev_rev;
435
436 db->pdev = pdev;
437
438 dev->base_addr = db->ioaddr;
439 dev->irq = pdev->irq;
440 pci_set_drvdata(pdev, dev);
441 dev->open = &dmfe_open;
442 dev->hard_start_xmit = &dmfe_start_xmit;
443 dev->stop = &dmfe_stop;
444 dev->get_stats = &dmfe_get_stats;
445 dev->set_multicast_list = &dmfe_set_filter_mode;
446#ifdef CONFIG_NET_POLL_CONTROLLER
447 dev->poll_controller = &poll_dmfe;
448#endif
449 dev->ethtool_ops = &netdev_ethtool_ops;
450 spin_lock_init(&db->lock);
451
452 pci_read_config_dword(pdev, 0x50, &pci_pmr);
453 pci_pmr &= 0x70000;
454 if ( (pci_pmr == 0x10000) && (dev_rev == 0x02000031) )
455 db->chip_type = 1; /* DM9102A E3 */
456 else
457 db->chip_type = 0;
458
459 /* read 64 word srom data */
460 for (i = 0; i < 64; i++)
461 ((u16 *) db->srom)[i] =
462 cpu_to_le16(read_srom_word(db->ioaddr, i));
463
464 /* Set Node address */
465 for (i = 0; i < 6; i++)
466 dev->dev_addr[i] = db->srom[20 + i];
467
468 err = register_netdev (dev);
469 if (err)
470 goto err_out_res;
471
472 printk(KERN_INFO "%s: Davicom DM%04lx at pci%s,",
473 dev->name,
474 ent->driver_data >> 16,
475 pci_name(pdev));
476 for (i = 0; i < 6; i++)
477 printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
478 printk(", irq %d.\n", dev->irq);
479
480 pci_set_master(pdev);
481
482 return 0;
483
484err_out_res:
485 pci_release_regions(pdev);
486err_out_disable:
487 pci_disable_device(pdev);
488err_out_free:
489 pci_set_drvdata(pdev, NULL);
490 free_netdev(dev);
491
492 return err;
493}
494
495
496static void __devexit dmfe_remove_one (struct pci_dev *pdev)
497{
498 struct net_device *dev = pci_get_drvdata(pdev);
499 struct dmfe_board_info *db = netdev_priv(dev);
500
501 DMFE_DBUG(0, "dmfe_remove_one()", 0);
502
503 if (dev) {
504 pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
505 DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
506 db->desc_pool_dma_ptr);
507 pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
508 db->buf_pool_ptr, db->buf_pool_dma_ptr);
509 unregister_netdev(dev);
510 pci_release_regions(pdev);
511 free_netdev(dev); /* free board information */
512 pci_set_drvdata(pdev, NULL);
513 }
514
515 DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
516}
517
518
519/*
520 * Open the interface.
521 * The interface is opened whenever "ifconfig" activates it.
522 */
523
524static int dmfe_open(struct DEVICE *dev)
525{
526 int ret;
527 struct dmfe_board_info *db = netdev_priv(dev);
528
529 DMFE_DBUG(0, "dmfe_open", 0);
530
531 ret = request_irq(dev->irq, &dmfe_interrupt,
532 IRQF_SHARED, dev->name, dev);
533 if (ret)
534 return ret;
535
536 /* system variable init */
537 db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
538 db->tx_packet_cnt = 0;
539 db->tx_queue_cnt = 0;
540 db->rx_avail_cnt = 0;
541 db->link_failed = 1;
542 db->wait_reset = 0;
543
544 db->first_in_callback = 0;
545 db->NIC_capability = 0xf; /* All capability*/
546 db->PHY_reg4 = 0x1e0;
547
548 /* CR6 operation mode decision */
549 if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
550 (db->chip_revision >= 0x02000030) ) {
551 db->cr6_data |= DMFE_TXTH_256;
552 db->cr0_data = CR0_DEFAULT;
553 db->dm910x_chk_mode=4; /* Enter the normal mode */
554 } else {
555 db->cr6_data |= CR6_SFT; /* Store & Forward mode */
556 db->cr0_data = 0;
557 db->dm910x_chk_mode = 1; /* Enter the check mode */
558 }
559
560 /* Initialize DM910X board */
561 dmfe_init_dm910x(dev);
562
563 /* Activate System Interface */
564 netif_wake_queue(dev);
565
566 /* set and activate a timer process */
567 init_timer(&db->timer);
568 db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
569 db->timer.data = (unsigned long)dev;
570 db->timer.function = &dmfe_timer;
571 add_timer(&db->timer);
572
573 return 0;
574}
575
576
577/* Initialize DM910X board
 578 * Reset DM910X board
 579 * Initialize TX/Rx descriptor chain structure
580 * Send the set-up frame
581 * Enable Tx/Rx machine
582 */
583
584static void dmfe_init_dm910x(struct DEVICE *dev)
585{
586 struct dmfe_board_info *db = netdev_priv(dev);
587 unsigned long ioaddr = db->ioaddr;
588
589 DMFE_DBUG(0, "dmfe_init_dm910x()", 0);
590
591 /* Reset DM910x MAC controller */
592 outl(DM910X_RESET, ioaddr + DCR0); /* RESET MAC */
593 udelay(100);
594 outl(db->cr0_data, ioaddr + DCR0);
595 udelay(5);
596
597 /* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */
598 db->phy_addr = 1;
599
600 /* Parse SROM and media mode */
601 dmfe_parse_srom(db);
602 db->media_mode = dmfe_media_mode;
603
604 /* RESET Phyxcer Chip by GPR port bit 7 */
605 outl(0x180, ioaddr + DCR12); /* Let bit 7 output port */
606 if (db->chip_id == PCI_DM9009_ID) {
607 outl(0x80, ioaddr + DCR12); /* Issue RESET signal */
608 mdelay(300); /* Delay 300 ms */
609 }
610 outl(0x0, ioaddr + DCR12); /* Clear RESET signal */
611
612 /* Process Phyxcer Media Mode */
613 if ( !(db->media_mode & 0x10) ) /* Force 1M mode */
614 dmfe_set_phyxcer(db);
615
616 /* Media Mode Process */
617 if ( !(db->media_mode & DMFE_AUTO) )
618 db->op_mode = db->media_mode; /* Force Mode */
619
620 /* Initialize Transmit/Receive descriptor and CR3/4 */
621 dmfe_descriptor_init(db, ioaddr);
622
623 /* Init CR6 to program DM910x operation */
624 update_cr6(db->cr6_data, ioaddr);
625
626 /* Send setup frame */
627 if (db->chip_id == PCI_DM9132_ID)
628 dm9132_id_table(dev, dev->mc_count); /* DM9132 */
629 else
630 send_filter_frame(dev, dev->mc_count); /* DM9102/DM9102A */
631
632 /* Init CR7, interrupt active bit */
633 db->cr7_data = CR7_DEFAULT;
634 outl(db->cr7_data, ioaddr + DCR7);
635
636 /* Init CR15, Tx jabber and Rx watchdog timer */
637 outl(db->cr15_data, ioaddr + DCR15);
638
639 /* Enable DM910X Tx/Rx function */
640 db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
641 update_cr6(db->cr6_data, ioaddr);
642}
643
644
645/*
646 * Hardware start transmission.
647 * Send a packet to media from the upper layer.
648 */
649
650static int dmfe_start_xmit(struct sk_buff *skb, struct DEVICE *dev)
651{
652 struct dmfe_board_info *db = netdev_priv(dev);
653 struct tx_desc *txptr;
654 unsigned long flags;
655
656 DMFE_DBUG(0, "dmfe_start_xmit", 0);
657
658 /* Resource flag check */
659 netif_stop_queue(dev);
660
661 /* Too large packet check */
662 if (skb->len > MAX_PACKET_SIZE) {
663 printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len);
664 dev_kfree_skb(skb);
665 return 0;
666 }
667
668 spin_lock_irqsave(&db->lock, flags);
669
670 /* No Tx resource check, it should never happen normally */
671 if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
672 spin_unlock_irqrestore(&db->lock, flags);
673 printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n",
674 db->tx_queue_cnt);
675 return 1;
676 }
677
678 /* Disable NIC interrupt */
679 outl(0, dev->base_addr + DCR7);
680
681 /* transmit this packet */
682 txptr = db->tx_insert_ptr;
683 memcpy(txptr->tx_buf_ptr, skb->data, skb->len);
684 txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
685
686 /* Point to next transmit free descriptor */
687 db->tx_insert_ptr = txptr->next_tx_desc;
688
689 /* Transmit Packet Process */
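 /* Hand the descriptor to the chip now only if nothing is queued and fewer than TX_MAX_SEND_CNT packets are in flight; otherwise leave it queued for dmfe_free_tx_pkt() to kick out later */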
690 if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
691 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
692 db->tx_packet_cnt++; /* Ready to send */
693 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
694 dev->trans_start = jiffies; /* saved time stamp */
695 } else {
696 db->tx_queue_cnt++; /* queue TX packet */
697 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
698 }
699
700 /* Tx resource check */
701 if ( db->tx_queue_cnt < TX_FREE_DESC_CNT )
702 netif_wake_queue(dev);
703
704 /* Restore CR7 to enable interrupt */
705 spin_unlock_irqrestore(&db->lock, flags);
706 outl(db->cr7_data, dev->base_addr + DCR7);
707
708 /* free this SKB */
709 dev_kfree_skb(skb);
710
711 return 0;
712}
713
714
715/*
716 * Stop the interface.
717 * The interface is stopped when it is brought down.
718 */
719
720static int dmfe_stop(struct DEVICE *dev)
721{
722 struct dmfe_board_info *db = netdev_priv(dev);
723 unsigned long ioaddr = dev->base_addr;
724
725 DMFE_DBUG(0, "dmfe_stop", 0);
726
727 /* disable system */
728 netif_stop_queue(dev);
729
730 /* delete timer */
731 del_timer_sync(&db->timer);
732
733 /* Reset & stop DM910X board */
734 outl(DM910X_RESET, ioaddr + DCR0);
735 udelay(5);
736 phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
737
738 /* free interrupt */
739 free_irq(dev->irq, dev);
740
741 /* free allocated rx buffer */
742 dmfe_free_rxbuffer(db);
743
744#if 0
745 /* show statistic counter */
746 printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx"
747 " LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
748 db->tx_fifo_underrun, db->tx_excessive_collision,
749 db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
750 db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
751 db->reset_fatal, db->reset_TXtimeout);
752#endif
753
754 return 0;
755}
756
757
758/*
759 * DM9102 interrupt handler
760 * receive the packet to upper layer, free the transmitted packet
761 */
762
763static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
764{
765 struct DEVICE *dev = dev_id;
766 struct dmfe_board_info *db = netdev_priv(dev);
767 unsigned long ioaddr = dev->base_addr;
768 unsigned long flags;
769
770 DMFE_DBUG(0, "dmfe_interrupt()", 0);
771
772 spin_lock_irqsave(&db->lock, flags);
773
774 /* Got DM910X status */
775 db->cr5_data = inl(ioaddr + DCR5);
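 /* Write the status bits back to CR5 to acknowledge (clear) them */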
776 outl(db->cr5_data, ioaddr + DCR5);
777 if ( !(db->cr5_data & 0xc1) ) {
778 spin_unlock_irqrestore(&db->lock, flags);
779 return IRQ_HANDLED;
780 }
781
782 /* Disable all interrupt in CR7 to solve the interrupt edge problem */
783 outl(0, ioaddr + DCR7);
784
785 /* Check system status */
786 if (db->cr5_data & 0x2000) {
787 /* system bus error happened */
788 DMFE_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
789 db->reset_fatal++;
790 db->wait_reset = 1; /* Need to RESET */
791 spin_unlock_irqrestore(&db->lock, flags);
792 return IRQ_HANDLED;
793 }
794
795 /* Received the coming packet */
796 if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
797 dmfe_rx_packet(dev, db);
798
799 /* reallocate rx descriptor buffer */
800 if (db->rx_avail_cnt<RX_DESC_CNT)
801 allocate_rx_buffer(db);
802
803 /* Free the transmitted descriptor */
804 if ( db->cr5_data & 0x01)
805 dmfe_free_tx_pkt(dev, db);
806
807 /* Mode Check */
808 if (db->dm910x_chk_mode & 0x2) {
809 db->dm910x_chk_mode = 0x4;
810 db->cr6_data |= 0x100;
811 update_cr6(db->cr6_data, db->ioaddr);
812 }
813
814 /* Restore CR7 to enable interrupt mask */
815 outl(db->cr7_data, ioaddr + DCR7);
816
817 spin_unlock_irqrestore(&db->lock, flags);
818 return IRQ_HANDLED;
819}
820
821
822#ifdef CONFIG_NET_POLL_CONTROLLER
823/*
824 * Polling 'interrupt' - used by things like netconsole to send skbs
825 * without having to re-enable interrupts. It's not called while
826 * the interrupt routine is executing.
827 */
828
829static void poll_dmfe (struct net_device *dev)
830{
831 /* disable_irq here is not very nice, but with the lockless
832 interrupt handler we have no other choice. */
833 disable_irq(dev->irq);
834 dmfe_interrupt (dev->irq, dev);
835 enable_irq(dev->irq);
836}
837#endif
838
839/*
840 * Free TX resource after TX complete
841 */
842
843static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
844{
845 struct tx_desc *txptr;
846 unsigned long ioaddr = dev->base_addr;
847 u32 tdes0;
848
849 txptr = db->tx_remove_ptr;
850 while(db->tx_packet_cnt) {
851 tdes0 = le32_to_cpu(txptr->tdes0);
852 /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
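 /* tdes0 bit 31 set means the chip still owns this descriptor, so stop reclaiming here */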
853 if (tdes0 & 0x80000000)
854 break;
855
856 /* A packet transmission completed */
857 db->tx_packet_cnt--;
858 db->stats.tx_packets++;
859
860 /* Transmit statistic counter */
861 if ( tdes0 != 0x7fffffff ) {
862 /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
863 db->stats.collisions += (tdes0 >> 3) & 0xf;
864 db->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
865 if (tdes0 & TDES0_ERR_MASK) {
866 db->stats.tx_errors++;
867
868 if (tdes0 & 0x0002) { /* UnderRun */
869 db->tx_fifo_underrun++;
870 if ( !(db->cr6_data & CR6_SFT) ) {
871 db->cr6_data = db->cr6_data | CR6_SFT;
872 update_cr6(db->cr6_data, db->ioaddr);
873 }
874 }
875 if (tdes0 & 0x0100)
876 db->tx_excessive_collision++;
877 if (tdes0 & 0x0200)
878 db->tx_late_collision++;
879 if (tdes0 & 0x0400)
880 db->tx_no_carrier++;
881 if (tdes0 & 0x0800)
882 db->tx_loss_carrier++;
883 if (tdes0 & 0x4000)
884 db->tx_jabber_timeout++;
885 }
886 }
887
888 txptr = txptr->next_tx_desc;
889 }/* End of while */
890
891 /* Update TX remove pointer to next */
892 db->tx_remove_ptr = txptr;
893
894 /* Send the Tx packet in queue */
895 if ( (db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt ) {
896 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
897 db->tx_packet_cnt++; /* Ready to send */
898 db->tx_queue_cnt--;
899 outl(0x1, ioaddr + DCR1); /* Issue Tx polling */
900 dev->trans_start = jiffies; /* saved time stamp */
901 }
902
903 /* Resource available check */
904 if ( db->tx_queue_cnt < TX_WAKE_DESC_CNT )
905 netif_wake_queue(dev); /* Active upper layer, send again */
906}
907
908
909/*
910 * Calculate the CRC value of the Rx packet
911 * flag = 1 : return the reverse CRC (for the received packet CRC)
912 * 0 : return the normal CRC (for Hash Table index)
913 */
914
915static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
916{
917 u32 crc = crc32(~0, Data, Len);
918 if (flag) crc = ~crc;
919 return crc;
920}
921
922
923/*
924 * Receive the incoming packet and pass it to the upper layer
925 */
926
927static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
928{
929 struct rx_desc *rxptr;
930 struct sk_buff *skb;
931 int rxlen;
932 u32 rdes0;
933
934 rxptr = db->rx_ready_ptr;
935
936 while(db->rx_avail_cnt) {
937 rdes0 = le32_to_cpu(rxptr->rdes0);
938 if (rdes0 & 0x80000000) /* packet owner check */
939 break;
940
941 db->rx_avail_cnt--;
942 db->interval_rx_cnt++;
943
944 pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2),
945 RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
946
947 if ( (rdes0 & 0x300) != 0x300) {
948 /* A packet without First/Last flag */
949 /* reuse this SKB */
950 DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
951 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
952 } else {
953 /* A packet with First/Last flag */
954 rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;
955
956 /* error summary bit check */
957 if (rdes0 & 0x8000) {
958 /* This is an error packet */
959 //printk(DRV_NAME ": rdes0: %lx\n", rdes0);
960 db->stats.rx_errors++;
961 if (rdes0 & 1)
962 db->stats.rx_fifo_errors++;
963 if (rdes0 & 2)
964 db->stats.rx_crc_errors++;
965 if (rdes0 & 0x80)
966 db->stats.rx_length_errors++;
967 }
968
969 if ( !(rdes0 & 0x8000) ||
970 ((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
971 skb = rxptr->rx_skb_ptr;
972
973 /* Received Packet CRC check need or not */
974 if ( (db->dm910x_chk_mode & 1) &&
975 (cal_CRC(skb->data, rxlen, 1) !=
976 (*(u32 *) (skb->data+rxlen) ))) { /* FIXME (?) */
977 /* Found an error in the received packet */
978 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
979 db->dm910x_chk_mode = 3;
980 } else {
981 /* Good packet, send to upper layer */
982 /* Short packet uses a new SKB */
983 if ( (rxlen < RX_COPY_SIZE) &&
984 ( (skb = dev_alloc_skb(rxlen + 2) )
985 != NULL) ) {
986 /* size less than COPY_SIZE, allocate a rxlen SKB */
987 skb->dev = dev;
988 skb_reserve(skb, 2); /* 16byte align */
989 memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->data, rxlen);
990 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
991 } else {
992 skb->dev = dev;
993 skb_put(skb, rxlen);
994 }
995 skb->protocol = eth_type_trans(skb, dev);
996 netif_rx(skb);
997 dev->last_rx = jiffies;
998 db->stats.rx_packets++;
999 db->stats.rx_bytes += rxlen;
1000 }
1001 } else {
1002 /* Reuse the SKB when the packet has an error */
1003 DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
1004 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
1005 }
1006 }
1007
1008 rxptr = rxptr->next_rx_desc;
1009 }
1010
1011 db->rx_ready_ptr = rxptr;
1012}
1013
1014
1015/*
1016 * Get statistics from driver.
1017 */
1018
1019static struct net_device_stats * dmfe_get_stats(struct DEVICE *dev)
1020{
1021 struct dmfe_board_info *db = netdev_priv(dev);
1022
1023 DMFE_DBUG(0, "dmfe_get_stats", 0);
1024 return &db->stats;
1025}
1026
1027
1028/*
1029 * Set DM910X multicast address
1030 */
1031
1032static void dmfe_set_filter_mode(struct DEVICE * dev)
1033{
1034 struct dmfe_board_info *db = netdev_priv(dev);
1035 unsigned long flags;
1036
1037 DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
1038 spin_lock_irqsave(&db->lock, flags);
1039
1040 if (dev->flags & IFF_PROMISC) {
1041 DMFE_DBUG(0, "Enable PROM Mode", 0);
1042 db->cr6_data |= CR6_PM | CR6_PBF;
1043 update_cr6(db->cr6_data, db->ioaddr);
1044 spin_unlock_irqrestore(&db->lock, flags);
1045 return;
1046 }
1047
1048 if (dev->flags & IFF_ALLMULTI || dev->mc_count > DMFE_MAX_MULTICAST) {
1049 DMFE_DBUG(0, "Pass all multicast address", dev->mc_count);
1050 db->cr6_data &= ~(CR6_PM | CR6_PBF);
1051 db->cr6_data |= CR6_PAM;
1052 spin_unlock_irqrestore(&db->lock, flags);
1053 return;
1054 }
1055
1056 DMFE_DBUG(0, "Set multicast address", dev->mc_count);
1057 if (db->chip_id == PCI_DM9132_ID)
1058 dm9132_id_table(dev, dev->mc_count); /* DM9132 */
1059 else
1060 send_filter_frame(dev, dev->mc_count); /* DM9102/DM9102A */
1061 spin_unlock_irqrestore(&db->lock, flags);
1062}
1063
1064static void netdev_get_drvinfo(struct net_device *dev,
1065 struct ethtool_drvinfo *info)
1066{
1067 struct dmfe_board_info *np = netdev_priv(dev);
1068
1069 strcpy(info->driver, DRV_NAME);
1070 strcpy(info->version, DRV_VERSION);
1071 if (np->pdev)
1072 strcpy(info->bus_info, pci_name(np->pdev));
1073 else
1074 sprintf(info->bus_info, "EISA 0x%lx %d",
1075 dev->base_addr, dev->irq);
1076}
1077
1078static const struct ethtool_ops netdev_ethtool_ops = {
1079 .get_drvinfo = netdev_get_drvinfo,
1080};
1081
1082/*
1083 * A periodic timer routine
1084 * Dynamic media sense, allocate Rx buffer...
1085 */
1086
1087static void dmfe_timer(unsigned long data)
1088{
1089 u32 tmp_cr8;
1090 unsigned char tmp_cr12;
1091 struct DEVICE *dev = (struct DEVICE *) data;
1092 struct dmfe_board_info *db = netdev_priv(dev);
1093 unsigned long flags;
1094
1095 DMFE_DBUG(0, "dmfe_timer()", 0);
1096 spin_lock_irqsave(&db->lock, flags);
1097
1098 /* Media mode process when link is OK before entering this routine */
1099 if (db->first_in_callback == 0) {
1100 db->first_in_callback = 1;
1101 if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
1102 db->cr6_data &= ~0x40000;
1103 update_cr6(db->cr6_data, db->ioaddr);
1104 phy_write(db->ioaddr,
1105 db->phy_addr, 0, 0x1000, db->chip_id);
1106 db->cr6_data |= 0x40000;
1107 update_cr6(db->cr6_data, db->ioaddr);
1108 db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
1109 add_timer(&db->timer);
1110 spin_unlock_irqrestore(&db->lock, flags);
1111 return;
1112 }
1113 }
1114
1115
1116 /* Operating Mode Check */
1117 if ( (db->dm910x_chk_mode & 0x1) &&
1118 (db->stats.rx_packets > MAX_CHECK_PACKET) )
1119 db->dm910x_chk_mode = 0x4;
1120
1121 /* Dynamic reset DM910X : system error or transmit time-out */
1122 tmp_cr8 = inl(db->ioaddr + DCR8);
1123 if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
1124 db->reset_cr8++;
1125 db->wait_reset = 1;
1126 }
1127 db->interval_rx_cnt = 0;
1128
1129 /* TX polling kick monitor */
1130 if ( db->tx_packet_cnt &&
1131 time_after(jiffies, dev->trans_start + DMFE_TX_KICK) ) {
1132 outl(0x1, dev->base_addr + DCR1); /* Tx polling again */
1133
1134 /* TX Timeout */
1135 if ( time_after(jiffies, dev->trans_start + DMFE_TX_TIMEOUT) ) {
1136 db->reset_TXtimeout++;
1137 db->wait_reset = 1;
1138 printk(KERN_WARNING "%s: Tx timeout - resetting\n",
1139 dev->name);
1140 }
1141 }
1142
1143 if (db->wait_reset) {
1144 DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
1145 db->reset_count++;
1146 dmfe_dynamic_reset(dev);
1147 db->first_in_callback = 0;
1148 db->timer.expires = DMFE_TIMER_WUT;
1149 add_timer(&db->timer);
1150 spin_unlock_irqrestore(&db->lock, flags);
1151 return;
1152 }
1153
1154 /* Link status check, Dynamic media type change */
1155 if (db->chip_id == PCI_DM9132_ID)
1156 tmp_cr12 = inb(db->ioaddr + DCR9 + 3); /* DM9132 */
1157 else
1158 tmp_cr12 = inb(db->ioaddr + DCR12); /* DM9102/DM9102A */
1159
1160 if ( ((db->chip_id == PCI_DM9102_ID) &&
1161 (db->chip_revision == 0x02000030)) ||
1162 ((db->chip_id == PCI_DM9132_ID) &&
1163 (db->chip_revision == 0x02000010)) ) {
1164 /* DM9102A Chip */
1165 if (tmp_cr12 & 2)
1166 tmp_cr12 = 0x0; /* Link failed */
1167 else
1168 tmp_cr12 = 0x3; /* Link OK */
1169 }
1170
1171 if ( !(tmp_cr12 & 0x3) && !db->link_failed ) {
1172 /* Link Failed */
1173 DMFE_DBUG(0, "Link Failed", tmp_cr12);
1174 db->link_failed = 1;
1175
1176 /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
1177 /* AUTO or force 1M Homerun/Longrun don't need */
1178 if ( !(db->media_mode & 0x38) )
1179 phy_write(db->ioaddr, db->phy_addr,
1180 0, 0x1000, db->chip_id);
1181
1182 /* AUTO mode, if INT phyxcer link failed, select EXT device */
1183 if (db->media_mode & DMFE_AUTO) {
1184 /* 10/100M link failed, used 1M Home-Net */
1185 db->cr6_data|=0x00040000; /* bit18=1, MII */
1186 db->cr6_data&=~0x00000200; /* bit9=0, HD mode */
1187 update_cr6(db->cr6_data, db->ioaddr);
1188 }
1189 } else
1190 if ((tmp_cr12 & 0x3) && db->link_failed) {
1191 DMFE_DBUG(0, "Link link OK", tmp_cr12);
1192 db->link_failed = 0;
1193
1194 /* Auto Sense Speed */
1195 if ( (db->media_mode & DMFE_AUTO) &&
1196 dmfe_sense_speed(db) )
1197 db->link_failed = 1;
1198 dmfe_process_mode(db);
1199 /* SHOW_MEDIA_TYPE(db->op_mode); */
1200 }
1201
1202 /* HPNA remote command check */
1203 if (db->HPNA_command & 0xf00) {
1204 db->HPNA_timer--;
1205 if (!db->HPNA_timer)
1206 dmfe_HPNA_remote_cmd_chk(db);
1207 }
1208
1209 /* Timer active again */
1210 db->timer.expires = DMFE_TIMER_WUT;
1211 add_timer(&db->timer);
1212 spin_unlock_irqrestore(&db->lock, flags);
1213}
1214
1215
1216/*
1217 * Dynamically reset the DM910X board
1218 * Stop DM910X board
1219 * Free Tx/Rx allocated memory
1220 * Reset DM910X board
1221 * Re-initialize DM910X board
1222 */
1223
1224static void dmfe_dynamic_reset(struct DEVICE *dev)
1225{
1226 struct dmfe_board_info *db = netdev_priv(dev);
1227
1228 DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);
1229
1230 /* Stop MAC controller */
1231 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */
1232 update_cr6(db->cr6_data, dev->base_addr);
1233 outl(0, dev->base_addr + DCR7); /* Disable Interrupt */
1234 outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);
1235
1236 /* Disable upper layer interface */
1237 netif_stop_queue(dev);
1238
1239 /* Free Rx Allocate buffer */
1240 dmfe_free_rxbuffer(db);
1241
1242 /* system variable init */
1243 db->tx_packet_cnt = 0;
1244 db->tx_queue_cnt = 0;
1245 db->rx_avail_cnt = 0;
1246 db->link_failed = 1;
1247 db->wait_reset = 0;
1248
1249 /* Re-initialize DM910X board */
1250 dmfe_init_dm910x(dev);
1251
1252 /* Restart upper layer interface */
1253 netif_wake_queue(dev);
1254}
1255
1256
1257/*
1258 * free all allocated rx buffer
1259 */
1260
1261static void dmfe_free_rxbuffer(struct dmfe_board_info * db)
1262{
1263 DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);
1264
1265 /* free allocated rx buffer */
1266 while (db->rx_avail_cnt) {
1267 dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
1268 db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
1269 db->rx_avail_cnt--;
1270 }
1271}
1272
1273
1274/*
1275 * Reuse the SK buffer
1276 */
1277
1278static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
1279{
1280 struct rx_desc *rxptr = db->rx_insert_ptr;
1281
1282 if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
1283 rxptr->rx_skb_ptr = skb;
1284 rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev,
1285 skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1286 wmb();
1287 rxptr->rdes0 = cpu_to_le32(0x80000000);
1288 db->rx_avail_cnt++;
1289 db->rx_insert_ptr = rxptr->next_rx_desc;
1290 } else
1291 DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
1292}
1293
1294
1295/*
1296 * Initialize Transmit/Receive descriptors
 1297 * Using chain structure, and allocate Tx/Rx buffers
1298 */
1299
1300static void dmfe_descriptor_init(struct dmfe_board_info *db, unsigned long ioaddr)
1301{
1302 struct tx_desc *tmp_tx;
1303 struct rx_desc *tmp_rx;
1304 unsigned char *tmp_buf;
1305 dma_addr_t tmp_tx_dma, tmp_rx_dma;
1306 dma_addr_t tmp_buf_dma;
1307 int i;
1308
1309 DMFE_DBUG(0, "dmfe_descriptor_init()", 0);
1310
1311 /* tx descriptor start pointer */
1312 db->tx_insert_ptr = db->first_tx_desc;
1313 db->tx_remove_ptr = db->first_tx_desc;
1314 outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */
1315
1316 /* rx descriptor start pointer */
1317 db->first_rx_desc = (void *)db->first_tx_desc +
1318 sizeof(struct tx_desc) * TX_DESC_CNT;
1319
1320 db->first_rx_desc_dma = db->first_tx_desc_dma +
1321 sizeof(struct tx_desc) * TX_DESC_CNT;
1322 db->rx_insert_ptr = db->first_rx_desc;
1323 db->rx_ready_ptr = db->first_rx_desc;
1324 outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */
1325
1326 /* Init Transmit chain */
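 /* tdes3 of each descriptor holds the bus address of the next one; the last entry wraps back to the first, forming a ring */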
1327 tmp_buf = db->buf_pool_start;
1328 tmp_buf_dma = db->buf_pool_dma_start;
1329 tmp_tx_dma = db->first_tx_desc_dma;
1330 for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
1331 tmp_tx->tx_buf_ptr = tmp_buf;
1332 tmp_tx->tdes0 = cpu_to_le32(0);
1333 tmp_tx->tdes1 = cpu_to_le32(0x81000000); /* IC, chain */
1334 tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
1335 tmp_tx_dma += sizeof(struct tx_desc);
1336 tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
1337 tmp_tx->next_tx_desc = tmp_tx + 1;
1338 tmp_buf = tmp_buf + TX_BUF_ALLOC;
1339 tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
1340 }
1341 (--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
1342 tmp_tx->next_tx_desc = db->first_tx_desc;
1343
1344 /* Init Receive descriptor chain */
1345 tmp_rx_dma=db->first_rx_desc_dma;
1346 for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
1347 tmp_rx->rdes0 = cpu_to_le32(0);
1348 tmp_rx->rdes1 = cpu_to_le32(0x01000600);
1349 tmp_rx_dma += sizeof(struct rx_desc);
1350 tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
1351 tmp_rx->next_rx_desc = tmp_rx + 1;
1352 }
1353 (--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
1354 tmp_rx->next_rx_desc = db->first_rx_desc;
1355
1356 /* pre-allocate Rx buffer */
1357 allocate_rx_buffer(db);
1358}
1359
1360
1361/*
1362 * Update CR6 value
1363 * First stop DM910X, then write the value and restart
1364 */
1365
1366static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1367{
1368 u32 cr6_tmp;
1369
1370 cr6_tmp = cr6_data & ~0x2002; /* stop Tx/Rx */
1371 outl(cr6_tmp, ioaddr + DCR6);
1372 udelay(5);
1373 outl(cr6_data, ioaddr + DCR6);
1374 udelay(5);
1375}
1376
1377
1378/*
1379 * Send a setup frame for DM9132
1380 * This setup frame initializes the DM910X address filter mode
1381*/
1382
1383static void dm9132_id_table(struct DEVICE *dev, int mc_cnt)
1384{
1385 struct dev_mc_list *mcptr;
1386 u16 * addrptr;
1387 unsigned long ioaddr = dev->base_addr+0xc0; /* ID Table */
1388 u32 hash_val;
1389 u16 i, hash_table[4];
1390
1391 DMFE_DBUG(0, "dm9132_id_table()", 0);
1392
1393 /* Node address */
1394 addrptr = (u16 *) dev->dev_addr;
1395 outw(addrptr[0], ioaddr);
1396 ioaddr += 4;
1397 outw(addrptr[1], ioaddr);
1398 ioaddr += 4;
1399 outw(addrptr[2], ioaddr);
1400 ioaddr += 4;
1401
1402 /* Clear Hash Table */
1403 for (i = 0; i < 4; i++)
1404 hash_table[i] = 0x0;
1405
1406 /* broadcast address */
1407 hash_table[3] = 0x8000;
1408
1409 /* the multicast address in Hash Table : 64 bits */
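 /* the low 6 bits of each address's CRC select one of the 64 hash bits */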
1410 for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
1411 hash_val = cal_CRC( (char *) mcptr->dmi_addr, 6, 0) & 0x3f;
1412 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
1413 }
1414
1415 /* Write the hash table to MAC MD table */
1416 for (i = 0; i < 4; i++, ioaddr += 4)
1417 outw(hash_table[i], ioaddr);
1418}
1419
1420
1421/*
1422 * Send a setup frame for DM9102/DM9102A
1423 * This setup frame initializes the DM910X address filter mode
1424 */
1425
1426static void send_filter_frame(struct DEVICE *dev, int mc_cnt)
1427{
1428 struct dmfe_board_info *db = netdev_priv(dev);
1429 struct dev_mc_list *mcptr;
1430 struct tx_desc *txptr;
1431 u16 * addrptr;
1432 u32 * suptr;
1433 int i;
1434
1435 DMFE_DBUG(0, "send_filter_frame()", 0);
1436
1437 txptr = db->tx_insert_ptr;
1438 suptr = (u32 *) txptr->tx_buf_ptr;
1439
1440 /* Node address */
1441 addrptr = (u16 *) dev->dev_addr;
1442 *suptr++ = addrptr[0];
1443 *suptr++ = addrptr[1];
1444 *suptr++ = addrptr[2];
1445
1446 /* broadcast address */
1447 *suptr++ = 0xffff;
1448 *suptr++ = 0xffff;
1449 *suptr++ = 0xffff;
1450
1451 /* fill in the multicast addresses */
1452 for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
1453 addrptr = (u16 *) mcptr->dmi_addr;
1454 *suptr++ = addrptr[0];
1455 *suptr++ = addrptr[1];
1456 *suptr++ = addrptr[2];
1457 }
1458
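 /* pad the unused of the 14 multicast slots with the broadcast address */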
1459 for (; i<14; i++) {
1460 *suptr++ = 0xffff;
1461 *suptr++ = 0xffff;
1462 *suptr++ = 0xffff;
1463 }
1464
1465 /* prepare the setup frame */
1466 db->tx_insert_ptr = txptr->next_tx_desc;
1467 txptr->tdes1 = cpu_to_le32(0x890000c0);
1468
1469 /* Resource Check and Send the setup packet */
1470 if (!db->tx_packet_cnt) {
1471 /* Resource Empty */
1472 db->tx_packet_cnt++;
1473 txptr->tdes0 = cpu_to_le32(0x80000000);
1474 update_cr6(db->cr6_data | 0x2000, dev->base_addr);
1475 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
1476 update_cr6(db->cr6_data, dev->base_addr);
1477 dev->trans_start = jiffies;
1478 } else
1479 db->tx_queue_cnt++; /* Put in TX queue */
1480}
1481
1482
1483/*
1484 * Allocate rx buffers,
 1485 * allocating as many Rx buffers as possible
1486 */
1487
1488static void allocate_rx_buffer(struct dmfe_board_info *db)
1489{
1490 struct rx_desc *rxptr;
1491 struct sk_buff *skb;
1492
1493 rxptr = db->rx_insert_ptr;
1494
1495 while(db->rx_avail_cnt < RX_DESC_CNT) {
1496 if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
1497 break;
1498 rxptr->rx_skb_ptr = skb; /* FIXME (?) */
1499 rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data,
1500 RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1501 wmb();
1502 rxptr->rdes0 = cpu_to_le32(0x80000000);
1503 rxptr = rxptr->next_rx_desc;
1504 db->rx_avail_cnt++;
1505 }
1506
1507 db->rx_insert_ptr = rxptr;
1508}
1509
1510
1511/*
1512 * Read one word of data from the serial ROM
1513 */
1514
1515static u16 read_srom_word(long ioaddr, int offset)
1516{
1517 int i;
1518 u16 srom_data = 0;
1519 long cr9_ioaddr = ioaddr + DCR9;
1520
1521 outl(CR9_SROM_READ, cr9_ioaddr);
1522 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1523
1524 /* Send the Read Command 110b */
1525 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
1526 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
1527 SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);
1528
1529 /* Send the offset */
1530 for (i = 5; i >= 0; i--) {
1531 srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
1532 SROM_CLK_WRITE(srom_data, cr9_ioaddr);
1533 }
1534
1535 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1536
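 /* Clock in 16 data bits, MSB first, sampling the SROM data-out pin on each clock pulse */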
1537 for (i = 16; i > 0; i--) {
1538 outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
1539 udelay(5);
1540 srom_data = (srom_data << 1) |
1541 ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
1542 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1543 udelay(5);
1544 }
1545
1546 outl(CR9_SROM_READ, cr9_ioaddr);
1547 return srom_data;
1548}
1549
1550
1551/*
1552 * Auto sense the media mode
1553 */
1554
1555static u8 dmfe_sense_speed(struct dmfe_board_info * db)
1556{
1557 u8 ErrFlag = 0;
1558 u16 phy_mode;
1559
1560 /* CR6 bit18=0, select 10/100M */
1561 update_cr6( (db->cr6_data & ~0x40000), db->ioaddr);
1562
1563 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1564 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1565
1566 if ( (phy_mode & 0x24) == 0x24 ) {
1567 if (db->chip_id == PCI_DM9132_ID) /* DM9132 */
1568 phy_mode = phy_read(db->ioaddr,
1569 db->phy_addr, 7, db->chip_id) & 0xf000;
1570 else /* DM9102/DM9102A */
1571 phy_mode = phy_read(db->ioaddr,
1572 db->phy_addr, 17, db->chip_id) & 0xf000;
1573 /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
1574 switch (phy_mode) {
1575 case 0x1000: db->op_mode = DMFE_10MHF; break;
1576 case 0x2000: db->op_mode = DMFE_10MFD; break;
1577 case 0x4000: db->op_mode = DMFE_100MHF; break;
1578 case 0x8000: db->op_mode = DMFE_100MFD; break;
1579 default: db->op_mode = DMFE_10MHF;
1580 ErrFlag = 1;
1581 break;
1582 }
1583 } else {
1584 db->op_mode = DMFE_10MHF;
1585 DMFE_DBUG(0, "Link Failed :", phy_mode);
1586 ErrFlag = 1;
1587 }
1588
1589 return ErrFlag;
1590}
1591
1592
1593/*
1594 * Set 10/100 phyxcer capability
1595 * AUTO mode : phyxcer register4 is NIC capability
1596 * Force mode: phyxcer register4 is the force media
1597 */
1598
1599static void dmfe_set_phyxcer(struct dmfe_board_info *db)
1600{
1601 u16 phy_reg;
1602
1603 /* Select 10/100M phyxcer */
1604 db->cr6_data &= ~0x40000;
1605 update_cr6(db->cr6_data, db->ioaddr);
1606
1607 /* DM9009 Chip: Phyxcer reg18 bit12=0 */
1608 if (db->chip_id == PCI_DM9009_ID) {
1609 phy_reg = phy_read(db->ioaddr,
1610 db->phy_addr, 18, db->chip_id) & ~0x1000;
1611
1612 phy_write(db->ioaddr,
1613 db->phy_addr, 18, phy_reg, db->chip_id);
1614 }
1615
1616 /* Phyxcer capability setting */
1617 phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
1618
1619 if (db->media_mode & DMFE_AUTO) {
1620 /* AUTO Mode */
1621 phy_reg |= db->PHY_reg4;
1622 } else {
1623 /* Force Mode */
1624 switch(db->media_mode) {
1625 case DMFE_10MHF: phy_reg |= 0x20; break;
1626 case DMFE_10MFD: phy_reg |= 0x40; break;
1627 case DMFE_100MHF: phy_reg |= 0x80; break;
1628 case DMFE_100MFD: phy_reg |= 0x100; break;
1629 }
1630 if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61;
1631 }
1632
1633 /* Write new capability to Phyxcer Reg4 */
1634 if ( !(phy_reg & 0x01e0)) {
1635 phy_reg|=db->PHY_reg4;
1636 db->media_mode|=DMFE_AUTO;
1637 }
1638 phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
1639
1640 /* Restart Auto-Negotiation */
1641 if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1642 phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
1643 if ( !db->chip_type )
1644 phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
1645}
1646
1647
1648/*
1649 * Process op-mode
1650 * AUTO mode : PHY controller in Auto-negotiation Mode
1651 * Force mode: PHY controller in force mode with HUB
1652 * N-way force capability with SWITCH
1653 */
1654
1655static void dmfe_process_mode(struct dmfe_board_info *db)
1656{
1657 u16 phy_reg;
1658
1659 /* Full Duplex Mode Check */
1660 if (db->op_mode & 0x4)
1661 db->cr6_data |= CR6_FDM; /* Set Full Duplex Bit */
1662 else
1663 db->cr6_data &= ~CR6_FDM; /* Clear Full Duplex Bit */
1664
1665 /* Transceiver Selection */
1666 if (db->op_mode & 0x10) /* 1M HomePNA */
1667 db->cr6_data |= 0x40000;/* External MII select */
1668 else
1669 db->cr6_data &= ~0x40000;/* Internal 10/100 transceiver */
1670
1671 update_cr6(db->cr6_data, db->ioaddr);
1672
1673 /* 10/100M phyxcer force mode needed */
1674 if ( !(db->media_mode & 0x18)) {
1675 /* Force Mode */
1676 phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
1677 if ( !(phy_reg & 0x1) ) {
1678 /* partner without N-Way capability */
1679 phy_reg = 0x0;
1680 switch(db->op_mode) {
1681 case DMFE_10MHF: phy_reg = 0x0; break;
1682 case DMFE_10MFD: phy_reg = 0x100; break;
1683 case DMFE_100MHF: phy_reg = 0x2000; break;
1684 case DMFE_100MFD: phy_reg = 0x2100; break;
1685 }
1686 phy_write(db->ioaddr,
1687 db->phy_addr, 0, phy_reg, db->chip_id);
1688 if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1689 mdelay(20);
1690 phy_write(db->ioaddr,
1691 db->phy_addr, 0, phy_reg, db->chip_id);
1692 }
1693 }
1694}
1695
1696
1697/*
1698 * Write a word to Phy register
1699 */
1700
1701static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset,
1702 u16 phy_data, u32 chip_id)
1703{
1704 u16 i;
1705 unsigned long ioaddr;
1706
1707 if (chip_id == PCI_DM9132_ID) {
1708 ioaddr = iobase + 0x80 + offset * 4;
1709 outw(phy_data, ioaddr);
1710 } else {
1711 /* DM9102/DM9102A Chip */
1712 ioaddr = iobase + DCR9;
1713
1714 /* Send 33 synchronization clocks to the PHY controller */
1715 for (i = 0; i < 35; i++)
1716 phy_write_1bit(ioaddr, PHY_DATA_1);
1717
1718 /* Send start command(01) to Phy */
1719 phy_write_1bit(ioaddr, PHY_DATA_0);
1720 phy_write_1bit(ioaddr, PHY_DATA_1);
1721
1722 /* Send write command(01) to Phy */
1723 phy_write_1bit(ioaddr, PHY_DATA_0);
1724 phy_write_1bit(ioaddr, PHY_DATA_1);
1725
1726 /* Send Phy address */
1727 for (i = 0x10; i > 0; i = i >> 1)
1728 phy_write_1bit(ioaddr,
1729 phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1730
1731 /* Send register address */
1732 for (i = 0x10; i > 0; i = i >> 1)
1733 phy_write_1bit(ioaddr,
1734 offset & i ? PHY_DATA_1 : PHY_DATA_0);
1735
1736 /* write transition */
1737 phy_write_1bit(ioaddr, PHY_DATA_1);
1738 phy_write_1bit(ioaddr, PHY_DATA_0);
1739
1740 /* Write a word of data to the PHY controller */
1741 for ( i = 0x8000; i > 0; i >>= 1)
1742 phy_write_1bit(ioaddr,
1743 phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
1744 }
1745}
1746
1747
1748/*
1749 * Read a word of data from the PHY register
1750 */
1751
1752static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
1753{
1754 int i;
1755 u16 phy_data;
1756 unsigned long ioaddr;
1757
1758 if (chip_id == PCI_DM9132_ID) {
1759 /* DM9132 Chip */
1760 ioaddr = iobase + 0x80 + offset * 4;
1761 phy_data = inw(ioaddr);
1762 } else {
1763 /* DM9102/DM9102A Chip */
1764 ioaddr = iobase + DCR9;
1765
1766 /* Send 33 synchronization clocks to the PHY controller */
1767 for (i = 0; i < 35; i++)
1768 phy_write_1bit(ioaddr, PHY_DATA_1);
1769
1770 /* Send start command(01) to Phy */
1771 phy_write_1bit(ioaddr, PHY_DATA_0);
1772 phy_write_1bit(ioaddr, PHY_DATA_1);
1773
1774 /* Send read command(10) to Phy */
1775 phy_write_1bit(ioaddr, PHY_DATA_1);
1776 phy_write_1bit(ioaddr, PHY_DATA_0);
1777
1778 /* Send Phy address */
1779 for (i = 0x10; i > 0; i = i >> 1)
1780 phy_write_1bit(ioaddr,
1781 phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1782
1783 /* Send register address */
1784 for (i = 0x10; i > 0; i = i >> 1)
1785 phy_write_1bit(ioaddr,
1786 offset & i ? PHY_DATA_1 : PHY_DATA_0);
1787
1788 /* Skip transition state */
1789 phy_read_1bit(ioaddr);
1790
1791 /* read 16bit data */
1792 for (phy_data = 0, i = 0; i < 16; i++) {
1793 phy_data <<= 1;
1794 phy_data |= phy_read_1bit(ioaddr);
1795 }
1796 }
1797
1798 return phy_data;
1799}
1800
1801
1802/*
1803 * Write one data bit to the PHY controller
1804 */
1805
1806static void phy_write_1bit(unsigned long ioaddr, u32 phy_data)
1807{
1808 outl(phy_data, ioaddr); /* MII Clock Low */
1809 udelay(1);
1810 outl(phy_data | MDCLKH, ioaddr); /* MII Clock High */
1811 udelay(1);
1812 outl(phy_data, ioaddr); /* MII Clock Low */
1813 udelay(1);
1814}
1815
1816
1817/*
1818 * Read one data bit from the PHY controller
1819 */
1820
1821static u16 phy_read_1bit(unsigned long ioaddr)
1822{
1823 u16 phy_data;
1824
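 /* Raise the MII management clock, sample the data-in bit (bit 19), then drop the clock again */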
1825 outl(0x50000, ioaddr);
1826 udelay(1);
1827 phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
1828 outl(0x40000, ioaddr);
1829 udelay(1);
1830
1831 return phy_data;
1832}
1833
1834
1835/*
1836 * Parse SROM and media mode
1837 */
1838
1839static void dmfe_parse_srom(struct dmfe_board_info * db)
1840{
1841 char * srom = db->srom;
1842 int dmfe_mode, tmp_reg;
1843
1844 DMFE_DBUG(0, "dmfe_parse_srom() ", 0);
1845
1846 /* Init CR15 */
1847 db->cr15_data = CR15_DEFAULT;
1848
1849 /* Check SROM Version */
1850 if ( ( (int) srom[18] & 0xff) == SROM_V41_CODE) {
1851 /* SROM V4.01 */
1852 /* Get NIC support media mode */
1853 db->NIC_capability = le16_to_cpup((__le16 *)srom + 34/2);
1854 db->PHY_reg4 = 0;
1855 for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) {
1856 switch( db->NIC_capability & tmp_reg ) {
1857 case 0x1: db->PHY_reg4 |= 0x0020; break;
1858 case 0x2: db->PHY_reg4 |= 0x0040; break;
1859 case 0x4: db->PHY_reg4 |= 0x0080; break;
1860 case 0x8: db->PHY_reg4 |= 0x0100; break;
1861 }
1862 }
1863
1864 /* Check whether Media Mode is forced or not */
1865 dmfe_mode = le32_to_cpup((__le32 *)srom + 34/4) &
1866 le32_to_cpup((__le32 *)srom + 36/4);
1867 switch(dmfe_mode) {
1868 case 0x4: dmfe_media_mode = DMFE_100MHF; break; /* 100MHF */
1869 case 0x2: dmfe_media_mode = DMFE_10MFD; break; /* 10MFD */
1870 case 0x8: dmfe_media_mode = DMFE_100MFD; break; /* 100MFD */
1871 case 0x100:
1872 case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;/* HomePNA */
1873 }
1874
1875 /* Special Function setting */
1876 /* VLAN function */
1877 if ( (SF_mode & 0x1) || (srom[43] & 0x80) )
1878 db->cr15_data |= 0x40;
1879
1880 /* Flow Control */
1881 if ( (SF_mode & 0x2) || (srom[40] & 0x1) )
1882 db->cr15_data |= 0x400;
1883
1884 /* TX pause packet */
1885 if ( (SF_mode & 0x4) || (srom[40] & 0xe) )
1886 db->cr15_data |= 0x9800;
1887 }
1888
1889 /* Parse HPNA parameter */
1890 db->HPNA_command = 1;
1891
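	/*
	 * HPNA_command is later written to register 16 of the DM9801/DM9802
	 * companion PHY. Bit 15 appears to suppress remote commands, bits
	 * 8-11 encode the power/speed mode that dmfe_HPNA_remote_cmd_chk()
	 * compares against the remote status, and the low bits select
	 * whether a remote command is actually issued (see the two switch
	 * statements below).
	 */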
1892 /* Accept remote command or not */
1893 if (HPNA_rx_cmd == 0)
1894 db->HPNA_command |= 0x8000;
1895
1896 /* Issue remote command & operation mode */
1897 if (HPNA_tx_cmd == 1)
1898 switch(HPNA_mode) { /* Issue Remote Command */
1899 case 0: db->HPNA_command |= 0x0904; break;
1900 case 1: db->HPNA_command |= 0x0a00; break;
1901 case 2: db->HPNA_command |= 0x0506; break;
1902 case 3: db->HPNA_command |= 0x0602; break;
1903 }
1904 else
1905 switch(HPNA_mode) { /* Don't Issue */
1906 case 0: db->HPNA_command |= 0x0004; break;
1907 case 1: db->HPNA_command |= 0x0000; break;
1908 case 2: db->HPNA_command |= 0x0006; break;
1909 case 3: db->HPNA_command |= 0x0002; break;
1910 }
1911
1912	/* Check whether a DM9801 or DM9802 is present */
1913 db->HPNA_present = 0;
1914 update_cr6(db->cr6_data|0x40000, db->ioaddr);
1915 tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
1916 if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
1917 /* DM9801 or DM9802 present */
1918 db->HPNA_timer = 8;
1919 if ( phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
1920 /* DM9801 HomeRun */
1921 db->HPNA_present = 1;
1922 dmfe_program_DM9801(db, tmp_reg);
1923 } else {
1924 /* DM9802 LongRun */
1925 db->HPNA_present = 2;
1926 dmfe_program_DM9802(db);
1927 }
1928 }
1929
1930}
1931
1932
1933/*
1934 * Init HomeRun DM9801
1935 */
1936
1937static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev)
1938{
1939 uint reg17, reg25;
1940
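	/*
	 * Fold the requested HPNA noise floor into PHY registers 17 and 25.
	 * Which register takes the offset, and with what bias, depends on
	 * the DM9801 silicon revision passed in as HPNA_rev (read from PHY
	 * register 3 by the caller).
	 */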
1941 if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9801_NOISE_FLOOR;
1942 switch(HPNA_rev) {
1943 case 0xb900: /* DM9801 E3 */
1944 db->HPNA_command |= 0x1000;
1945 reg25 = phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
1946 reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
1947 reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
1948 break;
1949 case 0xb901: /* DM9801 E4 */
1950 reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
1951 reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
1952 reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
1953 reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
1954 break;
1955 case 0xb902: /* DM9801 E5 */
1956 case 0xb903: /* DM9801 E6 */
1957 default:
1958 db->HPNA_command |= 0x1000;
1959 reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
1960 reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
1961 reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
1962 reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
1963 break;
1964 }
1965 phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
1966 phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
1967 phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
1968}
1969
1970
1971/*
1972 * Init HomeRun DM9802
1973 */
1974
1975static void dmfe_program_DM9802(struct dmfe_board_info * db)
1976{
1977 uint phy_reg;
1978
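	/*
	 * Program the HPNA command register and fold the noise floor into
	 * the low byte of PHY register 25.
	 */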
1979 if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
1980 phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
1981 phy_reg = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
1982 phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor;
1983 phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
1984}
1985
1986
1987/*
1988 * Check the remote HPNA power and speed status. If they do not match
1989 * our settings, issue the command again.
1990 */
1991
1992static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
1993{
1994 uint phy_reg;
1995
1996	/* Get the remote device status */
1997 phy_reg = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
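	/*
	 * Bits 5 and 6 of PHY register 17 report the remote device's power
	 * (LP/HP) and speed (LS/HS) status; map them onto the command
	 * encoding used in HPNA_command.
	 */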
1998 switch(phy_reg) {
1999 case 0x00: phy_reg = 0x0a00;break; /* LP/LS */
2000 case 0x20: phy_reg = 0x0900;break; /* LP/HS */
2001 case 0x40: phy_reg = 0x0600;break; /* HP/LS */
2002 case 0x60: phy_reg = 0x0500;break; /* HP/HS */
2003 }
2004
2005	/* Check whether the remote device status matches our setting */
2006 if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
2007 phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
2008 db->chip_id);
2009		db->HPNA_timer = 8;
2010	} else
2011		db->HPNA_timer = 600;	/* Status matches; re-check every 10 minutes */
2012}
2013
2014
2015
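/*
 * PCI IDs handled by this driver (vendor 0x1282 is Davicom). The
 * driver_data field carries the combined device/vendor chip ID that the
 * rest of the driver uses to tell the chip variants apart.
 */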
2016static struct pci_device_id dmfe_pci_tbl[] = {
2017 { 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
2018 { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
2019 { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
2020 { 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID },
2021 { 0, }
2022};
2023MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);
2024
2025
2026static struct pci_driver dmfe_driver = {
2027 .name = "dmfe",
2028 .id_table = dmfe_pci_tbl,
2029 .probe = dmfe_init_one,
2030 .remove = __devexit_p(dmfe_remove_one),
2031};
2032
2033MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
2034MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
2035MODULE_LICENSE("GPL");
2036MODULE_VERSION(DRV_VERSION);
2037
2038module_param(debug, int, 0);
2039module_param(mode, byte, 0);
2040module_param(cr6set, int, 0);
2041module_param(chkmode, byte, 0);
2042module_param(HPNA_mode, byte, 0);
2043module_param(HPNA_rx_cmd, byte, 0);
2044module_param(HPNA_tx_cmd, byte, 0);
2045module_param(HPNA_NoiseFloor, byte, 0);
2046module_param(SF_mode, byte, 0);
2047MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
2048MODULE_PARM_DESC(mode, "Davicom DM9xxx: "
2049 "Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
2050
2051MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function "
2052		"(bit 0: VLAN, bit 1: Flow Control, bit 2: TX pause packet)");
2053
2054/* Description:
2055 * When the user loads the module with insmod, the kernel invokes
2056 * init_module() to initialize and register the driver.
2057 */
2058
2059static int __init dmfe_init_module(void)
2060{
2061 int rc;
2062
2063 printk(version);
2064 printed_version = 1;
2065
2066 DMFE_DBUG(0, "init_module() ", debug);
2067
2068 if (debug)
2069 dmfe_debug = debug; /* set debug flag */
2070 if (cr6set)
2071 dmfe_cr6_user_set = cr6set;
2072
2073 switch(mode) {
2074 case DMFE_10MHF:
2075 case DMFE_100MHF:
2076 case DMFE_10MFD:
2077 case DMFE_100MFD:
2078 case DMFE_1M_HPNA:
2079 dmfe_media_mode = mode;
2080 break;
2081	default: dmfe_media_mode = DMFE_AUTO;
2082 break;
2083 }
2084
2085 if (HPNA_mode > 4)
2086 HPNA_mode = 0; /* Default: LP/HS */
2087 if (HPNA_rx_cmd > 1)
2088 HPNA_rx_cmd = 0; /* Default: Ignored remote cmd */
2089 if (HPNA_tx_cmd > 1)
2090 HPNA_tx_cmd = 0; /* Default: Don't issue remote cmd */
2091 if (HPNA_NoiseFloor > 15)
2092 HPNA_NoiseFloor = 0;
2093
2094	rc = pci_register_driver(&dmfe_driver);
2095 if (rc < 0)
2096 return rc;
2097
2098 return 0;
2099}
2100
2101
2102/*
2103 * Description:
2104 * When the user removes the module with rmmod, the kernel invokes
2105 * cleanup_module() to unregister all registered services.
2106 */
2107
2108static void __exit dmfe_cleanup_module(void)
2109{
2110	DMFE_DBUG(0, "dmfe_cleanup_module() ", debug);
2111 pci_unregister_driver(&dmfe_driver);
2112}
2113
2114module_init(dmfe_init_module);
2115module_exit(dmfe_cleanup_module);