]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/3c527.c
tg3: Refine tg3_vlan_rx_register()
[net-next-2.6.git] / drivers / net / 3c527.c
CommitLineData
1da177e4
LT
1/* 3c527.c: 3Com Etherlink/MC32 driver for Linux 2.4 and 2.6.
2 *
3 * (c) Copyright 1998 Red Hat Software Inc
6aa20a22 4 * Written by Alan Cox.
1da177e4
LT
5 * Further debugging by Carl Drougge.
6 * Initial SMP support by Felipe W Damasio <felipewd@terra.com.br>
7 * Heavily modified by Richard Procter <rnp@paradise.net.nz>
8 *
9 * Based on skeleton.c written 1993-94 by Donald Becker and ne2.c
10 * (for the MCA stuff) written by Wim Dumon.
11 *
12 * Thanks to 3Com for making this possible by providing me with the
13 * documentation.
14 *
15 * This software may be used and distributed according to the terms
16 * of the GNU General Public License, incorporated herein by reference.
17 *
18 */
19
20#define DRV_NAME "3c527"
21#define DRV_VERSION "0.7-SMP"
22#define DRV_RELDATE "2003/09/21"
23
24static const char *version =
25DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Richard Procter <rnp@paradise.net.nz>\n";
26
27/**
28 * DOC: Traps for the unwary
29 *
30 * The diagram (Figure 1-1) and the POS summary disagree with the
31 * "Interrupt Level" section in the manual.
32 *
6aa20a22
JG
33 * The manual contradicts itself when describing the minimum number
34 * buffers in the 'configure lists' command.
35 * My card accepts a buffer config of 4/4.
1da177e4
LT
36 *
37 * Setting the SAV BP bit does not save bad packets, but
6aa20a22 38 * only enables RX on-card stats collection.
1da177e4
LT
39 *
40 * The documentation in places seems to miss things. In actual fact
41 * I've always eventually found everything is documented, it just
42 * requires careful study.
43 *
44 * DOC: Theory Of Operation
45 *
46 * The 3com 3c527 is a 32bit MCA bus mastering adapter with a large
47 * amount of on board intelligence that housekeeps a somewhat dumber
48 * Intel NIC. For performance we want to keep the transmit queue deep
49 * as the card can transmit packets while fetching others from main
50 * memory by bus master DMA. Transmission and reception are driven by
51 * circular buffer queues.
52 *
53 * The mailboxes can be used for controlling how the card traverses
54 * its buffer rings, but are used only for inital setup in this
55 * implementation. The exec mailbox allows a variety of commands to
56 * be executed. Each command must complete before the next is
57 * executed. Primarily we use the exec mailbox for controlling the
58 * multicast lists. We have to do a certain amount of interesting
59 * hoop jumping as the multicast list changes can occur in interrupt
60 * state when the card has an exec command pending. We defer such
61 * events until the command completion interrupt.
62 *
63 * A copy break scheme (taken from 3c59x.c) is employed whereby
64 * received frames exceeding a configurable length are passed
65 * directly to the higher networking layers without incuring a copy,
66 * in what amounts to a time/space trade-off.
6aa20a22 67 *
1da177e4
LT
68 * The card also keeps a large amount of statistical information
69 * on-board. In a perfect world, these could be used safely at no
70 * cost. However, lacking information to the contrary, processing
71 * them without races would involve so much extra complexity as to
72 * make it unworthwhile to do so. In the end, a hybrid SW/HW
6aa20a22 73 * implementation was made necessary --- see mc32_update_stats().
1da177e4
LT
74 *
75 * DOC: Notes
6aa20a22 76 *
1da177e4
LT
77 * It should be possible to use two or more cards, but at this stage
78 * only by loading two copies of the same module.
79 *
80 * The on-board 82586 NIC has trouble receiving multiple
81 * back-to-back frames and so is likely to drop packets from fast
82 * senders.
83**/
84
85#include <linux/module.h>
86
87#include <linux/errno.h>
88#include <linux/netdevice.h>
89#include <linux/etherdevice.h>
90#include <linux/if_ether.h>
91#include <linux/init.h>
92#include <linux/kernel.h>
93#include <linux/types.h>
94#include <linux/fcntl.h>
95#include <linux/interrupt.h>
96#include <linux/mca-legacy.h>
97#include <linux/ioport.h>
98#include <linux/in.h>
99#include <linux/skbuff.h>
100#include <linux/slab.h>
101#include <linux/string.h>
102#include <linux/wait.h>
103#include <linux/ethtool.h>
104#include <linux/completion.h>
105#include <linux/bitops.h>
6188e10d 106#include <linux/semaphore.h>
1da177e4 107
1da177e4
LT
108#include <asm/uaccess.h>
109#include <asm/system.h>
110#include <asm/io.h>
111#include <asm/dma.h>
112
113#include "3c527.h"
114
115MODULE_LICENSE("GPL");
116
117/*
118 * The name of the card. Is used for messages and in the requests for
119 * io regions, irqs and dma channels
120 */
121static const char* cardname = DRV_NAME;
122
123/* use 0 for production, 1 for verification, >2 for debug */
124#ifndef NET_DEBUG
125#define NET_DEBUG 2
126#endif
127
128#undef DEBUG_IRQ
129
130static unsigned int mc32_debug = NET_DEBUG;
131
132/* The number of low I/O ports used by the ethercard. */
133#define MC32_IO_EXTENT 8
134
6aa20a22 135/* As implemented, values must be a power-of-2 -- 4/8/16/32 */
1da177e4
LT
136#define TX_RING_LEN 32 /* Typically the card supports 37 */
137#define RX_RING_LEN 8 /* " " " */
138
6aa20a22
JG
139/* Copy break point, see above for details.
140 * Setting to > 1512 effectively disables this feature. */
1da177e4
LT
141#define RX_COPYBREAK 200 /* Value from 3c59x.c */
142
143/* Issue the 82586 workaround command - this is for "busy lans", but
6aa20a22
JG
144 * basically means for all lans now days - has a performance (latency)
145 * cost, but best set. */
1da177e4
LT
146static const int WORKAROUND_82586=1;
147
148/* Pointers to buffers and their on-card records */
/* One host-side ring slot: the on-card descriptor it shadows plus the
   sk_buff whose data the descriptor points at. */
struct mc32_ring_desc
{
	volatile struct skb_header *p;	/* On-card descriptor (shared memory) */
	struct sk_buff *skb;		/* Host buffer backing the descriptor */
};
154
155/* Information that needs to be kept for each board. */
6aa20a22 156struct mc32_local
1da177e4
LT
157{
158 int slot;
159
160 u32 base;
1da177e4
LT
161 volatile struct mc32_mailbox *rx_box;
162 volatile struct mc32_mailbox *tx_box;
163 volatile struct mc32_mailbox *exec_box;
164 volatile struct mc32_stats *stats; /* Start of on-card statistics */
165 u16 tx_chain; /* Transmit list start offset */
166 u16 rx_chain; /* Receive list start offset */
6aa20a22 167 u16 tx_len; /* Transmit list count */
1da177e4
LT
168 u16 rx_len; /* Receive list count */
169
170 u16 xceiver_desired_state; /* HALTED or RUNNING */
171 u16 cmd_nonblocking; /* Thread is uninterested in command result */
172 u16 mc_reload_wait; /* A multicast load request is pending */
173 u32 mc_list_valid; /* True when the mclist is set */
174
175 struct mc32_ring_desc tx_ring[TX_RING_LEN]; /* Host Transmit ring */
176 struct mc32_ring_desc rx_ring[RX_RING_LEN]; /* Host Receive ring */
177
178 atomic_t tx_count; /* buffers left */
179 atomic_t tx_ring_head; /* index to tx en-queue end */
180 u16 tx_ring_tail; /* index to tx de-queue end */
181
6aa20a22 182 u16 rx_ring_tail; /* index to rx de-queue end */
1da177e4
LT
183
184 struct semaphore cmd_mutex; /* Serialises issuing of execute commands */
185 struct completion execution_cmd; /* Card has completed an execute command */
186 struct completion xceiver_cmd; /* Card has completed a tx or rx command */
187};
188
189/* The station (ethernet) address prefix, used for a sanity check. */
190#define SA_ADDR0 0x02
191#define SA_ADDR1 0x60
192#define SA_ADDR2 0xAC
193
/* (MCA adapter id, human-readable name) pair for the probe table. */
struct mca_adapters_t {
	unsigned int id;
	char *name;
};
198
199static const struct mca_adapters_t mc32_adapters[] = {
200 { 0x0041, "3COM EtherLink MC/32" },
201 { 0x8EF5, "IBM High Performance Lan Adapter" },
202 { 0x0000, NULL }
203};
204
205
6aa20a22 206/* Macros for ring index manipulations */
1da177e4
LT
207static inline u16 next_rx(u16 rx) { return (rx+1)&(RX_RING_LEN-1); };
208static inline u16 prev_rx(u16 rx) { return (rx-1)&(RX_RING_LEN-1); };
209
210static inline u16 next_tx(u16 tx) { return (tx+1)&(TX_RING_LEN-1); };
211
212
213/* Index to functions, as function prototypes. */
214static int mc32_probe1(struct net_device *dev, int ioaddr);
215static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len);
216static int mc32_open(struct net_device *dev);
217static void mc32_timeout(struct net_device *dev);
218static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev);
7d12e780 219static irqreturn_t mc32_interrupt(int irq, void *dev_id);
1da177e4
LT
220static int mc32_close(struct net_device *dev);
221static struct net_device_stats *mc32_get_stats(struct net_device *dev);
222static void mc32_set_multicast_list(struct net_device *dev);
223static void mc32_reset_multicast_list(struct net_device *dev);
7282d491 224static const struct ethtool_ops netdev_ethtool_ops;
1da177e4
LT
225
226static void cleanup_card(struct net_device *dev)
227{
228 struct mc32_local *lp = netdev_priv(dev);
229 unsigned slot = lp->slot;
230 mca_mark_as_unused(slot);
231 mca_set_adapter_name(slot, NULL);
232 free_irq(dev->irq, dev);
233 release_region(dev->base_addr, MC32_IO_EXTENT);
234}
235
236/**
237 * mc32_probe - Search for supported boards
238 * @unit: interface number to use
239 *
240 * Because MCA bus is a real bus and we can scan for cards we could do a
241 * single scan for all boards here. Right now we use the passed in device
242 * structure and scan for only one board. This needs fixing for modules
243 * in particular.
244 */
245
246struct net_device *__init mc32_probe(int unit)
247{
248 struct net_device *dev = alloc_etherdev(sizeof(struct mc32_local));
249 static int current_mca_slot = -1;
250 int i;
251 int err;
252
253 if (!dev)
254 return ERR_PTR(-ENOMEM);
255
256 if (unit >= 0)
257 sprintf(dev->name, "eth%d", unit);
258
6aa20a22 259 /* Do not check any supplied i/o locations.
1da177e4
LT
260 POS registers usually don't fail :) */
261
6aa20a22
JG
262 /* MCA cards have POS registers.
263 Autodetecting MCA cards is extremely simple.
1da177e4
LT
264 Just search for the card. */
265
266 for(i = 0; (mc32_adapters[i].name != NULL); i++) {
6aa20a22 267 current_mca_slot =
1da177e4
LT
268 mca_find_unused_adapter(mc32_adapters[i].id, 0);
269
270 if(current_mca_slot != MCA_NOTFOUND) {
271 if(!mc32_probe1(dev, current_mca_slot))
272 {
6aa20a22 273 mca_set_adapter_name(current_mca_slot,
1da177e4
LT
274 mc32_adapters[i].name);
275 mca_mark_as_used(current_mca_slot);
276 err = register_netdev(dev);
277 if (err) {
278 cleanup_card(dev);
279 free_netdev(dev);
280 dev = ERR_PTR(err);
281 }
282 return dev;
283 }
6aa20a22 284
1da177e4
LT
285 }
286 }
287 free_netdev(dev);
288 return ERR_PTR(-ENODEV);
289}
290
4394e653
SH
291static const struct net_device_ops netdev_ops = {
292 .ndo_open = mc32_open,
293 .ndo_stop = mc32_close,
294 .ndo_start_xmit = mc32_send_packet,
295 .ndo_get_stats = mc32_get_stats,
296 .ndo_set_multicast_list = mc32_set_multicast_list,
297 .ndo_tx_timeout = mc32_timeout,
298 .ndo_change_mtu = eth_change_mtu,
299 .ndo_set_mac_address = eth_mac_addr,
300 .ndo_validate_addr = eth_validate_addr,
301};
302
1da177e4
LT
303/**
304 * mc32_probe1 - Check a given slot for a board and test the card
305 * @dev: Device structure to fill in
306 * @slot: The MCA bus slot being used by this card
307 *
308 * Decode the slot data and configure the card structures. Having done this we
309 * can reset the card and configure it. The card does a full self test cycle
6aa20a22 310 * in firmware so we have to wait for it to return and post us either a
1da177e4
LT
311 * failure case or some addresses we use to find the board internals.
312 */
313
314static int __init mc32_probe1(struct net_device *dev, int slot)
315{
316 static unsigned version_printed;
317 int i, err;
318 u8 POS;
319 u32 base;
320 struct mc32_local *lp = netdev_priv(dev);
321 static u16 mca_io_bases[]={
322 0x7280,0x7290,
323 0x7680,0x7690,
324 0x7A80,0x7A90,
325 0x7E80,0x7E90
326 };
327 static u32 mca_mem_bases[]={
328 0x00C0000,
329 0x00C4000,
330 0x00C8000,
331 0x00CC000,
332 0x00D0000,
333 0x00D4000,
334 0x00D8000,
335 0x00DC000
336 };
337 static char *failures[]={
338 "Processor instruction",
339 "Processor data bus",
340 "Processor data bus",
341 "Processor data bus",
342 "Adapter bus",
343 "ROM checksum",
344 "Base RAM",
345 "Extended RAM",
346 "82586 internal loopback",
347 "82586 initialisation failure",
348 "Adapter list configuration error"
349 };
350
351 /* Time to play MCA games */
352
353 if (mc32_debug && version_printed++ == 0)
354 printk(KERN_DEBUG "%s", version);
355
356 printk(KERN_INFO "%s: %s found in slot %d:", dev->name, cardname, slot);
357
358 POS = mca_read_stored_pos(slot, 2);
6aa20a22 359
1da177e4
LT
360 if(!(POS&1))
361 {
362 printk(" disabled.\n");
363 return -ENODEV;
364 }
365
366 /* Fill in the 'dev' fields. */
367 dev->base_addr = mca_io_bases[(POS>>1)&7];
368 dev->mem_start = mca_mem_bases[(POS>>4)&7];
6aa20a22 369
1da177e4
LT
370 POS = mca_read_stored_pos(slot, 4);
371 if(!(POS&1))
372 {
373 printk("memory window disabled.\n");
374 return -ENODEV;
375 }
376
377 POS = mca_read_stored_pos(slot, 5);
6aa20a22 378
1da177e4
LT
379 i=(POS>>4)&3;
380 if(i==3)
381 {
382 printk("invalid memory window.\n");
383 return -ENODEV;
384 }
6aa20a22 385
1da177e4
LT
386 i*=16384;
387 i+=16384;
6aa20a22 388
1da177e4 389 dev->mem_end=dev->mem_start + i;
6aa20a22 390
1da177e4 391 dev->irq = ((POS>>2)&3)+9;
6aa20a22 392
1da177e4
LT
393 if(!request_region(dev->base_addr, MC32_IO_EXTENT, cardname))
394 {
395 printk("io 0x%3lX, which is busy.\n", dev->base_addr);
396 return -EBUSY;
397 }
398
399 printk("io 0x%3lX irq %d mem 0x%lX (%dK)\n",
400 dev->base_addr, dev->irq, dev->mem_start, i/1024);
6aa20a22
JG
401
402
1da177e4 403 /* We ought to set the cache line size here.. */
6aa20a22
JG
404
405
1da177e4
LT
406 /*
407 * Go PROM browsing
408 */
6aa20a22 409
1da177e4
LT
410 /* Retrieve and print the ethernet address. */
411 for (i = 0; i < 6; i++)
412 {
413 mca_write_pos(slot, 6, i+12);
414 mca_write_pos(slot, 7, 0);
6aa20a22 415
0795af57 416 dev->dev_addr[i] = mca_read_pos(slot,3);
1da177e4
LT
417 }
418
e174961c 419 printk("%s: Address %pM", dev->name, dev->dev_addr);
0795af57 420
1da177e4
LT
421 mca_write_pos(slot, 6, 0);
422 mca_write_pos(slot, 7, 0);
423
424 POS = mca_read_stored_pos(slot, 4);
6aa20a22 425
1da177e4
LT
426 if(POS&2)
427 printk(" : BNC port selected.\n");
6aa20a22 428 else
1da177e4 429 printk(" : AUI port selected.\n");
6aa20a22 430
1da177e4
LT
431 POS=inb(dev->base_addr+HOST_CTRL);
432 POS|=HOST_CTRL_ATTN|HOST_CTRL_RESET;
433 POS&=~HOST_CTRL_INTE;
434 outb(POS, dev->base_addr+HOST_CTRL);
435 /* Reset adapter */
436 udelay(100);
437 /* Reset off */
438 POS&=~(HOST_CTRL_ATTN|HOST_CTRL_RESET);
439 outb(POS, dev->base_addr+HOST_CTRL);
6aa20a22 440
1da177e4 441 udelay(300);
6aa20a22 442
1da177e4
LT
443 /*
444 * Grab the IRQ
445 */
446
1fb9df5d 447 err = request_irq(dev->irq, &mc32_interrupt, IRQF_SHARED | IRQF_SAMPLE_RANDOM, DRV_NAME, dev);
1da177e4
LT
448 if (err) {
449 release_region(dev->base_addr, MC32_IO_EXTENT);
450 printk(KERN_ERR "%s: unable to get IRQ %d.\n", DRV_NAME, dev->irq);
451 goto err_exit_ports;
452 }
453
454 memset(lp, 0, sizeof(struct mc32_local));
455 lp->slot = slot;
456
457 i=0;
458
459 base = inb(dev->base_addr);
6aa20a22 460
1da177e4
LT
461 while(base == 0xFF)
462 {
463 i++;
464 if(i == 1000)
465 {
466 printk(KERN_ERR "%s: failed to boot adapter.\n", dev->name);
6aa20a22 467 err = -ENODEV;
1da177e4
LT
468 goto err_exit_irq;
469 }
470 udelay(1000);
471 if(inb(dev->base_addr+2)&(1<<5))
472 base = inb(dev->base_addr);
473 }
474
475 if(base>0)
476 {
477 if(base < 0x0C)
478 printk(KERN_ERR "%s: %s%s.\n", dev->name, failures[base-1],
479 base<0x0A?" test failure":"");
480 else
481 printk(KERN_ERR "%s: unknown failure %d.\n", dev->name, base);
6aa20a22 482 err = -ENODEV;
1da177e4
LT
483 goto err_exit_irq;
484 }
6aa20a22 485
1da177e4
LT
486 base=0;
487 for(i=0;i<4;i++)
488 {
489 int n=0;
6aa20a22 490
1da177e4
LT
491 while(!(inb(dev->base_addr+2)&(1<<5)))
492 {
493 n++;
494 udelay(50);
495 if(n>100)
496 {
497 printk(KERN_ERR "%s: mailbox read fail (%d).\n", dev->name, i);
498 err = -ENODEV;
499 goto err_exit_irq;
500 }
501 }
502
503 base|=(inb(dev->base_addr)<<(8*i));
504 }
6aa20a22 505
1da177e4 506 lp->exec_box=isa_bus_to_virt(dev->mem_start+base);
6aa20a22
JG
507
508 base=lp->exec_box->data[1]<<16|lp->exec_box->data[0];
509
1da177e4 510 lp->base = dev->mem_start+base;
6aa20a22
JG
511
512 lp->rx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[2]);
1da177e4 513 lp->tx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[3]);
6aa20a22 514
1da177e4
LT
515 lp->stats = isa_bus_to_virt(lp->base + lp->exec_box->data[5]);
516
517 /*
518 * Descriptor chains (card relative)
519 */
6aa20a22 520
1da177e4
LT
521 lp->tx_chain = lp->exec_box->data[8]; /* Transmit list start offset */
522 lp->rx_chain = lp->exec_box->data[10]; /* Receive list start offset */
6aa20a22 523 lp->tx_len = lp->exec_box->data[9]; /* Transmit list count */
1da177e4
LT
524 lp->rx_len = lp->exec_box->data[11]; /* Receive list count */
525
526 init_MUTEX_LOCKED(&lp->cmd_mutex);
527 init_completion(&lp->execution_cmd);
528 init_completion(&lp->xceiver_cmd);
6aa20a22 529
1da177e4
LT
530 printk("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n",
531 dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base);
532
4394e653 533 dev->netdev_ops = &netdev_ops;
1da177e4
LT
534 dev->watchdog_timeo = HZ*5; /* Board does all the work */
535 dev->ethtool_ops = &netdev_ethtool_ops;
536
537 return 0;
538
539err_exit_irq:
540 free_irq(dev->irq, dev);
541err_exit_ports:
542 release_region(dev->base_addr, MC32_IO_EXTENT);
543 return err;
544}
545
546
547/**
548 * mc32_ready_poll - wait until we can feed it a command
549 * @dev: The device to wait for
6aa20a22 550 *
1da177e4
LT
551 * Wait until the card becomes ready to accept a command via the
552 * command register. This tells us nothing about the completion
553 * status of any pending commands and takes very little time at all.
554 */
6aa20a22 555
1da177e4
LT
556static inline void mc32_ready_poll(struct net_device *dev)
557{
558 int ioaddr = dev->base_addr;
559 while(!(inb(ioaddr+HOST_STATUS)&HOST_STATUS_CRR));
560}
561
562
563/**
564 * mc32_command_nowait - send a command non blocking
565 * @dev: The 3c527 to issue the command to
566 * @cmd: The command word to write to the mailbox
567 * @data: A data block if the command expects one
568 * @len: Length of the data block
569 *
570 * Send a command from interrupt state. If there is a command
571 * currently being executed then we return an error of -1. It
572 * simply isn't viable to wait around as commands may be
573 * slow. This can theoretically be starved on SMP, but it's hard
574 * to see a realistic situation. We do not wait for the command
575 * to complete --- we rely on the interrupt handler to tidy up
576 * after us.
577 */
578
579static int mc32_command_nowait(struct net_device *dev, u16 cmd, void *data, int len)
580{
581 struct mc32_local *lp = netdev_priv(dev);
582 int ioaddr = dev->base_addr;
583 int ret = -1;
584
585 if (down_trylock(&lp->cmd_mutex) == 0)
586 {
587 lp->cmd_nonblocking=1;
588 lp->exec_box->mbox=0;
589 lp->exec_box->mbox=cmd;
590 memcpy((void *)lp->exec_box->data, data, len);
591 barrier(); /* the memcpy forgot the volatile so be sure */
592
593 /* Send the command */
594 mc32_ready_poll(dev);
595 outb(1<<6, ioaddr+HOST_CMD);
596
597 ret = 0;
598
599 /* Interrupt handler will signal mutex on completion */
600 }
601
602 return ret;
603}
604
605
606/**
607 * mc32_command - send a command and sleep until completion
608 * @dev: The 3c527 card to issue the command to
609 * @cmd: The command word to write to the mailbox
610 * @data: A data block if the command expects one
611 * @len: Length of the data block
612 *
613 * Sends exec commands in a user context. This permits us to wait around
614 * for the replies and also to wait for the command buffer to complete
6aa20a22 615 * from a previous command before we execute our command. After our
1da177e4
LT
616 * command completes we will attempt any pending multicast reload
617 * we blocked off by hogging the exec buffer.
618 *
6aa20a22 619 * You feed the card a command, you wait, it interrupts you get a
1da177e4
LT
620 * reply. All well and good. The complication arises because you use
621 * commands for filter list changes which come in at bh level from things
622 * like IPV6 group stuff.
623 */
6aa20a22 624
1da177e4
LT
625static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len)
626{
627 struct mc32_local *lp = netdev_priv(dev);
628 int ioaddr = dev->base_addr;
629 int ret = 0;
6aa20a22 630
1da177e4
LT
631 down(&lp->cmd_mutex);
632
633 /*
634 * My Turn
635 */
636
637 lp->cmd_nonblocking=0;
638 lp->exec_box->mbox=0;
639 lp->exec_box->mbox=cmd;
640 memcpy((void *)lp->exec_box->data, data, len);
641 barrier(); /* the memcpy forgot the volatile so be sure */
642
643 mc32_ready_poll(dev);
644 outb(1<<6, ioaddr+HOST_CMD);
645
646 wait_for_completion(&lp->execution_cmd);
6aa20a22 647
1da177e4
LT
648 if(lp->exec_box->mbox&(1<<13))
649 ret = -1;
650
651 up(&lp->cmd_mutex);
652
653 /*
654 * A multicast set got blocked - try it now
655 */
656
657 if(lp->mc_reload_wait)
658 {
659 mc32_reset_multicast_list(dev);
660 }
661
662 return ret;
663}
664
665
666/**
667 * mc32_start_transceiver - tell board to restart tx/rx
668 * @dev: The 3c527 card to issue the command to
669 *
670 * This may be called from the interrupt state, where it is used
6aa20a22
JG
671 * to restart the rx ring if the card runs out of rx buffers.
672 *
1da177e4
LT
673 * We must first check if it's ok to (re)start the transceiver. See
674 * mc32_close for details.
675 */
676
677static void mc32_start_transceiver(struct net_device *dev) {
678
679 struct mc32_local *lp = netdev_priv(dev);
680 int ioaddr = dev->base_addr;
681
6aa20a22 682 /* Ignore RX overflow on device closure */
1da177e4 683 if (lp->xceiver_desired_state==HALTED)
6aa20a22 684 return;
1da177e4
LT
685
686 /* Give the card the offset to the post-EOL-bit RX descriptor */
6aa20a22 687 mc32_ready_poll(dev);
1da177e4 688 lp->rx_box->mbox=0;
6aa20a22
JG
689 lp->rx_box->data[0]=lp->rx_ring[prev_rx(lp->rx_ring_tail)].p->next;
690 outb(HOST_CMD_START_RX, ioaddr+HOST_CMD);
1da177e4 691
6aa20a22 692 mc32_ready_poll(dev);
1da177e4 693 lp->tx_box->mbox=0;
6aa20a22
JG
694 outb(HOST_CMD_RESTRT_TX, ioaddr+HOST_CMD); /* card ignores this on RX restart */
695
696 /* We are not interrupted on start completion */
1da177e4
LT
697}
698
699
700/**
701 * mc32_halt_transceiver - tell board to stop tx/rx
702 * @dev: The 3c527 card to issue the command to
703 *
704 * We issue the commands to halt the card's transceiver. In fact,
705 * after some experimenting we now simply tell the card to
706 * suspend. When issuing aborts occasionally odd things happened.
707 *
708 * We then sleep until the card has notified us that both rx and
709 * tx have been suspended.
6aa20a22 710 */
1da177e4 711
6aa20a22 712static void mc32_halt_transceiver(struct net_device *dev)
1da177e4
LT
713{
714 struct mc32_local *lp = netdev_priv(dev);
715 int ioaddr = dev->base_addr;
716
6aa20a22 717 mc32_ready_poll(dev);
1da177e4 718 lp->rx_box->mbox=0;
6aa20a22 719 outb(HOST_CMD_SUSPND_RX, ioaddr+HOST_CMD);
1da177e4
LT
720 wait_for_completion(&lp->xceiver_cmd);
721
6aa20a22 722 mc32_ready_poll(dev);
1da177e4 723 lp->tx_box->mbox=0;
6aa20a22 724 outb(HOST_CMD_SUSPND_TX, ioaddr+HOST_CMD);
1da177e4
LT
725 wait_for_completion(&lp->xceiver_cmd);
726}
727
728
729/**
730 * mc32_load_rx_ring - load the ring of receive buffers
731 * @dev: 3c527 to build the ring for
732 *
733 * This initalises the on-card and driver datastructures to
734 * the point where mc32_start_transceiver() can be called.
735 *
736 * The card sets up the receive ring for us. We are required to use the
737 * ring it provides, although the size of the ring is configurable.
738 *
739 * We allocate an sk_buff for each ring entry in turn and
740 * initalise its house-keeping info. At the same time, we read
741 * each 'next' pointer in our rx_ring array. This reduces slow
742 * shared-memory reads and makes it easy to access predecessor
743 * descriptors.
744 *
745 * We then set the end-of-list bit for the last entry so that the
746 * card will know when it has run out of buffers.
747 */
6aa20a22 748
1da177e4
LT
749static int mc32_load_rx_ring(struct net_device *dev)
750{
751 struct mc32_local *lp = netdev_priv(dev);
752 int i;
753 u16 rx_base;
754 volatile struct skb_header *p;
6aa20a22 755
1da177e4
LT
756 rx_base=lp->rx_chain;
757
758 for(i=0; i<RX_RING_LEN; i++) {
759 lp->rx_ring[i].skb=alloc_skb(1532, GFP_KERNEL);
760 if (lp->rx_ring[i].skb==NULL) {
761 for (;i>=0;i--)
762 kfree_skb(lp->rx_ring[i].skb);
763 return -ENOBUFS;
764 }
765 skb_reserve(lp->rx_ring[i].skb, 18);
766
767 p=isa_bus_to_virt(lp->base+rx_base);
6aa20a22 768
1da177e4
LT
769 p->control=0;
770 p->data=isa_virt_to_bus(lp->rx_ring[i].skb->data);
771 p->status=0;
772 p->length=1532;
6aa20a22
JG
773
774 lp->rx_ring[i].p=p;
775 rx_base=p->next;
1da177e4
LT
776 }
777
778 lp->rx_ring[i-1].p->control |= CONTROL_EOL;
779
780 lp->rx_ring_tail=0;
781
782 return 0;
6aa20a22 783}
1da177e4
LT
784
785
786/**
787 * mc32_flush_rx_ring - free the ring of receive buffers
788 * @lp: Local data of 3c527 to flush the rx ring of
789 *
6aa20a22 790 * Free the buffer for each ring slot. This may be called
1da177e4
LT
791 * before mc32_load_rx_ring(), eg. on error in mc32_open().
792 * Requires rx skb pointers to point to a valid skb, or NULL.
793 */
794
795static void mc32_flush_rx_ring(struct net_device *dev)
796{
797 struct mc32_local *lp = netdev_priv(dev);
6aa20a22 798 int i;
1da177e4 799
6aa20a22
JG
800 for(i=0; i < RX_RING_LEN; i++)
801 {
1da177e4
LT
802 if (lp->rx_ring[i].skb) {
803 dev_kfree_skb(lp->rx_ring[i].skb);
804 lp->rx_ring[i].skb = NULL;
805 }
6aa20a22
JG
806 lp->rx_ring[i].p=NULL;
807 }
1da177e4
LT
808}
809
810
811/**
812 * mc32_load_tx_ring - load transmit ring
813 * @dev: The 3c527 card to issue the command to
814 *
6aa20a22 815 * This sets up the host transmit data-structures.
1da177e4
LT
816 *
817 * First, we obtain from the card it's current postion in the tx
818 * ring, so that we will know where to begin transmitting
819 * packets.
6aa20a22 820 *
1da177e4
LT
821 * Then, we read the 'next' pointers from the on-card tx ring into
822 * our tx_ring array to reduce slow shared-mem reads. Finally, we
823 * intitalise the tx house keeping variables.
6aa20a22
JG
824 *
825 */
1da177e4
LT
826
827static void mc32_load_tx_ring(struct net_device *dev)
6aa20a22 828{
1da177e4
LT
829 struct mc32_local *lp = netdev_priv(dev);
830 volatile struct skb_header *p;
6aa20a22 831 int i;
1da177e4
LT
832 u16 tx_base;
833
6aa20a22 834 tx_base=lp->tx_box->data[0];
1da177e4
LT
835
836 for(i=0 ; i<TX_RING_LEN ; i++)
837 {
838 p=isa_bus_to_virt(lp->base+tx_base);
6aa20a22 839 lp->tx_ring[i].p=p;
1da177e4
LT
840 lp->tx_ring[i].skb=NULL;
841
842 tx_base=p->next;
843 }
844
845 /* -1 so that tx_ring_head cannot "lap" tx_ring_tail */
846 /* see mc32_tx_ring */
847
6aa20a22
JG
848 atomic_set(&lp->tx_count, TX_RING_LEN-1);
849 atomic_set(&lp->tx_ring_head, 0);
850 lp->tx_ring_tail=0;
851}
1da177e4
LT
852
853
854/**
855 * mc32_flush_tx_ring - free transmit ring
856 * @lp: Local data of 3c527 to flush the tx ring of
857 *
858 * If the ring is non-empty, zip over the it, freeing any
859 * allocated skb_buffs. The tx ring house-keeping variables are
860 * then reset. Requires rx skb pointers to point to a valid skb,
861 * or NULL.
862 */
863
864static void mc32_flush_tx_ring(struct net_device *dev)
865{
866 struct mc32_local *lp = netdev_priv(dev);
867 int i;
868
869 for (i=0; i < TX_RING_LEN; i++)
870 {
871 if (lp->tx_ring[i].skb)
872 {
873 dev_kfree_skb(lp->tx_ring[i].skb);
874 lp->tx_ring[i].skb = NULL;
875 }
876 }
877
6aa20a22
JG
878 atomic_set(&lp->tx_count, 0);
879 atomic_set(&lp->tx_ring_head, 0);
1da177e4
LT
880 lp->tx_ring_tail=0;
881}
6aa20a22 882
1da177e4
LT
883
884/**
885 * mc32_open - handle 'up' of card
886 * @dev: device to open
887 *
888 * The user is trying to bring the card into ready state. This requires
889 * a brief dialogue with the card. Firstly we enable interrupts and then
890 * 'indications'. Without these enabled the card doesn't bother telling
891 * us what it has done. This had me puzzled for a week.
892 *
893 * We configure the number of card descriptors, then load the network
894 * address and multicast filters. Turn on the workaround mode. This
895 * works around a bug in the 82586 - it asks the firmware to do
896 * so. It has a performance (latency) hit but is needed on busy
897 * [read most] lans. We load the ring with buffers then we kick it
898 * all off.
899 */
900
901static int mc32_open(struct net_device *dev)
902{
903 int ioaddr = dev->base_addr;
904 struct mc32_local *lp = netdev_priv(dev);
905 u8 one=1;
906 u8 regs;
907 u16 descnumbuffs[2] = {TX_RING_LEN, RX_RING_LEN};
908
909 /*
910 * Interrupts enabled
911 */
912
913 regs=inb(ioaddr+HOST_CTRL);
914 regs|=HOST_CTRL_INTE;
915 outb(regs, ioaddr+HOST_CTRL);
6aa20a22 916
1da177e4
LT
917 /*
918 * Allow ourselves to issue commands
919 */
920
921 up(&lp->cmd_mutex);
922
923
924 /*
925 * Send the indications on command
926 */
927
928 mc32_command(dev, 4, &one, 2);
929
930 /*
6aa20a22 931 * Poke it to make sure it's really dead.
1da177e4
LT
932 */
933
6aa20a22
JG
934 mc32_halt_transceiver(dev);
935 mc32_flush_tx_ring(dev);
1da177e4 936
6aa20a22
JG
937 /*
938 * Ask card to set up on-card descriptors to our spec
939 */
1da177e4 940
6aa20a22 941 if(mc32_command(dev, 8, descnumbuffs, 4)) {
1da177e4
LT
942 printk("%s: %s rejected our buffer configuration!\n",
943 dev->name, cardname);
6aa20a22
JG
944 mc32_close(dev);
945 return -ENOBUFS;
1da177e4 946 }
6aa20a22
JG
947
948 /* Report new configuration */
949 mc32_command(dev, 6, NULL, 0);
1da177e4
LT
950
951 lp->tx_chain = lp->exec_box->data[8]; /* Transmit list start offset */
952 lp->rx_chain = lp->exec_box->data[10]; /* Receive list start offset */
6aa20a22 953 lp->tx_len = lp->exec_box->data[9]; /* Transmit list count */
1da177e4 954 lp->rx_len = lp->exec_box->data[11]; /* Receive list count */
6aa20a22 955
1da177e4
LT
956 /* Set Network Address */
957 mc32_command(dev, 1, dev->dev_addr, 6);
6aa20a22 958
1da177e4
LT
959 /* Set the filters */
960 mc32_set_multicast_list(dev);
6aa20a22
JG
961
962 if (WORKAROUND_82586) {
1da177e4
LT
963 u16 zero_word=0;
964 mc32_command(dev, 0x0D, &zero_word, 2); /* 82586 bug workaround on */
965 }
966
967 mc32_load_tx_ring(dev);
6aa20a22
JG
968
969 if(mc32_load_rx_ring(dev))
1da177e4
LT
970 {
971 mc32_close(dev);
972 return -ENOBUFS;
973 }
974
975 lp->xceiver_desired_state = RUNNING;
6aa20a22 976
1da177e4
LT
977 /* And finally, set the ball rolling... */
978 mc32_start_transceiver(dev);
979
980 netif_start_queue(dev);
981
982 return 0;
983}
984
985
986/**
987 * mc32_timeout - handle a timeout from the network layer
988 * @dev: 3c527 that timed out
989 *
990 * Handle a timeout on transmit from the 3c527. This normally means
991 * bad things as the hardware handles cable timeouts and mess for
992 * us.
993 *
994 */
995
996static void mc32_timeout(struct net_device *dev)
997{
998 printk(KERN_WARNING "%s: transmit timed out?\n", dev->name);
999 /* Try to restart the adaptor. */
1000 netif_wake_queue(dev);
1001}
1002
1003
/**
 * mc32_send_packet - queue a frame for transmit
 * @skb: buffer to transmit
 * @dev: 3c527 to send it out of
 *
 * Transmit a buffer. This normally means throwing the buffer onto
 * the transmit queue as the queue is quite large. If the queue is
 * full then we set tx_busy and return. Once the interrupt handler
 * gets messages telling it to reclaim transmit queue entries, we will
 * clear tx_busy and the kernel will start calling this again.
 *
 * We do not disable interrupts or acquire any locks; this can
 * run concurrently with mc32_tx_ring(), and the function itself
 * is serialised at a higher layer. However, similarly for the
 * card itself, we must ensure that we update tx_ring_head only
 * after we've established a valid packet on the tx ring (and
 * before we let the card "see" it, to prevent it racing with the
 * irq handler).
 *
 * Returns 0 when the frame was queued, non-zero when the ring is
 * full and the stack should requeue the frame (legacy convention).
 */

static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	u32 head = atomic_read(&lp->tx_ring_head);

	volatile struct skb_header *p, *np;

	/* Pessimistically stop the queue; re-enabled below on success. */
	netif_stop_queue(dev);

	if(atomic_read(&lp->tx_count)==0) {
		/* No free descriptors: ask the stack to retry this skb. */
		return 1;
	}

	/* Pad runt frames up to the ethernet minimum; on allocation
	   failure skb_padto has already freed the skb, so report success. */
	if (skb_padto(skb, ETH_ZLEN)) {
		netif_wake_queue(dev);
		return 0;
	}

	atomic_dec(&lp->tx_count);

	/* P is the last sending/sent buffer as a pointer */
	p=lp->tx_ring[head].p;

	head = next_tx(head);

	/* NP is the buffer we will be loading */
	np=lp->tx_ring[head].p;

	/* We will need this to flush the buffer out */
	lp->tx_ring[head].skb=skb;

	np->length = unlikely(skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
	np->data = isa_virt_to_bus(skb->data);
	np->status = 0;
	/* Mark as last descriptor so the card stops here until we
	   clear the previous descriptor's EOL below. */
	np->control = CONTROL_EOP | CONTROL_EOL;
	/* Descriptor writes must be visible before the card/irq handler
	   can observe the updated ring head. */
	wmb();

	/*
	 * The new frame has been setup; we can now
	 * let the interrupt handler and card "see" it
	 */

	atomic_set(&lp->tx_ring_head, head);
	p->control &= ~CONTROL_EOL;

	netif_wake_queue(dev);
	return 0;
}
1073
1074
1075/**
1076 * mc32_update_stats - pull off the on board statistics
1077 * @dev: 3c527 to service
1078 *
6aa20a22 1079 *
1da177e4
LT
1080 * Query and reset the on-card stats. There's the small possibility
1081 * of a race here, which would result in an underestimation of
1082 * actual errors. As such, we'd prefer to keep all our stats
1083 * collection in software. As a rule, we do. However it can't be
1084 * used for rx errors and collisions as, by default, the card discards
6aa20a22 1085 * bad rx packets.
1da177e4
LT
1086 *
1087 * Setting the SAV BP in the rx filter command supposedly
1088 * stops this behaviour. However, testing shows that it only seems to
1089 * enable the collation of on-card rx statistics --- the driver
1090 * never sees an RX descriptor with an error status set.
1091 *
1092 */
1093
1094static void mc32_update_stats(struct net_device *dev)
1095{
1096 struct mc32_local *lp = netdev_priv(dev);
6aa20a22 1097 volatile struct mc32_stats *st = lp->stats;
1da177e4 1098
6aa20a22
JG
1099 u32 rx_errors=0;
1100
4711c841 1101 rx_errors+=dev->stats.rx_crc_errors +=st->rx_crc_errors;
1da177e4 1102 st->rx_crc_errors=0;
4711c841 1103 rx_errors+=dev->stats.rx_fifo_errors +=st->rx_overrun_errors;
6aa20a22 1104 st->rx_overrun_errors=0;
4711c841 1105 rx_errors+=dev->stats.rx_frame_errors +=st->rx_alignment_errors;
1da177e4 1106 st->rx_alignment_errors=0;
4711c841 1107 rx_errors+=dev->stats.rx_length_errors+=st->rx_tooshort_errors;
1da177e4 1108 st->rx_tooshort_errors=0;
4711c841 1109 rx_errors+=dev->stats.rx_missed_errors+=st->rx_outofresource_errors;
6aa20a22 1110 st->rx_outofresource_errors=0;
4711c841 1111 dev->stats.rx_errors=rx_errors;
6aa20a22 1112
1da177e4 1113 /* Number of packets which saw one collision */
4711c841 1114 dev->stats.collisions+=st->dataC[10];
6aa20a22 1115 st->dataC[10]=0;
1da177e4 1116
6aa20a22 1117 /* Number of packets which saw 2--15 collisions */
4711c841 1118 dev->stats.collisions+=st->dataC[11];
6aa20a22
JG
1119 st->dataC[11]=0;
1120}
1da177e4
LT
1121
1122
/**
 * mc32_rx_ring - process the receive ring
 * @dev: 3c527 that needs its receive ring processing
 *
 *
 * We have received one or more indications from the card that a
 * receive has completed. The buffer ring thus contains dirty
 * entries. We walk the ring by iterating over the circular rx_ring
 * array, starting at the next dirty buffer (which happens to be the
 * one we finished up at last time around).
 *
 * For each completed packet, we will either copy it and pass it up
 * the stack or, if the packet is near MTU sized, we allocate
 * another buffer and flip the old one up the stack.
 *
 * We must succeed in keeping a buffer on the ring. If necessary we
 * will toss a received packet rather than lose a ring entry. Once
 * the first uncompleted descriptor is found, we move the
 * End-Of-List bit to include the buffers just processed.
 *
 */

static void mc32_rx_ring(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct skb_header *p;
	u16 rx_ring_tail;
	u16 rx_old_tail;
	int x=0;

	rx_old_tail = rx_ring_tail = lp->rx_ring_tail;

	/* Walk at most 48 descriptors per call to bound the time spent
	   in (interrupt) context. */
	do
	{
		p=lp->rx_ring[rx_ring_tail].p;

		if(!(p->status & (1<<7))) {	/* Not COMPLETED */
			break;
		}
		if(p->status & (1<<6)) /* COMPLETED_OK */
		{

			u16 length=p->length;
			struct sk_buff *skb;
			struct sk_buff *newskb;

			/* Try to save time by avoiding a copy on big frames */

			if ((length > RX_COPYBREAK)
			    && ((newskb=dev_alloc_skb(1532)) != NULL))
			{
				/* Flip the filled buffer up the stack and put a
				   fresh one on the ring in its place. */
				skb=lp->rx_ring[rx_ring_tail].skb;
				skb_put(skb, length);

				/* 18-byte reserve matches the ring setup done at
				   open time — presumably header alignment slack;
				   TODO confirm against mc32_load_rx_ring(). */
				skb_reserve(newskb,18);
				lp->rx_ring[rx_ring_tail].skb=newskb;
				p->data=isa_virt_to_bus(newskb->data);
			}
			else
			{
				/* Small frame: copy into a fresh skb and leave the
				   ring buffer in place. +2/reserve(2) aligns the IP
				   header on a 16-byte boundary. */
				skb=dev_alloc_skb(length+2);

				if(skb==NULL) {
					dev->stats.rx_dropped++;
					goto dropped;
				}

				skb_reserve(skb,2);
				memcpy(skb_put(skb, length),
				       lp->rx_ring[rx_ring_tail].skb->data, length);
			}

			skb->protocol=eth_type_trans(skb,dev);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += length;
			netif_rx(skb);
		}

	dropped:
		/* Recycle the descriptor (also reached for error frames and
		   allocation failures): reset to full buffer size, clear
		   status so the card can refill it. */
		p->length = 1532;
		p->status = 0;

		rx_ring_tail=next_rx(rx_ring_tail);
	}
	while(x++<48);

	/* If there was actually a frame to be processed, place the EOL bit */
	/* at the descriptor prior to the one to be filled next */

	if (rx_ring_tail != rx_old_tail)
	{
		lp->rx_ring[prev_rx(rx_ring_tail)].p->control |= CONTROL_EOL;
		lp->rx_ring[prev_rx(rx_old_tail)].p->control &= ~CONTROL_EOL;

		lp->rx_ring_tail=rx_ring_tail;
	}
}
1220
1221
/**
 * mc32_tx_ring - process completed transmits
 * @dev: 3c527 that needs its transmit ring processing
 *
 *
 * This operates in a similar fashion to mc32_rx_ring. We iterate
 * over the transmit ring. For each descriptor which has been
 * processed by the card, we free its associated buffer and note
 * any errors. This continues until the transmit ring is emptied
 * or we reach a descriptor that hasn't yet been processed by the
 * card.
 *
 * Runs in interrupt context (uses dev_kfree_skb_irq).
 */

static void mc32_tx_ring(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct skb_header *np;

	/*
	 * We rely on head==tail to mean 'queue empty'.
	 * This is why lp->tx_count=TX_RING_LEN-1: in order to prevent
	 * tx_ring_head wrapping to tail and confusing a 'queue empty'
	 * condition with 'queue full'
	 */

	while (lp->tx_ring_tail != atomic_read(&lp->tx_ring_head))
	{
		u16 t;

		/* Examine the descriptor after the current tail; the tail
		   itself is only advanced once that entry is reclaimed. */
		t=next_tx(lp->tx_ring_tail);
		np=lp->tx_ring[t].p;

		if(!(np->status & (1<<7)))
		{
			/* Not COMPLETED */
			break;
		}
		dev->stats.tx_packets++;
		if(!(np->status & (1<<6))) /* Not COMPLETED_OK */
		{
			dev->stats.tx_errors++;

			/* Low nibble of the status byte encodes the failure
			   cause reported by the card. */
			switch(np->status&0x0F)
			{
				case 1:
					dev->stats.tx_aborted_errors++;
					break; /* Max collisions */
				case 2:
					dev->stats.tx_fifo_errors++;
					break;
				case 3:
					dev->stats.tx_carrier_errors++;
					break;
				case 4:
					dev->stats.tx_window_errors++;
					break; /* CTS Lost */
				case 5:
					dev->stats.tx_aborted_errors++;
					break; /* Transmit timeout */
			}
		}
		/* Packets are sent in order - this is
		   basically a FIFO queue of buffers matching
		   the card ring */
		dev->stats.tx_bytes+=lp->tx_ring[t].skb->len;
		dev_kfree_skb_irq(lp->tx_ring[t].skb);
		lp->tx_ring[t].skb=NULL;
		atomic_inc(&lp->tx_count);
		netif_wake_queue(dev);

		lp->tx_ring_tail=t;
	}

}
1da177e4
LT
1297
1298
/**
 * mc32_interrupt - handle an interrupt from a 3c527
 * @irq: Interrupt number
 * @dev_id: 3c527 that requires servicing
 *
 *
 * An interrupt is raised whenever the 3c527 writes to the command
 * register. This register contains the message it wishes to send us
 * packed into a single byte field. We keep reading status entries
 * until we have processed all the control items, but simply count
 * transmit and receive reports. When all reports are in we empty the
 * transceiver rings as appropriate. This saves the overhead of
 * multiple command requests.
 *
 * Because MCA is level-triggered, we shouldn't miss indications.
 * Therefore, we needn't ask the card to suspend interrupts within
 * this handler. The card receives an implicit acknowledgment of the
 * current interrupt when we read the command register.
 *
 */

static irqreturn_t mc32_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct mc32_local *lp;
	int ioaddr, status, boguscount = 0;
	int rx_event = 0;
	int tx_event = 0;

	ioaddr = dev->base_addr;
	lp = netdev_priv(dev);

	/* See whats cooking */

	/* boguscount caps the loop in case the status bit sticks. */
	while((inb(ioaddr+HOST_STATUS)&HOST_STATUS_CWR) && boguscount++<2000)
	{
		status=inb(ioaddr+HOST_CMD);

#ifdef DEBUG_IRQ
		printk("Status TX%d RX%d EX%d OV%d BC%d\n",
			(status&7), (status>>3)&7, (status>>6)&1,
			(status>>7)&1, boguscount);
#endif

		/* Bits 0-2: transmit indication */
		switch(status&7)
		{
			case 0:
				break;
			case 6: /* TX fail */
			case 2:	/* TX ok */
				tx_event = 1;
				break;
			case 3: /* Halt */
			case 4: /* Abort */
				complete(&lp->xceiver_cmd);
				break;
			default:
				printk("%s: strange tx ack %d\n", dev->name, status&7);
		}
		status>>=3;
		/* Bits 3-5: receive indication */
		switch(status&7)
		{
			case 0:
				break;
			case 2:	/* RX */
				rx_event=1;
				break;
			case 3: /* Halt */
			case 4: /* Abort */
				complete(&lp->xceiver_cmd);
				break;
			case 6:
				/* Out of RX buffers stat */
				/* Must restart rx */
				dev->stats.rx_dropped++;
				mc32_rx_ring(dev);
				mc32_start_transceiver(dev);
				break;
			default:
				printk("%s: strange rx ack %d\n",
					dev->name, status&7);
		}
		status>>=3;
		/* Bit 6: an execution command completed */
		if(status&1)
		{
			/*
			 * No thread is waiting: we need to tidy
			 * up ourself.
			 */

			if (lp->cmd_nonblocking) {
				up(&lp->cmd_mutex);
				/* A multicast reload was deferred while the
				   card was busy; run it now. */
				if (lp->mc_reload_wait)
					mc32_reset_multicast_list(dev);
			}
			else complete(&lp->execution_cmd);
		}
		/* Bit 7: a statistics counter is near overflow */
		if(status&2)
		{
			/*
			 * We get interrupted once per
			 * counter that is about to overflow.
			 */

			mc32_update_stats(dev);
		}
	}


	/*
	 * Process the transmit and receive rings
	 */

	if(tx_event)
		mc32_tx_ring(dev);

	if(rx_event)
		mc32_rx_ring(dev);

	return IRQ_HANDLED;
}
1421
1422
/**
 * mc32_close - user configuring the 3c527 down
 * @dev: 3c527 card to shut down
 *
 * The 3c527 is a bus mastering device. We must be careful how we
 * shut it down. It may also be running shared interrupt so we have
 * to be sure to silence it properly
 *
 * We indicate that the card is closing to the rest of the
 * driver. Otherwise, it is possible that the card may run out
 * of receive buffers and restart the transceiver while we're
 * trying to close it.
 *
 * We abort any receive and transmits going on and then wait until
 * any pending exec commands have completed in other code threads.
 * In theory we can't get here while that is true, in practice I am
 * paranoid
 *
 * We turn off the interrupt enable for the board to be sure it can't
 * intefere with other devices.
 */

static int mc32_close(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	u8 regs;
	u16 one=1;

	/* Tell the rest of the driver we're going down so nothing
	   restarts the transceiver behind our back. */
	lp->xceiver_desired_state = HALTED;
	netif_stop_queue(dev);

	/*
	 * Send the indications on command (handy debug check)
	 */

	mc32_command(dev, 4, &one, 2);

	/* Shut down the transceiver */

	mc32_halt_transceiver(dev);

	/* Ensure we issue no more commands beyond this point */

	/* Note: cmd_mutex is taken and deliberately never released;
	   reopening the device is expected to reinitialise it. */
	down(&lp->cmd_mutex);

	/* Ok the card is now stopping */

	regs=inb(ioaddr+HOST_CTRL);
	regs&=~HOST_CTRL_INTE;
	outb(regs, ioaddr+HOST_CTRL);

	mc32_flush_rx_ring(dev);
	mc32_flush_tx_ring(dev);

	/* Grab the final on-card counters before the card goes quiet. */
	mc32_update_stats(dev);

	return 0;
}
1483
1484
1485/**
1486 * mc32_get_stats - hand back stats to network layer
1487 * @dev: The 3c527 card to handle
1488 *
1489 * We've collected all the stats we can in software already. Now
6aa20a22
JG
1490 * it's time to update those kept on-card and return the lot.
1491 *
1da177e4
LT
1492 */
1493
1494static struct net_device_stats *mc32_get_stats(struct net_device *dev)
1495{
6aa20a22 1496 mc32_update_stats(dev);
4711c841 1497 return &dev->stats;
1da177e4
LT
1498}
1499
1500
1501/**
1502 * do_mc32_set_multicast_list - attempt to update multicasts
1503 * @dev: 3c527 device to load the list on
6aa20a22 1504 * @retry: indicates this is not the first call.
1da177e4
LT
1505 *
1506 *
1507 * Actually set or clear the multicast filter for this adaptor. The
1508 * locking issues are handled by this routine. We have to track
1509 * state as it may take multiple calls to get the command sequence
1510 * completed. We just keep trying to schedule the loads until we
1511 * manage to process them all.
6aa20a22 1512 *
1da177e4 1513 * num_addrs == -1 Promiscuous mode, receive all packets
6aa20a22 1514 *
1da177e4 1515 * num_addrs == 0 Normal mode, clear multicast list
1da177e4 1516 *
6aa20a22
JG
1517 * num_addrs > 0 Multicast mode, receive normal and MC packets,
1518 * and do best-effort filtering.
1519 *
1520 * See mc32_update_stats() regards setting the SAV BP bit.
1da177e4
LT
1521 *
1522 */
1523
1524static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
1525{
1526 struct mc32_local *lp = netdev_priv(dev);
6aa20a22 1527 u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */
1da177e4 1528
c16d1185
WC
1529 if ((dev->flags&IFF_PROMISC) ||
1530 (dev->flags&IFF_ALLMULTI) ||
1531 dev->mc_count > 10)
1da177e4
LT
1532 /* Enable promiscuous mode */
1533 filt |= 1;
1da177e4
LT
1534 else if(dev->mc_count)
1535 {
1536 unsigned char block[62];
1537 unsigned char *bp;
1538 struct dev_mc_list *dmc=dev->mc_list;
6aa20a22 1539
1da177e4 1540 int i;
6aa20a22 1541
1da177e4
LT
1542 if(retry==0)
1543 lp->mc_list_valid = 0;
1544 if(!lp->mc_list_valid)
1545 {
1546 block[1]=0;
1547 block[0]=dev->mc_count;
1548 bp=block+2;
6aa20a22 1549
1da177e4
LT
1550 for(i=0;i<dev->mc_count;i++)
1551 {
1552 memcpy(bp, dmc->dmi_addr, 6);
1553 bp+=6;
1554 dmc=dmc->next;
1555 }
1556 if(mc32_command_nowait(dev, 2, block, 2+6*dev->mc_count)==-1)
1557 {
1558 lp->mc_reload_wait = 1;
1559 return;
1560 }
1561 lp->mc_list_valid=1;
1562 }
1563 }
6aa20a22
JG
1564
1565 if(mc32_command_nowait(dev, 0, &filt, 2)==-1)
1da177e4
LT
1566 {
1567 lp->mc_reload_wait = 1;
6aa20a22
JG
1568 }
1569 else {
1da177e4
LT
1570 lp->mc_reload_wait = 0;
1571 }
1572}
1573
1574
/**
 * mc32_set_multicast_list - queue multicast list update
 * @dev: The 3c527 to use
 *
 * Commence loading the multicast list. This is called when the kernel
 * changes the lists. It will override any pending list we are trying to
 * load.
 */

static void mc32_set_multicast_list(struct net_device *dev)
{
	/* retry=0: treat this as a fresh list, invalidating any pending one */
	do_mc32_set_multicast_list(dev,0);
}
1588
1589
/**
 * mc32_reset_multicast_list - reset multicast list
 * @dev: The 3c527 to use
 *
 * Attempt the next step in loading the multicast lists. If this attempt
 * fails to complete then it will be scheduled and this function called
 * again later from elsewhere.
 */

static void mc32_reset_multicast_list(struct net_device *dev)
{
	/* retry=1: continue a previously started load without invalidating it */
	do_mc32_set_multicast_list(dev,1);
}
1603
1604static void netdev_get_drvinfo(struct net_device *dev,
1605 struct ethtool_drvinfo *info)
1606{
1607 strcpy(info->driver, DRV_NAME);
1608 strcpy(info->version, DRV_VERSION);
1609 sprintf(info->bus_info, "MCA 0x%lx", dev->base_addr);
1610}
1611
/* ethtool get_msglevel hook: report the driver-wide debug level. */
static u32 netdev_get_msglevel(struct net_device *dev)
{
	return mc32_debug;
}
1616
/* ethtool set_msglevel hook: set the driver-wide debug level. */
static void netdev_set_msglevel(struct net_device *dev, u32 level)
{
	mc32_debug = level;
}
1621
/* ethtool operations supported by this driver: drvinfo and
   message-level get/set only. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
};
1627
1628#ifdef MODULE
1629
1630static struct net_device *this_device;
1631
/**
 * init_module - entry point
 *
 * Probe and locate a 3c527 card. This really should probe and locate
 * all the 3c527 cards in the machine not just one of them. Yes you can
 * insmod multiple modules for now but it's a hack.
 *
 * Returns 0 on success or the (negative) error encoded in the
 * ERR_PTR returned by mc32_probe().
 */

int __init init_module(void)
{
	/* -1: let the probe scan all MCA slots */
	this_device = mc32_probe(-1);
	if (IS_ERR(this_device))
		return PTR_ERR(this_device);
	return 0;
}
1647
/**
 * cleanup_module - free resources for an unload
 *
 * Unloading time. We release the MCA bus resources and the interrupt
 * at which point everything is ready to unload. The card must be stopped
 * at this point or we would not have been called. When we unload we
 * leave the card stopped but not totally shut down. When the card is
 * initialized it must be rebooted or the rings reloaded before any
 * transmit operations are allowed to start scribbling into memory.
 */

void __exit cleanup_module(void)
{
	/* Order matters: detach from the stack first, then release
	   the card's resources, then free the netdev itself. */
	unregister_netdev(this_device);
	cleanup_card(this_device);
	free_netdev(this_device);
}
1665
1666#endif /* MODULE */