/*
 * 3c359.c (c) 2000 Mike Phillips (mikep@linuxtr.net) All Rights Reserved
 *
 * Linux driver for 3Com 3c359 Tokenlink Velocity XL PCI NIC
 *
 * Base Driver Olympic:
 *	Written 1999 Peter De Schrijver & Mike Phillips
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 *
 * 7/17/00 - Clean up, version number 0.9.0. Ready to release to the world.
 *
 * 2/16/01 - Port up to kernel 2.4.2 ready for submission into the kernel.
 * 3/05/01 - Last clean up stuff before submission.
 * 2/15/01 - Finally, update to new pci api.
 *
 * To Do:
 */

/*
 * Technical Card Details
 *
 *  All access to data is done with 16/8 bit transfers.  The transfer
 *  method really sucks. You can only read or write one location at a time.
 *
 *  Also, the microcode for the card must be uploaded if the card does not have
 *  the flashrom on board.  This is a 28K bloat in the driver when compiled
 *  as a module.
 *
 *  Rx is very simple: status goes into a ring of descriptors, the data is moved
 *  by DMA, and an interrupt tells us when a packet has been received.
 *
 *  Tx is a little more interesting.  Similar scenario, descriptor and DMA data
 *  transfers, but we don't have to interrupt the card to tell it another packet
 *  is ready for transmission; we just do simple memory writes, not io or mmio
 *  writes.  The card can be set up to poll on the next descriptor pointer and,
 *  when this value is non-zero, it will automatically download the next packet.
 *  The card then interrupts us when the transmission is done.
 *
 */
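
/*
 * Descriptor layout as used in this file (the structures themselves live in
 * 3c359.h): a Tx descriptor carries framestartheader, dnnextptr, buffer and
 * buffer_length; an Rx descriptor carries framestatus, upnextptr, upfragaddr
 * and upfraglen.  The card follows each ring through the *nextptr fields and
 * stops when it reads a zero pointer, which is what the polling scheme
 * described above relies on.
 */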

#define XL_DEBUG 0

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/in.h>
#include <linux/ioport.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/trdevice.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/firmware.h>

#include <net/checksum.h>

#include <asm/io.h>
#include <asm/system.h>

#include "3c359.h"

static char version[] __devinitdata =
"3c359.c v1.2.0 2/17/01 - Mike Phillips (mikep@linuxtr.net)" ;

#define FW_NAME "3com/3C359.bin"
MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver\n") ;
MODULE_FIRMWARE(FW_NAME);

/* Module parameters */

/* Ring Speed 0,4,16
 * 0 = Autosense
 * 4,16 = Selected speed only, no autosense
 * This allows the card to be the first on the ring
 * and become the active monitor.
 *
 * WARNING: Some hubs will allow you to insert
 * at the wrong speed.
 *
 * The adapter will _not_ fail to open if there are no
 * active monitors on the ring; it will simply open up in
 * its last known ringspeed if no ringspeed is specified.
 */

static int ringspeed[XL_MAX_ADAPTERS] = {0,} ;

module_param_array(ringspeed, int, NULL, 0);
MODULE_PARM_DESC(ringspeed,"3c359: Ringspeed selection - 4,16 or 0") ;

/* Packet buffer size */

static int pkt_buf_sz[XL_MAX_ADAPTERS] = {0,} ;

module_param_array(pkt_buf_sz, int, NULL, 0) ;
MODULE_PARM_DESC(pkt_buf_sz,"3c359: Initial buffer size") ;
/* Message Level */

static int message_level[XL_MAX_ADAPTERS] = {0,} ;

module_param_array(message_level, int, NULL, 0) ;
MODULE_PARM_DESC(message_level, "3c359: Level of reported messages") ;
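
/*
 * Example usage (hypothetical values): "modprobe 3c359 ringspeed=16,16
 * pkt_buf_sz=4096,4096 message_level=1,1" would force the first two adapters
 * to 16 Mbps, use 4 KB packet buffers and enable the more verbose messages;
 * leaving an entry at 0 keeps the defaults described above.
 */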
/*
 *	This is a real nasty way of doing this, but otherwise you
 *	will be stuck with 1555 lines of hex #'s in the code.
 */

static DEFINE_PCI_DEVICE_TABLE(xl_pci_tbl) =
{
	{PCI_VENDOR_ID_3COM,PCI_DEVICE_ID_3COM_3C359, PCI_ANY_ID, PCI_ANY_ID, },
	{ }			/* terminate list */
};
MODULE_DEVICE_TABLE(pci,xl_pci_tbl) ;

static int xl_init(struct net_device *dev);
static int xl_open(struct net_device *dev);
static int xl_open_hw(struct net_device *dev) ;
static int xl_hw_reset(struct net_device *dev);
static netdev_tx_t xl_xmit(struct sk_buff *skb, struct net_device *dev);
static void xl_dn_comp(struct net_device *dev);
static int xl_close(struct net_device *dev);
static void xl_set_rx_mode(struct net_device *dev);
static irqreturn_t xl_interrupt(int irq, void *dev_id);
static int xl_set_mac_address(struct net_device *dev, void *addr) ;
static void xl_arb_cmd(struct net_device *dev);
static void xl_asb_cmd(struct net_device *dev) ;
static void xl_srb_cmd(struct net_device *dev, int srb_cmd) ;
static void xl_wait_misr_flags(struct net_device *dev) ;
static int xl_change_mtu(struct net_device *dev, int mtu);
static void xl_srb_bh(struct net_device *dev) ;
static void xl_asb_bh(struct net_device *dev) ;
static void xl_reset(struct net_device *dev) ;
static void xl_freemem(struct net_device *dev) ;


/* EEProm Access Functions */
static u16 xl_ee_read(struct net_device *dev, int ee_addr) ;
static void xl_ee_write(struct net_device *dev, int ee_addr, u16 ee_value) ;

/* Debugging functions */
#if XL_DEBUG
static void print_tx_state(struct net_device *dev) ;
static void print_rx_state(struct net_device *dev) ;

static void print_tx_state(struct net_device *dev)
{

	struct xl_private *xl_priv = netdev_priv(dev);
	struct xl_tx_desc *txd ;
	u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
	int i ;

	printk("tx_ring_head: %d, tx_ring_tail: %d, free_ent: %d\n",xl_priv->tx_ring_head,
		xl_priv->tx_ring_tail, xl_priv->free_ring_entries) ;
	printk("Ring , Address , FSH , DnNextPtr, Buffer, Buffer_Len\n");
	for (i = 0; i < 16; i++) {
		txd = &(xl_priv->xl_tx_ring[i]) ;
		printk("%d, %08lx, %08x, %08x, %08x, %08x\n", i, virt_to_bus(txd),
			txd->framestartheader, txd->dnnextptr, txd->buffer, txd->buffer_length ) ;
	}

	printk("DNLISTPTR = %04x\n", readl(xl_mmio + MMIO_DNLISTPTR) );

	printk("DmaCtl = %04x\n", readl(xl_mmio + MMIO_DMA_CTRL) );
	printk("Queue status = %0x\n",netif_running(dev) ) ;
}

static void print_rx_state(struct net_device *dev)
{

	struct xl_private *xl_priv = netdev_priv(dev);
	struct xl_rx_desc *rxd ;
	u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
	int i ;

	printk("rx_ring_tail: %d\n", xl_priv->rx_ring_tail);
	printk("Ring , Address , FrameState , UPNextPtr, FragAddr, Frag_Len\n");
	for (i = 0; i < 16; i++) {
		/* rxd = (struct xl_rx_desc *)xl_priv->rx_ring_dma_addr + (i * sizeof(struct xl_rx_desc)) ; */
		rxd = &(xl_priv->xl_rx_ring[i]) ;
		printk("%d, %08lx, %08x, %08x, %08x, %08x\n", i, virt_to_bus(rxd),
			rxd->framestatus, rxd->upnextptr, rxd->upfragaddr, rxd->upfraglen ) ;
	}

	printk("UPLISTPTR = %04x\n", readl(xl_mmio + MMIO_UPLISTPTR));

	printk("DmaCtl = %04x\n", readl(xl_mmio + MMIO_DMA_CTRL));
	printk("Queue status = %0x\n",netif_running(dev));
}
#endif

/*
 * Read values from the on-board EEProm.  This looks very strange
 * but you have to wait for the EEProm to get/set the value before
 * passing/getting the next value from the nic.  As with all requests
 * on this nic it has to be done in two stages: a) tell the nic which
 * memory address you want to access, and b) pass/get the value from the nic.
 * With the EEProm, you have to wait before and in between accesses a) and b).
 * As this is only read at initialization time and the wait period is very
 * small we shouldn't have to worry about scheduling issues.
 */
214
215static u16 xl_ee_read(struct net_device *dev, int ee_addr)
216{
eda10531 217 struct xl_private *xl_priv = netdev_priv(dev);
1da177e4
LT
218 u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
219
220 /* Wait for EEProm to not be busy */
221 writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
222 while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ;
223
224 /* Tell EEProm what we want to do and where */
225 writel(IO_WORD_WRITE | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
226 writew(EEREAD + ee_addr, xl_mmio + MMIO_MACDATA) ;
227
228 /* Wait for EEProm to not be busy */
229 writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
230 while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ;
231
232 /* Tell EEProm what we want to do and where */
233 writel(IO_WORD_WRITE | EECONTROL , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
234 writew(EEREAD + ee_addr, xl_mmio + MMIO_MACDATA) ;
235
236 /* Finally read the value from the EEProm */
237 writel(IO_WORD_READ | EEDATA , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
238 return readw(xl_mmio + MMIO_MACDATA) ;
239}
240
/*
 * Write values to the onboard eeprom.  As with eeprom read you need to
 * set which location to write, wait, then the value to write, wait, with the
 * added twist of having to enable eeprom writes as well.
 */
246
247static void xl_ee_write(struct net_device *dev, int ee_addr, u16 ee_value)
248{
eda10531 249 struct xl_private *xl_priv = netdev_priv(dev);
1da177e4
LT
250 u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
251
252 /* Wait for EEProm to not be busy */
253 writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
254 while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ;
255
256 /* Enable write/erase */
257 writel(IO_WORD_WRITE | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
258 writew(EE_ENABLE_WRITE, xl_mmio + MMIO_MACDATA) ;
259
260 /* Wait for EEProm to not be busy */
261 writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
262 while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ;
263
264 /* Put the value we want to write into EEDATA */
265 writel(IO_WORD_WRITE | EEDATA, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
266 writew(ee_value, xl_mmio + MMIO_MACDATA) ;
267
268 /* Tell EEProm to write eevalue into ee_addr */
269 writel(IO_WORD_WRITE | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
270 writew(EEWRITE + ee_addr, xl_mmio + MMIO_MACDATA) ;
271
272 /* Wait for EEProm to not be busy, to ensure write gets done */
273 writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
274 while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ;
275
276 return ;
277}
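
/*
 * Note: in this driver xl_ee_write() is used from xl_open() to flip the
 * ring-speed bit in EEPROM word 0x08, and each such write is followed by an
 * xl_hw_reset() so the adapter picks up the new setting.
 */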
69d65169
SH
278
279static const struct net_device_ops xl_netdev_ops = {
280 .ndo_open = xl_open,
281 .ndo_stop = xl_close,
282 .ndo_start_xmit = xl_xmit,
283 .ndo_change_mtu = xl_change_mtu,
284 .ndo_set_multicast_list = xl_set_rx_mode,
285 .ndo_set_mac_address = xl_set_mac_address,
286};
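
/*
 * There is no .ndo_get_stats hook here; the rx/tx paths below update
 * dev->stats (rx_packets, rx_bytes, rx_dropped, tx_packets, tx_bytes)
 * directly, which the standard statistics reporting picks up.
 */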
1da177e4 287
de70b4c8
AB
288static int __devinit xl_probe(struct pci_dev *pdev,
289 const struct pci_device_id *ent)
1da177e4
LT
290{
291 struct net_device *dev ;
292 struct xl_private *xl_priv ;
293 static int card_no = -1 ;
294 int i ;
295
296 card_no++ ;
297
298 if (pci_enable_device(pdev)) {
299 return -ENODEV ;
300 }
301
302 pci_set_master(pdev);
303
304 if ((i = pci_request_regions(pdev,"3c359"))) {
305 return i ;
306 } ;
307
	/*
	 * Allowing init_trdev to allocate the private data will align
	 * xl_private on a 32-byte boundary, which we need for the rx/tx
	 * descriptors.
	 */
313
314 dev = alloc_trdev(sizeof(struct xl_private)) ;
315 if (!dev) {
316 pci_release_regions(pdev) ;
317 return -ENOMEM ;
318 }
eda10531 319 xl_priv = netdev_priv(dev);
1da177e4
LT
320
321#if XL_DEBUG
322 printk("pci_device: %p, dev:%p, dev->priv: %p, ba[0]: %10x, ba[1]:%10x\n",
eda10531 323 pdev, dev, netdev_priv(dev), (unsigned int)pdev->resource[0].start, (unsigned int)pdev->resource[1].start);
1da177e4
LT
324#endif
325
326 dev->irq=pdev->irq;
327 dev->base_addr=pci_resource_start(pdev,0) ;
328 xl_priv->xl_card_name = pci_name(pdev);
329 xl_priv->xl_mmio=ioremap(pci_resource_start(pdev,1), XL_IO_SPACE);
330 xl_priv->pdev = pdev ;
331
332 if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000) )
333 xl_priv->pkt_buf_sz = PKT_BUF_SZ ;
334 else
335 xl_priv->pkt_buf_sz = pkt_buf_sz[card_no] ;
336
337 dev->mtu = xl_priv->pkt_buf_sz - TR_HLEN ;
338 xl_priv->xl_ring_speed = ringspeed[card_no] ;
339 xl_priv->xl_message_level = message_level[card_no] ;
340 xl_priv->xl_functional_addr[0] = xl_priv->xl_functional_addr[1] = xl_priv->xl_functional_addr[2] = xl_priv->xl_functional_addr[3] = 0 ;
341 xl_priv->xl_copy_all_options = 0 ;
342
343 if((i = xl_init(dev))) {
344 iounmap(xl_priv->xl_mmio) ;
345 free_netdev(dev) ;
346 pci_release_regions(pdev) ;
347 return i ;
348 }
349
69d65169 350 dev->netdev_ops = &xl_netdev_ops;
1da177e4
LT
351 SET_NETDEV_DEV(dev, &pdev->dev);
352
353 pci_set_drvdata(pdev,dev) ;
354 if ((i = register_netdev(dev))) {
355 printk(KERN_ERR "3C359, register netdev failed\n") ;
356 pci_set_drvdata(pdev,NULL) ;
357 iounmap(xl_priv->xl_mmio) ;
358 free_netdev(dev) ;
359 pci_release_regions(pdev) ;
360 return i ;
361 }
362
363 printk(KERN_INFO "3C359: %s registered as: %s\n",xl_priv->xl_card_name,dev->name) ;
364
365 return 0;
366}
367
4b6ece97
JSR
368static int xl_init_firmware(struct xl_private *xl_priv)
369{
370 int err;
371
372 err = request_firmware(&xl_priv->fw, FW_NAME, &xl_priv->pdev->dev);
373 if (err) {
374 printk(KERN_ERR "Failed to load firmware \"%s\"\n", FW_NAME);
375 return err;
376 }
377
378 if (xl_priv->fw->size < 16) {
379 printk(KERN_ERR "Bogus length %zu in \"%s\"\n",
380 xl_priv->fw->size, FW_NAME);
381 release_firmware(xl_priv->fw);
382 err = -EINVAL;
383 }
384
385 return err;
386}
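
/*
 * The 16-byte minimum above ties in with xl_hw_reset(), which copies the
 * last 16 bytes of the image separately into 0xDFFF0-0xDFFFF and writes the
 * upload start address (start >> 4) to 0xDFFF4.
 */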
1da177e4 387
9b5587cd 388static int __devinit xl_init(struct net_device *dev)
1da177e4 389{
eda10531 390 struct xl_private *xl_priv = netdev_priv(dev);
4b6ece97 391 int err;
1da177e4 392
014e4668 393 printk(KERN_INFO "%s\n", version);
1da177e4
LT
394 printk(KERN_INFO "%s: I/O at %hx, MMIO at %p, using irq %d\n",
395 xl_priv->xl_card_name, (unsigned int)dev->base_addr ,xl_priv->xl_mmio, dev->irq);
396
397 spin_lock_init(&xl_priv->xl_lock) ;
398
4b6ece97
JSR
399 err = xl_init_firmware(xl_priv);
400 if (err == 0)
401 err = xl_hw_reset(dev);
1da177e4 402
4b6ece97 403 return err;
1da177e4
LT
404}
405
406
407/*
408 * Hardware reset. This needs to be a separate entity as we need to reset the card
409 * when we change the EEProm settings.
410 */
411
412static int xl_hw_reset(struct net_device *dev)
4b6ece97 413{
eda10531 414 struct xl_private *xl_priv = netdev_priv(dev);
1da177e4
LT
415 u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
416 unsigned long t ;
417 u16 i ;
418 u16 result_16 ;
419 u8 result_8 ;
420 u16 start ;
421 int j ;
422
4b6ece97
JSR
423 if (xl_priv->fw == NULL)
424 return -EINVAL;
425
1da177e4
LT
426 /*
427 * Reset the card. If the card has got the microcode on board, we have
428 * missed the initialization interrupt, so we must always do this.
429 */
430
431 writew( GLOBAL_RESET, xl_mmio + MMIO_COMMAND ) ;
432
433 /*
434 * Must wait for cmdInProgress bit (12) to clear before continuing with
435 * card configuration.
436 */
437
438 t=jiffies;
439 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
440 schedule();
b7aa6909 441 if (time_after(jiffies, t + 40 * HZ)) {
1da177e4
LT
442 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL card not responding to global reset.\n", dev->name);
443 return -ENODEV;
444 }
445 }
446
447 /*
448 * Enable pmbar by setting bit in CPAttention
449 */
450
451 writel( (IO_BYTE_READ | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
452 result_8 = readb(xl_mmio + MMIO_MACDATA) ;
453 result_8 = result_8 | CPA_PMBARVIS ;
454 writel( (IO_BYTE_WRITE | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
455 writeb(result_8, xl_mmio + MMIO_MACDATA) ;
456
457 /*
458 * Read cpHold bit in pmbar, if cleared we have got Flashrom on board.
459 * If not, we need to upload the microcode to the card
460 */
461
462 writel( (IO_WORD_READ | PMBAR),xl_mmio + MMIO_MAC_ACCESS_CMD);
463
464#if XL_DEBUG
014e4668 465 printk(KERN_INFO "Read from PMBAR = %04x\n", readw(xl_mmio + MMIO_MACDATA));
1da177e4
LT
466#endif
467
468 if ( readw( (xl_mmio + MMIO_MACDATA)) & PMB_CPHOLD ) {
469
470 /* Set PmBar, privateMemoryBase bits (8:2) to 0 */
471
472 writel( (IO_WORD_READ | PMBAR),xl_mmio + MMIO_MAC_ACCESS_CMD);
473 result_16 = readw(xl_mmio + MMIO_MACDATA) ;
474 result_16 = result_16 & ~((0x7F) << 2) ;
475 writel( (IO_WORD_WRITE | PMBAR), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
476 writew(result_16,xl_mmio + MMIO_MACDATA) ;
477
478 /* Set CPAttention, memWrEn bit */
479
480 writel( (IO_BYTE_READ | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
481 result_8 = readb(xl_mmio + MMIO_MACDATA) ;
482 result_8 = result_8 | CPA_MEMWREN ;
483 writel( (IO_BYTE_WRITE | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
484 writeb(result_8, xl_mmio + MMIO_MACDATA) ;
485
486 /*
487 * Now to write the microcode into the shared ram
4b6ece97
JSR
488 * The microcode must finish at position 0xFFFF,
489 * so we must subtract to get the start position for the code
490 *
491 * Looks strange but ensures compiler only uses
492 * 16 bit unsigned int
1da177e4 493 */
4b6ece97 494 start = (0xFFFF - (xl_priv->fw->size) + 1) ;
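		/*
		 * Worked example with a hypothetical 0x6000-byte image:
		 * start = 0xFFFF - 0x6000 + 1 = 0xA000, so the loop below
		 * writes the code into card memory 0xDA000-0xDFFFF and
		 * start >> 4 = 0x0A00 is what ends up at offset 0xDFFF4.
		 */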
1da177e4 495
1da177e4 496 printk(KERN_INFO "3C359: Uploading Microcode: ");
4b6ece97
JSR
497
498 for (i = start, j = 0; j < xl_priv->fw->size; i++, j++) {
499 writel(MEM_BYTE_WRITE | 0XD0000 | i,
500 xl_mmio + MMIO_MAC_ACCESS_CMD);
501 writeb(xl_priv->fw->data[j], xl_mmio + MMIO_MACDATA);
1da177e4
LT
502 if (j % 1024 == 0)
503 printk(".");
504 }
505 printk("\n") ;
506
4b6ece97
JSR
507 for (i = 0; i < 16; i++) {
508 writel((MEM_BYTE_WRITE | 0xDFFF0) + i,
509 xl_mmio + MMIO_MAC_ACCESS_CMD);
510 writeb(xl_priv->fw->data[xl_priv->fw->size - 16 + i],
511 xl_mmio + MMIO_MACDATA);
1da177e4
LT
512 }
513
514 /*
515 * Have to write the start address of the upload to FFF4, but
516 * the address must be >> 4. You do not want to know how long
517 * it took me to discover this.
518 */
519
520 writel(MEM_WORD_WRITE | 0xDFFF4, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
521 writew(start >> 4, xl_mmio + MMIO_MACDATA);
522
523 /* Clear the CPAttention, memWrEn Bit */
524
525 writel( (IO_BYTE_READ | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
526 result_8 = readb(xl_mmio + MMIO_MACDATA) ;
527 result_8 = result_8 & ~CPA_MEMWREN ;
528 writel( (IO_BYTE_WRITE | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
529 writeb(result_8, xl_mmio + MMIO_MACDATA) ;
530
531 /* Clear the cpHold bit in pmbar */
532
533 writel( (IO_WORD_READ | PMBAR),xl_mmio + MMIO_MAC_ACCESS_CMD);
534 result_16 = readw(xl_mmio + MMIO_MACDATA) ;
535 result_16 = result_16 & ~PMB_CPHOLD ;
536 writel( (IO_WORD_WRITE | PMBAR), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
537 writew(result_16,xl_mmio + MMIO_MACDATA) ;
538
539
540 } /* If microcode upload required */
541
	/*
	 * The card should now go through a self test procedure and get itself ready
	 * to be opened; we must wait for an srb response with the initialization
	 * information.
	 */
547
548#if XL_DEBUG
549 printk(KERN_INFO "%s: Microcode uploaded, must wait for the self test to complete\n", dev->name);
550#endif
551
552 writew(SETINDENABLE | 0xFFF, xl_mmio + MMIO_COMMAND) ;
553
554 t=jiffies;
555 while ( !(readw(xl_mmio + MMIO_INTSTATUS_AUTO) & INTSTAT_SRB) ) {
556 schedule();
b7aa6909 557 if (time_after(jiffies, t + 15 * HZ)) {
1da177e4
LT
558 printk(KERN_ERR "3COM 3C359 Velocity XL card not responding.\n");
559 return -ENODEV;
560 }
561 }
562
	/*
	 * Write the RxBufArea with D000, RxEarlyThresh, TxStartThresh,
	 * DnPriReqThresh; read the tech docs if you want to know what
	 * values they need to be.
	 */
568
569 writel(MMIO_WORD_WRITE | RXBUFAREA, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
570 writew(0xD000, xl_mmio + MMIO_MACDATA) ;
571
572 writel(MMIO_WORD_WRITE | RXEARLYTHRESH, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
573 writew(0X0020, xl_mmio + MMIO_MACDATA) ;
574
575 writew( SETTXSTARTTHRESH | 0x40 , xl_mmio + MMIO_COMMAND) ;
576
577 writeb(0x04, xl_mmio + MMIO_DNBURSTTHRESH) ;
578 writeb(0x04, xl_mmio + DNPRIREQTHRESH) ;
579
580 /*
581 * Read WRBR to provide the location of the srb block, have to use byte reads not word reads.
582 * Tech docs have this wrong !!!!
583 */
584
585 writel(MMIO_BYTE_READ | WRBR, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
586 xl_priv->srb = readb(xl_mmio + MMIO_MACDATA) << 8 ;
587 writel( (MMIO_BYTE_READ | WRBR) + 1, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
588 xl_priv->srb = xl_priv->srb | readb(xl_mmio + MMIO_MACDATA) ;
589
590#if XL_DEBUG
591 writel(IO_WORD_READ | SWITCHSETTINGS, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
592 if ( readw(xl_mmio + MMIO_MACDATA) & 2) {
014e4668 593 printk(KERN_INFO "Default ring speed 4 mbps\n");
1da177e4 594 } else {
014e4668 595 printk(KERN_INFO "Default ring speed 16 mbps\n");
1da177e4
LT
596 }
597 printk(KERN_INFO "%s: xl_priv->srb = %04x\n",xl_priv->xl_card_name, xl_priv->srb);
598#endif
599
600 return 0;
601}
602
603static int xl_open(struct net_device *dev)
604{
eda10531 605 struct xl_private *xl_priv=netdev_priv(dev);
1da177e4
LT
606 u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
607 u8 i ;
9914cad5 608 __le16 hwaddr[3] ; /* Should be u8[6] but we get word return values */
1da177e4
LT
609 int open_err ;
610
611 u16 switchsettings, switchsettings_eeprom ;
612
dddcb445 613 if (request_irq(dev->irq, xl_interrupt, IRQF_SHARED , "3c359", dev))
1da177e4 614 return -EAGAIN;
1da177e4
LT
615
616 /*
9914cad5 617 * Read the information from the EEPROM that we need.
1da177e4
LT
618 */
619
9914cad5
AV
620 hwaddr[0] = cpu_to_le16(xl_ee_read(dev,0x10));
621 hwaddr[1] = cpu_to_le16(xl_ee_read(dev,0x11));
622 hwaddr[2] = cpu_to_le16(xl_ee_read(dev,0x12));
1da177e4
LT
623
624 /* Ring speed */
625
626 switchsettings_eeprom = xl_ee_read(dev,0x08) ;
627 switchsettings = switchsettings_eeprom ;
628
629 if (xl_priv->xl_ring_speed != 0) {
630 if (xl_priv->xl_ring_speed == 4)
631 switchsettings = switchsettings | 0x02 ;
632 else
633 switchsettings = switchsettings & ~0x02 ;
634 }
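	/*
	 * Bit 1 of EEPROM word 0x08 selects the ring speed: set it for 4 Mbps,
	 * clear it for 16 Mbps (the same bit the SWITCHSETTINGS debug read in
	 * xl_hw_reset() reports).
	 */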
635
636 /* Only write EEProm if there has been a change */
637 if (switchsettings != switchsettings_eeprom) {
638 xl_ee_write(dev,0x08,switchsettings) ;
639 /* Hardware reset after changing EEProm */
640 xl_hw_reset(dev) ;
641 }
642
643 memcpy(dev->dev_addr,hwaddr,dev->addr_len) ;
644
645 open_err = xl_open_hw(dev) ;
646
647 /*
648 * This really needs to be cleaned up with better error reporting.
649 */
650
651 if (open_err != 0) { /* Something went wrong with the open command */
652 if (open_err & 0x07) { /* Wrong speed, retry at different speed */
014e4668 653 printk(KERN_WARNING "%s: Open Error, retrying at different ringspeed\n", dev->name);
1da177e4
LT
654 switchsettings = switchsettings ^ 2 ;
655 xl_ee_write(dev,0x08,switchsettings) ;
656 xl_hw_reset(dev) ;
657 open_err = xl_open_hw(dev) ;
658 if (open_err != 0) {
659 printk(KERN_WARNING "%s: Open error returned a second time, we're bombing out now\n", dev->name);
660 free_irq(dev->irq,dev) ;
661 return -ENODEV ;
662 }
663 } else {
664 printk(KERN_WARNING "%s: Open Error = %04x\n", dev->name, open_err) ;
665 free_irq(dev->irq,dev) ;
666 return -ENODEV ;
667 }
668 }
669
670 /*
671 * Now to set up the Rx and Tx buffer structures
672 */
673 /* These MUST be on 8 byte boundaries */
c821d55c 674 xl_priv->xl_tx_ring = kzalloc((sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE) + 7, GFP_DMA | GFP_KERNEL);
1da177e4 675 if (xl_priv->xl_tx_ring == NULL) {
138a5cdf 676 printk(KERN_WARNING "%s: Not enough memory to allocate tx buffers.\n",
1da177e4
LT
677 dev->name);
678 free_irq(dev->irq,dev);
679 return -ENOMEM;
680 }
c821d55c 681 xl_priv->xl_rx_ring = kzalloc((sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE) +7, GFP_DMA | GFP_KERNEL);
d0cc10ab 682 if (xl_priv->xl_rx_ring == NULL) {
1da177e4
LT
683 printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers.\n",
684 dev->name);
685 free_irq(dev->irq,dev);
686 kfree(xl_priv->xl_tx_ring);
687 return -ENOMEM;
688 }
1da177e4
LT
689
690 /* Setup Rx Ring */
691 for (i=0 ; i < XL_RX_RING_SIZE ; i++) {
692 struct sk_buff *skb ;
693
694 skb = dev_alloc_skb(xl_priv->pkt_buf_sz) ;
695 if (skb==NULL)
696 break ;
697
698 skb->dev = dev ;
9914cad5
AV
699 xl_priv->xl_rx_ring[i].upfragaddr = cpu_to_le32(pci_map_single(xl_priv->pdev, skb->data,xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
700 xl_priv->xl_rx_ring[i].upfraglen = cpu_to_le32(xl_priv->pkt_buf_sz) | RXUPLASTFRAG;
1da177e4
LT
701 xl_priv->rx_ring_skb[i] = skb ;
702 }
703
704 if (i==0) {
014e4668 705 printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n",dev->name);
1da177e4 706 free_irq(dev->irq,dev) ;
5c94afd7
JP
707 kfree(xl_priv->xl_tx_ring);
708 kfree(xl_priv->xl_rx_ring);
1da177e4
LT
709 return -EIO ;
710 }
711
712 xl_priv->rx_ring_no = i ;
713 xl_priv->rx_ring_tail = 0 ;
714 xl_priv->rx_ring_dma_addr = pci_map_single(xl_priv->pdev,xl_priv->xl_rx_ring, sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE, PCI_DMA_TODEVICE) ;
715 for (i=0;i<(xl_priv->rx_ring_no-1);i++) {
9914cad5 716 xl_priv->xl_rx_ring[i].upnextptr = cpu_to_le32(xl_priv->rx_ring_dma_addr + (sizeof (struct xl_rx_desc) * (i+1)));
1da177e4
LT
717 }
718 xl_priv->xl_rx_ring[i].upnextptr = 0 ;
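	/*
	 * The last descriptor's upnextptr is deliberately left at zero: the card
	 * stops there until adv_rx_ring() re-links it, so it cannot run past
	 * frames we have not processed yet.
	 */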
719
720 writel(xl_priv->rx_ring_dma_addr, xl_mmio + MMIO_UPLISTPTR) ;
721
722 /* Setup Tx Ring */
723
724 xl_priv->tx_ring_dma_addr = pci_map_single(xl_priv->pdev,xl_priv->xl_tx_ring, sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE,PCI_DMA_TODEVICE) ;
725
726 xl_priv->tx_ring_head = 1 ;
727 xl_priv->tx_ring_tail = 255 ; /* Special marker for first packet */
728 xl_priv->free_ring_entries = XL_TX_RING_SIZE ;
729
730 /*
731 * Setup the first dummy DPD entry for polling to start working.
732 */
733
9914cad5 734 xl_priv->xl_tx_ring[0].framestartheader = TXDPDEMPTY;
1da177e4
LT
735 xl_priv->xl_tx_ring[0].buffer = 0 ;
736 xl_priv->xl_tx_ring[0].buffer_length = 0 ;
737 xl_priv->xl_tx_ring[0].dnnextptr = 0 ;
738
739 writel(xl_priv->tx_ring_dma_addr, xl_mmio + MMIO_DNLISTPTR) ;
740 writel(DNUNSTALL, xl_mmio + MMIO_COMMAND) ;
741 writel(UPUNSTALL, xl_mmio + MMIO_COMMAND) ;
742 writel(DNENABLE, xl_mmio + MMIO_COMMAND) ;
743 writeb(0x40, xl_mmio + MMIO_DNPOLL) ;
744
745 /*
746 * Enable interrupts on the card
747 */
748
749 writel(SETINTENABLE | INT_MASK, xl_mmio + MMIO_COMMAND) ;
750 writel(SETINDENABLE | INT_MASK, xl_mmio + MMIO_COMMAND) ;
751
752 netif_start_queue(dev) ;
753 return 0;
754
755}
756
757static int xl_open_hw(struct net_device *dev)
758{
eda10531 759 struct xl_private *xl_priv=netdev_priv(dev);
1da177e4
LT
760 u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
761 u16 vsoff ;
762 char ver_str[33];
763 int open_err ;
764 int i ;
765 unsigned long t ;
766
767 /*
768 * Okay, let's build up the Open.NIC srb command
769 *
770 */
771
772 writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
773 writeb(OPEN_NIC, xl_mmio + MMIO_MACDATA) ;
774
775 /*
776 * Use this as a test byte, if it comes back with the same value, the command didn't work
777 */
778
779 writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb)+ 2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
780 writeb(0xff,xl_mmio + MMIO_MACDATA) ;
781
782 /* Open options */
783 writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + 8, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
784 writeb(0x00, xl_mmio + MMIO_MACDATA) ;
785 writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + 9, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
786 writeb(0x00, xl_mmio + MMIO_MACDATA) ;
787
	/*
	 * Node address: be careful here, the docs say you can just put zeros here and it will use
	 * the hardware address.  It doesn't; you must include the node address in the open command.
	 */
792
793 if (xl_priv->xl_laa[0]) { /* If using a LAA address */
794 for (i=10;i<16;i++) {
795 writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
9a7387c2 796 writeb(xl_priv->xl_laa[i-10],xl_mmio + MMIO_MACDATA) ;
1da177e4
LT
797 }
798 memcpy(dev->dev_addr,xl_priv->xl_laa,dev->addr_len) ;
799 } else { /* Regular hardware address */
800 for (i=10;i<16;i++) {
801 writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
802 writeb(dev->dev_addr[i-10], xl_mmio + MMIO_MACDATA) ;
803 }
804 }
805
806 /* Default everything else to 0 */
807 for (i = 16; i < 34; i++) {
808 writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
809 writeb(0x00,xl_mmio + MMIO_MACDATA) ;
810 }
811
812 /*
813 * Set the csrb bit in the MISR register
814 */
815
816 xl_wait_misr_flags(dev) ;
817 writel(MEM_BYTE_WRITE | MF_CSRB, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
818 writeb(0xFF, xl_mmio + MMIO_MACDATA) ;
819 writel(MMIO_BYTE_WRITE | MISR_SET, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
820 writeb(MISR_CSRB , xl_mmio + MMIO_MACDATA) ;
821
822 /*
823 * Now wait for the command to run
824 */
825
826 t=jiffies;
827 while (! (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_SRB)) {
828 schedule();
b7aa6909 829 if (time_after(jiffies, t + 40 * HZ)) {
1da177e4
LT
830 printk(KERN_ERR "3COM 3C359 Velocity XL card not responding.\n");
831 break ;
832 }
833 }
834
835 /*
836 * Let's interpret the open response
837 */
838
839 writel( (MEM_BYTE_READ | 0xD0000 | xl_priv->srb)+2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
840 if (readb(xl_mmio + MMIO_MACDATA)!=0) {
841 open_err = readb(xl_mmio + MMIO_MACDATA) << 8 ;
842 writel( (MEM_BYTE_READ | 0xD0000 | xl_priv->srb) + 7, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
843 open_err |= readb(xl_mmio + MMIO_MACDATA) ;
844 return open_err ;
845 } else {
846 writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 8, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
9914cad5 847 xl_priv->asb = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
1da177e4
LT
848 printk(KERN_INFO "%s: Adapter Opened Details: ",dev->name) ;
849 printk("ASB: %04x",xl_priv->asb ) ;
850 writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 10, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
9914cad5 851 printk(", SRB: %04x",swab16(readw(xl_mmio + MMIO_MACDATA)) ) ;
1da177e4
LT
852
853 writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 12, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
9914cad5 854 xl_priv->arb = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
014e4668 855 printk(", ARB: %04x\n",xl_priv->arb );
1da177e4 856 writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 14, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
9914cad5 857 vsoff = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
1da177e4
LT
858
	/*
	 * Interesting: sending the individual characters directly to printk was causing klogd to
	 * use 100% of processor time, so we build up the string and print that instead.
	 */
863
864 for (i=0;i<0x20;i++) {
865 writel( (MEM_BYTE_READ | 0xD0000 | vsoff) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
866 ver_str[i] = readb(xl_mmio + MMIO_MACDATA) ;
867 }
868 ver_str[i] = '\0' ;
014e4668 869 printk(KERN_INFO "%s: Microcode version String: %s\n",dev->name,ver_str);
1da177e4
LT
870 }
871
872 /*
873 * Issue the AckInterrupt
874 */
875 writew(ACK_INTERRUPT | SRBRACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
876
877 return 0 ;
878}
879
/*
 * There are two ways of implementing rx on the 359 NIC, either
 * interrupt driven or polling.  We are going to use interrupts,
 * as it is the easier way of doing things.
 *
 * The Rx works with a ring of Rx descriptors.  At initialise time the ring
 * entries point to the next entry except for the last entry in the ring
 * which points to 0.  The card is programmed with the location of the first
 * available descriptor and keeps reading the next_ptr until next_ptr is set
 * to 0.  Hopefully with a ring size of 16 the card will never get to read a next_ptr
 * of 0.  As the Rx interrupt is received we copy the frame up to the protocol layers
 * and then point the end of the ring to our current position and point our current
 * position to 0, therefore making the current position the last position on the ring.
 * The last position on the ring therefore continually loops around the rx ring.
 *
 * rx_ring_tail is the position on the ring to process next. (Think of a snake, the head
 * expands as the card adds new packets and we go around eating the tail, processing the
 * packets.)
 *
 * Undoubtedly it could be streamlined and improved upon, but at the moment it works
 * and the fast path through the routine is fine.
 *
 * adv_rx_ring could be inlined to increase performance, but it's called a *lot* of times
 * in xl_rx so that would increase the size of the function significantly.
 */
905
906static void adv_rx_ring(struct net_device *dev) /* Advance rx_ring, cut down on bloat in xl_rx */
907{
eda10531 908 struct xl_private *xl_priv=netdev_priv(dev);
909 int n = xl_priv->rx_ring_tail;
910 int prev_ring_loc;
911
912 prev_ring_loc = (n + XL_RX_RING_SIZE - 1) & (XL_RX_RING_SIZE - 1);
913 xl_priv->xl_rx_ring[prev_ring_loc].upnextptr = cpu_to_le32(xl_priv->rx_ring_dma_addr + (sizeof (struct xl_rx_desc) * n));
914 xl_priv->xl_rx_ring[n].framestatus = 0;
915 xl_priv->xl_rx_ring[n].upnextptr = 0;
916 xl_priv->rx_ring_tail++;
917 xl_priv->rx_ring_tail &= (XL_RX_RING_SIZE-1);
918}
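
/*
 * Worked example with XL_RX_RING_SIZE == 16: if rx_ring_tail is 5,
 * prev_ring_loc is (5 + 15) & 15 == 4, so descriptor 4 is re-pointed at
 * descriptor 5, descriptor 5 becomes the new zero-terminated end of the
 * ring, and rx_ring_tail advances to 6.
 */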
919
920static void xl_rx(struct net_device *dev)
921{
eda10531 922 struct xl_private *xl_priv=netdev_priv(dev);
1da177e4
LT
923 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
924 struct sk_buff *skb, *skb2 ;
925 int frame_length = 0, copy_len = 0 ;
926 int temp_ring_loc ;
927
928 /*
929 * Receive the next frame, loop around the ring until all frames
930 * have been received.
931 */
932
933 while (xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus & (RXUPDCOMPLETE | RXUPDFULL) ) { /* Descriptor to process */
934
935 if (xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus & RXUPDFULL ) { /* UpdFull, Multiple Descriptors used for the frame */
936
937 /*
938 * This is a pain, you need to go through all the descriptors until the last one
939 * for this frame to find the framelength
940 */
941
942 temp_ring_loc = xl_priv->rx_ring_tail ;
943
944 while (xl_priv->xl_rx_ring[temp_ring_loc].framestatus & RXUPDFULL ) {
945 temp_ring_loc++ ;
946 temp_ring_loc &= (XL_RX_RING_SIZE-1) ;
947 }
948
9914cad5 949 frame_length = le32_to_cpu(xl_priv->xl_rx_ring[temp_ring_loc].framestatus) & 0x7FFF;
1da177e4
LT
950
951 skb = dev_alloc_skb(frame_length) ;
952
953 if (skb==NULL) { /* No memory for frame, still need to roll forward the rx ring */
954 printk(KERN_WARNING "%s: dev_alloc_skb failed - multi buffer !\n", dev->name) ;
955 while (xl_priv->rx_ring_tail != temp_ring_loc)
956 adv_rx_ring(dev) ;
957
958 adv_rx_ring(dev) ; /* One more time just for luck :) */
94f9d298 959 dev->stats.rx_dropped++ ;
1da177e4
LT
960
961 writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ;
962 return ;
963 }
964
1da177e4 965 while (xl_priv->rx_ring_tail != temp_ring_loc) {
9914cad5 966 copy_len = le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen) & 0x7FFF;
1da177e4 967 frame_length -= copy_len ;
9914cad5 968 pci_dma_sync_single_for_cpu(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE);
d626f62b
ACM
969 skb_copy_from_linear_data(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail],
970 skb_put(skb, copy_len),
971 copy_len);
9914cad5 972 pci_dma_sync_single_for_device(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE);
1da177e4
LT
973 adv_rx_ring(dev) ;
974 }
975
976 /* Now we have found the last fragment */
9914cad5 977 pci_dma_sync_single_for_cpu(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE);
d626f62b
ACM
978 skb_copy_from_linear_data(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail],
979 skb_put(skb,copy_len), frame_length);
1da177e4 980/* memcpy(skb_put(skb,frame_length), bus_to_virt(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr), frame_length) ; */
9914cad5 981 pci_dma_sync_single_for_device(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE);
1da177e4
LT
982 adv_rx_ring(dev) ;
983 skb->protocol = tr_type_trans(skb,dev) ;
984 netif_rx(skb) ;
985
986 } else { /* Single Descriptor Used, simply swap buffers over, fast path */
987
9914cad5 988 frame_length = le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus) & 0x7FFF;
1da177e4
LT
989
990 skb = dev_alloc_skb(xl_priv->pkt_buf_sz) ;
991
992 if (skb==NULL) { /* Still need to fix the rx ring */
014e4668 993 printk(KERN_WARNING "%s: dev_alloc_skb failed in rx, single buffer\n",dev->name);
1da177e4 994 adv_rx_ring(dev) ;
94f9d298 995 dev->stats.rx_dropped++ ;
1da177e4
LT
996 writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ;
997 return ;
998 }
999
1da177e4 1000 skb2 = xl_priv->rx_ring_skb[xl_priv->rx_ring_tail] ;
9914cad5 1001 pci_unmap_single(xl_priv->pdev, le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr), xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
1da177e4
LT
1002 skb_put(skb2, frame_length) ;
1003 skb2->protocol = tr_type_trans(skb2,dev) ;
1004
1005 xl_priv->rx_ring_skb[xl_priv->rx_ring_tail] = skb ;
9914cad5
AV
1006 xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr = cpu_to_le32(pci_map_single(xl_priv->pdev,skb->data,xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
1007 xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen = cpu_to_le32(xl_priv->pkt_buf_sz) | RXUPLASTFRAG;
1da177e4 1008 adv_rx_ring(dev) ;
94f9d298
PZ
1009 dev->stats.rx_packets++ ;
1010 dev->stats.rx_bytes += frame_length ;
1da177e4
LT
1011
1012 netif_rx(skb2) ;
1013 } /* if multiple buffers */
1da177e4
LT
1014 } /* while packet to do */
1015
1016 /* Clear the updComplete interrupt */
1017 writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ;
1018 return ;
1019}
1020
/*
 * This is ruthless: it doesn't care what state the card is in, it will
 * completely reset the adapter.
 */
1025
1026static void xl_reset(struct net_device *dev)
1027{
eda10531 1028 struct xl_private *xl_priv=netdev_priv(dev);
1da177e4
LT
1029 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1030 unsigned long t;
1031
1032 writew( GLOBAL_RESET, xl_mmio + MMIO_COMMAND ) ;
1033
1034 /*
1035 * Must wait for cmdInProgress bit (12) to clear before continuing with
1036 * card configuration.
1037 */
1038
1039 t=jiffies;
1040 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
b7aa6909 1041 if (time_after(jiffies, t + 40 * HZ)) {
1da177e4
LT
1042 printk(KERN_ERR "3COM 3C359 Velocity XL card not responding.\n");
1043 break ;
1044 }
1045 }
1046
1047}
1048
1049static void xl_freemem(struct net_device *dev)
1050{
eda10531 1051 struct xl_private *xl_priv=netdev_priv(dev);
1da177e4
LT
1052 int i ;
1053
1054 for (i=0;i<XL_RX_RING_SIZE;i++) {
1055 dev_kfree_skb_irq(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail]) ;
9914cad5 1056 pci_unmap_single(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
1da177e4
LT
1057 xl_priv->rx_ring_tail++ ;
1058 xl_priv->rx_ring_tail &= XL_RX_RING_SIZE-1;
1059 }
1060
1061 /* unmap ring */
1062 pci_unmap_single(xl_priv->pdev,xl_priv->rx_ring_dma_addr, sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE, PCI_DMA_FROMDEVICE) ;
1063
1064 pci_unmap_single(xl_priv->pdev,xl_priv->tx_ring_dma_addr, sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE, PCI_DMA_TODEVICE) ;
1065
1066 kfree(xl_priv->xl_rx_ring) ;
1067 kfree(xl_priv->xl_tx_ring) ;
1068
1069 return ;
1070}
1071
7d12e780 1072static irqreturn_t xl_interrupt(int irq, void *dev_id)
1da177e4
LT
1073{
1074 struct net_device *dev = (struct net_device *)dev_id;
eda10531 1075 struct xl_private *xl_priv =netdev_priv(dev);
1da177e4
LT
1076 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1077 u16 intstatus, macstatus ;
1078
1da177e4
LT
1079 intstatus = readw(xl_mmio + MMIO_INTSTATUS) ;
1080
1081 if (!(intstatus & 1)) /* We didn't generate the interrupt */
1082 return IRQ_NONE;
1083
1084 spin_lock(&xl_priv->xl_lock) ;
1085
1086 /*
1087 * Process the interrupt
1088 */
1089 /*
1090 * Something fishy going on here, we shouldn't get 0001 ints, not fatal though.
1091 */
1092 if (intstatus == 0x0001) {
1093 writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
014e4668 1094 printk(KERN_INFO "%s: 00001 int received\n",dev->name);
1da177e4
LT
1095 } else {
1096 if (intstatus & (HOSTERRINT | SRBRINT | ARBCINT | UPCOMPINT | DNCOMPINT | HARDERRINT | (1<<8) | TXUNDERRUN | ASBFINT)) {
1097
1098 /*
1099 * Host Error.
1100 * It may be possible to recover from this, but usually it means something
1101 * is seriously fubar, so we just close the adapter.
1102 */
1103
1104 if (intstatus & HOSTERRINT) {
014e4668 1105 printk(KERN_WARNING "%s: Host Error, performing global reset, intstatus = %04x\n",dev->name,intstatus);
1da177e4 1106 writew( GLOBAL_RESET, xl_mmio + MMIO_COMMAND ) ;
014e4668 1107 printk(KERN_WARNING "%s: Resetting hardware:\n", dev->name);
1da177e4
LT
1108 netif_stop_queue(dev) ;
1109 xl_freemem(dev) ;
1110 free_irq(dev->irq,dev);
1111 xl_reset(dev) ;
1112 writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1113 spin_unlock(&xl_priv->xl_lock) ;
1114 return IRQ_HANDLED;
1115 } /* Host Error */
1116
1117 if (intstatus & SRBRINT ) { /* Srbc interrupt */
1118 writel(ACK_INTERRUPT | SRBRACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1119 if (xl_priv->srb_queued)
1120 xl_srb_bh(dev) ;
1121 } /* SRBR Interrupt */
1122
1123 if (intstatus & TXUNDERRUN) { /* Issue DnReset command */
1124 writel(DNRESET, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1125 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { /* Wait for command to run */
1126 /* !!! FIX-ME !!!!
1127 Must put a timeout check here ! */
1128 /* Empty Loop */
1129 }
014e4668 1130 printk(KERN_WARNING "%s: TX Underrun received\n",dev->name);
1da177e4
LT
1131 writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1132 } /* TxUnderRun */
1133
1134 if (intstatus & ARBCINT ) { /* Arbc interrupt */
1135 xl_arb_cmd(dev) ;
1136 } /* Arbc */
1137
1138 if (intstatus & ASBFINT) {
1139 if (xl_priv->asb_queued == 1) {
1140 xl_asb_cmd(dev) ;
1141 } else if (xl_priv->asb_queued == 2) {
1142 xl_asb_bh(dev) ;
1143 } else {
1144 writel(ACK_INTERRUPT | LATCH_ACK | ASBFACK, xl_mmio + MMIO_COMMAND) ;
1145 }
1146 } /* Asbf */
1147
1148 if (intstatus & UPCOMPINT ) /* UpComplete */
1149 xl_rx(dev) ;
1150
1151 if (intstatus & DNCOMPINT ) /* DnComplete */
1152 xl_dn_comp(dev) ;
1153
1154 if (intstatus & HARDERRINT ) { /* Hardware error */
1155 writel(MMIO_WORD_READ | MACSTATUS, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1156 macstatus = readw(xl_mmio + MMIO_MACDATA) ;
1157 printk(KERN_WARNING "%s: MacStatusError, details: ", dev->name);
1158 if (macstatus & (1<<14))
014e4668 1159 printk(KERN_WARNING "tchk error: Unrecoverable error\n");
1da177e4 1160 if (macstatus & (1<<3))
014e4668 1161 printk(KERN_WARNING "eint error: Internal watchdog timer expired\n");
1da177e4 1162 if (macstatus & (1<<2))
014e4668 1163 printk(KERN_WARNING "aint error: Host tried to perform invalid operation\n");
				printk(KERN_WARNING "Intstatus = %02x, macstatus = %02x\n",intstatus,macstatus) ;
014e4668 1165 printk(KERN_WARNING "%s: Resetting hardware:\n", dev->name);
1da177e4
LT
1166 netif_stop_queue(dev) ;
1167 xl_freemem(dev) ;
1168 free_irq(dev->irq,dev);
1169 unregister_netdev(dev) ;
1170 free_netdev(dev) ;
1171 xl_reset(dev) ;
1172 writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1173 spin_unlock(&xl_priv->xl_lock) ;
1174 return IRQ_HANDLED;
1175 }
1176 } else {
014e4668 1177 printk(KERN_WARNING "%s: Received Unknown interrupt : %04x\n", dev->name, intstatus);
1da177e4
LT
1178 writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1179 }
1180 }
1181
1182 /* Turn interrupts back on */
1183
1184 writel( SETINDENABLE | INT_MASK, xl_mmio + MMIO_COMMAND) ;
1185 writel( SETINTENABLE | INT_MASK, xl_mmio + MMIO_COMMAND) ;
1186
1187 spin_unlock(&xl_priv->xl_lock) ;
1188 return IRQ_HANDLED;
1189}
1190
1191/*
1192 * Tx - Polling configuration
1193 */
1194
61a84108 1195static netdev_tx_t xl_xmit(struct sk_buff *skb, struct net_device *dev)
1da177e4 1196{
eda10531 1197 struct xl_private *xl_priv=netdev_priv(dev);
1da177e4
LT
1198 struct xl_tx_desc *txd ;
1199 int tx_head, tx_tail, tx_prev ;
1200 unsigned long flags ;
1201
1202 spin_lock_irqsave(&xl_priv->xl_lock,flags) ;
1203
1204 netif_stop_queue(dev) ;
1205
1206 if (xl_priv->free_ring_entries > 1 ) {
1207 /*
1208 * Set up the descriptor for the packet
1209 */
1210 tx_head = xl_priv->tx_ring_head ;
1211 tx_tail = xl_priv->tx_ring_tail ;
1212
1213 txd = &(xl_priv->xl_tx_ring[tx_head]) ;
1214 txd->dnnextptr = 0 ;
9914cad5
AV
1215 txd->framestartheader = cpu_to_le32(skb->len) | TXDNINDICATE;
1216 txd->buffer = cpu_to_le32(pci_map_single(xl_priv->pdev, skb->data, skb->len, PCI_DMA_TODEVICE));
1217 txd->buffer_length = cpu_to_le32(skb->len) | TXDNFRAGLAST;
1da177e4 1218 xl_priv->tx_ring_skb[tx_head] = skb ;
94f9d298
PZ
1219 dev->stats.tx_packets++ ;
1220 dev->stats.tx_bytes += skb->len ;
1da177e4
LT
1221
1222 /*
1223 * Set the nextptr of the previous descriptor equal to this descriptor, add XL_TX_RING_SIZE -1
1224 * to ensure no negative numbers in unsigned locations.
1225 */
1226
1227 tx_prev = (xl_priv->tx_ring_head + XL_TX_RING_SIZE - 1) & (XL_TX_RING_SIZE - 1) ;
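		/* e.g. with tx_head == 0 this gives tx_prev == XL_TX_RING_SIZE - 1
		 * rather than a negative index. */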
1228
1229 xl_priv->tx_ring_head++ ;
1230 xl_priv->tx_ring_head &= (XL_TX_RING_SIZE - 1) ;
1231 xl_priv->free_ring_entries-- ;
1232
9914cad5 1233 xl_priv->xl_tx_ring[tx_prev].dnnextptr = cpu_to_le32(xl_priv->tx_ring_dma_addr + (sizeof (struct xl_tx_desc) * tx_head));
1da177e4
LT
1234
1235 /* Sneaky, by doing a read on DnListPtr we can force the card to poll on the DnNextPtr */
1236 /* readl(xl_mmio + MMIO_DNLISTPTR) ; */
1237
1238 netif_wake_queue(dev) ;
1239
1240 spin_unlock_irqrestore(&xl_priv->xl_lock,flags) ;
1241
6ed10654 1242 return NETDEV_TX_OK;
1da177e4
LT
1243 } else {
1244 spin_unlock_irqrestore(&xl_priv->xl_lock,flags) ;
5b548140 1245 return NETDEV_TX_BUSY;
1da177e4
LT
1246 }
1247
1248}
1249
/*
 * The NIC has told us that a packet has been downloaded onto the card; we must
 * find out which packet it has done, clear the skb and information for the packet,
 * then advance around the ring for all transmitted packets.
 */
1255
1256static void xl_dn_comp(struct net_device *dev)
1257{
eda10531 1258 struct xl_private *xl_priv=netdev_priv(dev);
1da177e4
LT
1259 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1260 struct xl_tx_desc *txd ;
1261
1262
1263 if (xl_priv->tx_ring_tail == 255) {/* First time */
1264 xl_priv->xl_tx_ring[0].framestartheader = 0 ;
1265 xl_priv->xl_tx_ring[0].dnnextptr = 0 ;
1266 xl_priv->tx_ring_tail = 1 ;
1267 }
1268
1269 while (xl_priv->xl_tx_ring[xl_priv->tx_ring_tail].framestartheader & TXDNCOMPLETE ) {
1270 txd = &(xl_priv->xl_tx_ring[xl_priv->tx_ring_tail]) ;
9914cad5 1271 pci_unmap_single(xl_priv->pdev, le32_to_cpu(txd->buffer), xl_priv->tx_ring_skb[xl_priv->tx_ring_tail]->len, PCI_DMA_TODEVICE);
1da177e4 1272 txd->framestartheader = 0 ;
9914cad5 1273 txd->buffer = cpu_to_le32(0xdeadbeef);
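		/* Poison value; presumably just makes a stale, already-freed
		 * descriptor easy to spot when debugging. */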
1da177e4
LT
1274 txd->buffer_length = 0 ;
1275 dev_kfree_skb_irq(xl_priv->tx_ring_skb[xl_priv->tx_ring_tail]) ;
1276 xl_priv->tx_ring_tail++ ;
1277 xl_priv->tx_ring_tail &= (XL_TX_RING_SIZE - 1) ;
1278 xl_priv->free_ring_entries++ ;
1279 }
1280
1281 netif_wake_queue(dev) ;
1282
1283 writel(ACK_INTERRUPT | DNCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ;
1284}
1285
1286/*
1287 * Close the adapter properly.
1288 * This srb reply cannot be handled from interrupt context as we have
1289 * to free the interrupt from the driver.
1290 */
1291
1292static int xl_close(struct net_device *dev)
1293{
eda10531 1294 struct xl_private *xl_priv = netdev_priv(dev);
1da177e4
LT
1295 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1296 unsigned long t ;
1297
1298 netif_stop_queue(dev) ;
1299
1300 /*
1301 * Close the adapter, need to stall the rx and tx queues.
1302 */
1303
1304 writew(DNSTALL, xl_mmio + MMIO_COMMAND) ;
1305 t=jiffies;
1306 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
1307 schedule();
b7aa6909 1308 if (time_after(jiffies, t + 10 * HZ)) {
1da177e4
LT
1309 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNSTALL not responding.\n", dev->name);
1310 break ;
1311 }
1312 }
1313 writew(DNDISABLE, xl_mmio + MMIO_COMMAND) ;
1314 t=jiffies;
1315 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
1316 schedule();
b7aa6909 1317 if (time_after(jiffies, t + 10 * HZ)) {
1da177e4
LT
1318 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNDISABLE not responding.\n", dev->name);
1319 break ;
1320 }
1321 }
1322 writew(UPSTALL, xl_mmio + MMIO_COMMAND) ;
1323 t=jiffies;
1324 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
1325 schedule();
b7aa6909 1326 if (time_after(jiffies, t + 10 * HZ)) {
1da177e4
LT
1327 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-UPSTALL not responding.\n", dev->name);
1328 break ;
1329 }
1330 }
1331
1332 /* Turn off interrupts, we will still get the indication though
1333 * so we can trap it
1334 */
1335
1336 writel(SETINTENABLE, xl_mmio + MMIO_COMMAND) ;
1337
1338 xl_srb_cmd(dev,CLOSE_NIC) ;
1339
1340 t=jiffies;
1341 while (!(readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_SRB)) {
1342 schedule();
b7aa6909 1343 if (time_after(jiffies, t + 10 * HZ)) {
1da177e4
LT
1344 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-CLOSENIC not responding.\n", dev->name);
1345 break ;
1346 }
1347 }
1348 /* Read the srb response from the adapter */
1349
1350 writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD);
1351 if (readb(xl_mmio + MMIO_MACDATA) != CLOSE_NIC) {
014e4668 1352 printk(KERN_INFO "%s: CLOSE_NIC did not get a CLOSE_NIC response\n",dev->name);
1da177e4
LT
1353 } else {
1354 writel((MEM_BYTE_READ | 0xd0000 | xl_priv->srb) +2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1355 if (readb(xl_mmio + MMIO_MACDATA)==0) {
014e4668 1356 printk(KERN_INFO "%s: Adapter has been closed\n",dev->name);
1da177e4
LT
1357 writew(ACK_INTERRUPT | SRBRACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1358
1359 xl_freemem(dev) ;
1360 free_irq(dev->irq,dev) ;
1361 } else {
1362 printk(KERN_INFO "%s: Close nic command returned error code %02x\n",dev->name, readb(xl_mmio + MMIO_MACDATA)) ;
1363 }
1364 }
1365
1366 /* Reset the upload and download logic */
1367
1368 writew(UPRESET, xl_mmio + MMIO_COMMAND) ;
1369 t=jiffies;
1370 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
1371 schedule();
b7aa6909 1372 if (time_after(jiffies, t + 10 * HZ)) {
1da177e4
LT
1373 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-UPRESET not responding.\n", dev->name);
1374 break ;
1375 }
1376 }
1377 writew(DNRESET, xl_mmio + MMIO_COMMAND) ;
1378 t=jiffies;
1379 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
1380 schedule();
b7aa6909 1381 if (time_after(jiffies, t + 10 * HZ)) {
1da177e4
LT
1382 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNRESET not responding.\n", dev->name);
1383 break ;
1384 }
1385 }
1386 xl_hw_reset(dev) ;
1387 return 0 ;
1388}
1389
1390static void xl_set_rx_mode(struct net_device *dev)
1391{
eda10531 1392 struct xl_private *xl_priv = netdev_priv(dev);
22bedad3 1393 struct netdev_hw_addr *ha;
1da177e4
LT
1394 unsigned char dev_mc_address[4] ;
1395 u16 options ;
1da177e4
LT
1396
1397 if (dev->flags & IFF_PROMISC)
1398 options = 0x0004 ;
1399 else
1400 options = 0x0000 ;
1401
1402 if (options ^ xl_priv->xl_copy_all_options) { /* Changed, must send command */
1403 xl_priv->xl_copy_all_options = options ;
1404 xl_srb_cmd(dev, SET_RECEIVE_MODE) ;
1405 return ;
1406 }
1407
1408 dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
1409
22bedad3
JP
1410 netdev_for_each_mc_addr(ha, dev) {
1411 dev_mc_address[0] |= ha->addr[2];
1412 dev_mc_address[1] |= ha->addr[3];
1413 dev_mc_address[2] |= ha->addr[4];
1414 dev_mc_address[3] |= ha->addr[5];
1da177e4
LT
1415 }
1416
1417 if (memcmp(xl_priv->xl_functional_addr,dev_mc_address,4) != 0) { /* Options have changed, run the command */
1418 memcpy(xl_priv->xl_functional_addr, dev_mc_address,4) ;
1419 xl_srb_cmd(dev, SET_FUNC_ADDRESS) ;
1420 }
1421 return ;
1422}
1423
1424
1425/*
1426 * We issued an srb command and now we must read
1427 * the response from the completed command.
1428 */
1429
1430static void xl_srb_bh(struct net_device *dev)
1431{
eda10531 1432 struct xl_private *xl_priv = netdev_priv(dev);
1da177e4
LT
1433 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1434 u8 srb_cmd, ret_code ;
1435 int i ;
1436
1437 writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1438 srb_cmd = readb(xl_mmio + MMIO_MACDATA) ;
1439 writel((MEM_BYTE_READ | 0xd0000 | xl_priv->srb) +2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1440 ret_code = readb(xl_mmio + MMIO_MACDATA) ;
1441
1442 /* Ret_code is standard across all commands */
1443
1444 switch (ret_code) {
1445 case 1:
1446 printk(KERN_INFO "%s: Command: %d - Invalid Command code\n",dev->name,srb_cmd) ;
1447 break ;
1448 case 4:
014e4668 1449 printk(KERN_INFO "%s: Command: %d - Adapter is closed, must be open for this command\n",dev->name,srb_cmd);
1da177e4
LT
1450 break ;
1451
1452 case 6:
014e4668 1453 printk(KERN_INFO "%s: Command: %d - Options Invalid for command\n",dev->name,srb_cmd);
1da177e4
LT
1454 break ;
1455
1456 case 0: /* Successful command execution */
1457 switch (srb_cmd) {
1458 case READ_LOG: /* Returns 14 bytes of data from the NIC */
1459 if(xl_priv->xl_message_level)
1460 printk(KERN_INFO "%s: READ.LOG 14 bytes of data ",dev->name) ;
1461 /*
1462 * We still have to read the log even if message_level = 0 and we don't want
1463 * to see it
1464 */
1465 for (i=0;i<14;i++) {
1466 writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb | i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1467 if(xl_priv->xl_message_level)
1468 printk("%02x:",readb(xl_mmio + MMIO_MACDATA)) ;
1469 }
1470 printk("\n") ;
1471 break ;
1472 case SET_FUNC_ADDRESS:
1473 if(xl_priv->xl_message_level)
014e4668 1474 printk(KERN_INFO "%s: Functional Address Set\n",dev->name);
1da177e4
LT
1475 break ;
1476 case CLOSE_NIC:
1477 if(xl_priv->xl_message_level)
014e4668 1478 printk(KERN_INFO "%s: Received CLOSE_NIC interrupt in interrupt handler\n",dev->name);
1da177e4
LT
1479 break ;
1480 case SET_MULTICAST_MODE:
1481 if(xl_priv->xl_message_level)
1482 printk(KERN_INFO "%s: Multicast options successfully changed\n",dev->name) ;
1483 break ;
1484 case SET_RECEIVE_MODE:
1485 if(xl_priv->xl_message_level) {
1486 if (xl_priv->xl_copy_all_options == 0x0004)
014e4668 1487 printk(KERN_INFO "%s: Entering promiscuous mode\n", dev->name);
1da177e4 1488 else
014e4668 1489 printk(KERN_INFO "%s: Entering normal receive mode\n",dev->name);
1da177e4
LT
1490 }
1491 break ;
1492
1493 } /* switch */
1494 break ;
1495 } /* switch */
1496 return ;
1497}
1498
1da177e4
LT
1499static int xl_set_mac_address (struct net_device *dev, void *addr)
1500{
1501 struct sockaddr *saddr = addr ;
eda10531 1502 struct xl_private *xl_priv = netdev_priv(dev);
1da177e4
LT
1503
1504 if (netif_running(dev)) {
1505 printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name) ;
1506 return -EIO ;
1507 }
1508
1509 memcpy(xl_priv->xl_laa, saddr->sa_data,dev->addr_len) ;
1510
1511 if (xl_priv->xl_message_level) {
1512 printk(KERN_INFO "%s: MAC/LAA Set to = %x.%x.%x.%x.%x.%x\n",dev->name, xl_priv->xl_laa[0],
1513 xl_priv->xl_laa[1], xl_priv->xl_laa[2],
1514 xl_priv->xl_laa[3], xl_priv->xl_laa[4],
1515 xl_priv->xl_laa[5]);
1516 }
1517
1518 return 0 ;
1519}
1520
1521static void xl_arb_cmd(struct net_device *dev)
1522{
eda10531 1523 struct xl_private *xl_priv = netdev_priv(dev);
1da177e4
LT
1524 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1525 u8 arb_cmd ;
1526 u16 lan_status, lan_status_diff ;
1527
1528 writel( ( MEM_BYTE_READ | 0xD0000 | xl_priv->arb), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1529 arb_cmd = readb(xl_mmio + MMIO_MACDATA) ;
1530
1531 if (arb_cmd == RING_STATUS_CHANGE) { /* Ring.Status.Change */
1532 writel( ( (MEM_WORD_READ | 0xD0000 | xl_priv->arb) + 6), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1533
9914cad5 1534 printk(KERN_INFO "%s: Ring Status Change: New Status = %04x\n", dev->name, swab16(readw(xl_mmio + MMIO_MACDATA) )) ;
1da177e4 1535
9914cad5 1536 lan_status = swab16(readw(xl_mmio + MMIO_MACDATA));
1da177e4
LT
1537
1538 /* Acknowledge interrupt, this tells nic we are done with the arb */
1539 writel(ACK_INTERRUPT | ARBCACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1540
1541 lan_status_diff = xl_priv->xl_lan_status ^ lan_status ;
1542
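	/* lan_status_diff carries only the bits that have changed since the last
	 * recorded status, so each check below reports a transition rather than
	 * a steady state.
	 */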
1543 if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR) ) {
1544 if (lan_status_diff & LSC_LWF)
1545 printk(KERN_WARNING "%s: Short circuit detected on the lobe\n",dev->name);
1546 if (lan_status_diff & LSC_ARW)
1547 printk(KERN_WARNING "%s: Auto removal error\n",dev->name);
1548 if (lan_status_diff & LSC_FPE)
1549 printk(KERN_WARNING "%s: FDX Protocol Error\n",dev->name);
1550 if (lan_status_diff & LSC_RR)
1551 printk(KERN_WARNING "%s: Force remove MAC frame received\n",dev->name);
1552
1553 /* Adapter has been closed by the hardware */
1554
1555 netif_stop_queue(dev);
1556 xl_freemem(dev) ;
1557 free_irq(dev->irq,dev);
1558
014e4668 1559 printk(KERN_WARNING "%s: Adapter has been closed\n", dev->name);
1560 } /* If serious error */
1561
1562 if (xl_priv->xl_message_level) {
1563 if (lan_status_diff & LSC_SIG_LOSS)
014e4668 1564 printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
1da177e4 1565 if (lan_status_diff & LSC_HARD_ERR)
014e4668 1566 printk(KERN_INFO "%s: Beaconing\n",dev->name);
1da177e4 1567 if (lan_status_diff & LSC_SOFT_ERR)
014e4668 1568 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
1569 if (lan_status_diff & LSC_TRAN_BCN)
 1570 			printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
1571 if (lan_status_diff & LSC_SS)
014e4668 1572 printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
1573 if (lan_status_diff & LSC_RING_REC)
1574 printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
1575 if (lan_status_diff & LSC_FDX_MODE)
1576 printk(KERN_INFO "%s: Operating in FDX mode\n",dev->name);
1577 }
1578
1579 if (lan_status_diff & LSC_CO) {
1580 if (xl_priv->xl_message_level)
014e4668 1581 printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
1582 /* Issue READ.LOG command */
1583 xl_srb_cmd(dev, READ_LOG) ;
1584 }
1585
1586 /* There is no command in the tech docs to issue the read_sr_counters */
1587 if (lan_status_diff & LSC_SR_CO) {
1588 if (xl_priv->xl_message_level)
1589 printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name);
1590 }
1591
1592 xl_priv->xl_lan_status = lan_status ;
1593
1594 } /* Lan.change.status */
1595 else if ( arb_cmd == RECEIVE_DATA) { /* Received.Data */
1596#if XL_DEBUG
014e4668 1597 printk(KERN_INFO "Received.Data\n");
1598#endif
1599 writel( ((MEM_WORD_READ | 0xD0000 | xl_priv->arb) + 6), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
9914cad5 1600 xl_priv->mac_buffer = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
1601
 1602 		/* We are going to be really basic here and not do anything
 1603 		 * with the data at all. The tech docs do not give enough
 1604 		 * information to calculate the buffers properly, so we
 1605 		 * just tell the nic that we have dealt with the frame
 1606 		 * anyway.
1607 */
1608
 1609 		/* Acknowledge the interrupt; this tells the nic we are done with the arb */
1610 writel(ACK_INTERRUPT | ARBCACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1611
1612 /* Is the ASB free ? */
1613
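	/* Anything other than 0xff at asb + 2 means the previous ASB response has
	 * not completed yet, so flag the wait (MF_ASBFR/MISR_ASBFR) and let the
	 * bottom half send this response once the block is free again.
	 */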
1614 xl_priv->asb_queued = 0 ;
1615 writel( ((MEM_BYTE_READ | 0xD0000 | xl_priv->asb) + 2), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1616 if (readb(xl_mmio + MMIO_MACDATA) != 0xff) {
1617 xl_priv->asb_queued = 1 ;
1618
1619 xl_wait_misr_flags(dev) ;
1620
1621 writel(MEM_BYTE_WRITE | MF_ASBFR, xl_mmio + MMIO_MAC_ACCESS_CMD);
1622 writeb(0xff, xl_mmio + MMIO_MACDATA) ;
1623 writel(MMIO_BYTE_WRITE | MISR_SET, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1624 writeb(MISR_ASBFR, xl_mmio + MMIO_MACDATA) ;
1625 return ;
1626 /* Drop out and wait for the bottom half to be run */
1627 }
1628
1629 xl_asb_cmd(dev) ;
1630
1631 } else {
014e4668 1632 printk(KERN_WARNING "%s: Received unknown arb (xl_priv) command: %02x\n",dev->name,arb_cmd);
1633 }
1634
1635 /* Acknowledge the arb interrupt */
1636
1637 writel(ACK_INTERRUPT | ARBCACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ;
1638
1639 return ;
1640}
1641
1642
1643/*
1644 * There is only one asb command, but we can get called from different
1645 * places.
1646 */
1647
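/*
 * The response is built in place in the ASB: 0x81 is written at offset 0 and
 * the receive buffer value saved in xl_priv->mac_buffer at offset 6, then
 * MF_RASB and MISR_RASB hand the block back to the adapter.
 */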
1648static void xl_asb_cmd(struct net_device *dev)
1649{
eda10531 1650 struct xl_private *xl_priv = netdev_priv(dev);
1651 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1652
1653 if (xl_priv->asb_queued == 1)
1654 writel(ACK_INTERRUPT | LATCH_ACK | ASBFACK, xl_mmio + MMIO_COMMAND) ;
1655
1656 writel(MEM_BYTE_WRITE | 0xd0000 | xl_priv->asb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1657 writeb(0x81, xl_mmio + MMIO_MACDATA) ;
1658
1659 writel(MEM_WORD_WRITE | 0xd0000 | xl_priv->asb | 6, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
9914cad5 1660 writew(swab16(xl_priv->mac_buffer), xl_mmio + MMIO_MACDATA) ;
1661
1662 xl_wait_misr_flags(dev) ;
1663
1664 writel(MEM_BYTE_WRITE | MF_RASB, xl_mmio + MMIO_MAC_ACCESS_CMD);
1665 writeb(0xff, xl_mmio + MMIO_MACDATA) ;
1666
1667 writel(MMIO_BYTE_WRITE | MISR_SET, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1668 writeb(MISR_RASB, xl_mmio + MMIO_MACDATA) ;
1669
1670 xl_priv->asb_queued = 2 ;
1671
1672 return ;
1673}
1674
1675/*
1676 * This will only get called if there was an error
1677 * from the asb cmd.
1678 */
1679static void xl_asb_bh(struct net_device *dev)
1680{
eda10531 1681 struct xl_private *xl_priv = netdev_priv(dev);
1682 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1683 u8 ret_code ;
1684
1685 writel(MMIO_BYTE_READ | 0xd0000 | xl_priv->asb | 2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1686 ret_code = readb(xl_mmio + MMIO_MACDATA) ;
1687 switch (ret_code) {
1688 case 0x01:
014e4668 1689 printk(KERN_INFO "%s: ASB Command, unrecognized command code\n",dev->name);
1690 break ;
1691 case 0x26:
014e4668 1692 printk(KERN_INFO "%s: ASB Command, unexpected receive buffer\n", dev->name);
1693 break ;
1694 case 0x40:
014e4668 1695 printk(KERN_INFO "%s: ASB Command, Invalid Station ID\n", dev->name);
1696 break ;
1697 }
1698 xl_priv->asb_queued = 0 ;
1699 writel(ACK_INTERRUPT | LATCH_ACK | ASBFACK, xl_mmio + MMIO_COMMAND) ;
1700 return ;
1701}
1702
1703/*
1704 * Issue srb commands to the nic
1705 */
1706
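/*
 * Every command writes its opcode to srb + 0. SET_RECEIVE_MODE also places
 * the options word at srb + 4, and SET_FUNC_ADDRESS the four functional
 * address bytes at srb + 6..9; MF_CSRB and MISR_CSRB then tell the adapter
 * to process the block.
 */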
1707static void xl_srb_cmd(struct net_device *dev, int srb_cmd)
1708{
eda10531 1709 struct xl_private *xl_priv = netdev_priv(dev);
1710 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1711
1712 switch (srb_cmd) {
1713 case READ_LOG:
1714 writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1715 writeb(READ_LOG, xl_mmio + MMIO_MACDATA) ;
1716 break;
1717
1718 case CLOSE_NIC:
1719 writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1720 writeb(CLOSE_NIC, xl_mmio + MMIO_MACDATA) ;
1721 break ;
1722
1723 case SET_RECEIVE_MODE:
1724 writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1725 writeb(SET_RECEIVE_MODE, xl_mmio + MMIO_MACDATA) ;
1726 writel(MEM_WORD_WRITE | 0xD0000 | xl_priv->srb | 4, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1727 writew(xl_priv->xl_copy_all_options, xl_mmio + MMIO_MACDATA) ;
1728 break ;
1729
1730 case SET_FUNC_ADDRESS:
1731 writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1732 writeb(SET_FUNC_ADDRESS, xl_mmio + MMIO_MACDATA) ;
1733 writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 6 , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1734 writeb(xl_priv->xl_functional_addr[0], xl_mmio + MMIO_MACDATA) ;
1735 writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 7 , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1736 writeb(xl_priv->xl_functional_addr[1], xl_mmio + MMIO_MACDATA) ;
1737 writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 8 , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1738 writeb(xl_priv->xl_functional_addr[2], xl_mmio + MMIO_MACDATA) ;
1739 writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 9 , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1740 writeb(xl_priv->xl_functional_addr[3], xl_mmio + MMIO_MACDATA) ;
1741 break ;
1742 } /* switch */
1743
1744
1745 xl_wait_misr_flags(dev) ;
1746
1747 /* Write 0xff to the CSRB flag */
1748 writel(MEM_BYTE_WRITE | MF_CSRB , xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1749 writeb(0xFF, xl_mmio + MMIO_MACDATA) ;
1750 /* Set csrb bit in MISR register to process command */
1751 writel(MMIO_BYTE_WRITE | MISR_SET, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1752 writeb(MISR_CSRB, xl_mmio + MMIO_MACDATA) ;
1753 xl_priv->srb_queued = 1 ;
1754
1755 return ;
1756}
1757
1758/*
 1759 * This is nasty: to use the MISR command you have to wait for 6 memory locations
 1760 * to be zero. This is how the driver does it on other OSes, so we should be OK with
 1761 * the empty loop.
1762 */
1763
1764static void xl_wait_misr_flags(struct net_device *dev)
1765{
eda10531 1766 struct xl_private *xl_priv = netdev_priv(dev);
1767 u8 __iomem * xl_mmio = xl_priv->xl_mmio ;
1768
1769 int i ;
1770
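	/* If the MISR has not cleared, spin on each of the six flag bytes at
	 * 0xDFFE0..0xDFFE5 until the adapter zeroes them; the MISR is then
	 * ANDed with 0x80 before the next command is set up.
	 */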
1771 writel(MMIO_BYTE_READ | MISR_RW, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1772 if (readb(xl_mmio + MMIO_MACDATA) != 0) { /* Misr not clear */
1773 for (i=0; i<6; i++) {
1774 writel(MEM_BYTE_READ | 0xDFFE0 | i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1775 while (readb(xl_mmio + MMIO_MACDATA) != 0 ) {} ; /* Empty Loop */
1776 }
1777 }
1778
1779 writel(MMIO_BYTE_WRITE | MISR_AND, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1780 writeb(0x80, xl_mmio + MMIO_MACDATA) ;
1781
1782 return ;
1783}
1784
1785/*
 1786 * Change the MTU size; this should work the same as the olympic driver.
1787 */
1788
1789static int xl_change_mtu(struct net_device *dev, int mtu)
1790{
eda10531 1791 struct xl_private *xl_priv = netdev_priv(dev);
1792 u16 max_mtu ;
1793
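	/* The upper MTU limit follows the ring speed: 4500 bytes on a 4 Mbps
	 * ring, 18000 bytes otherwise (the same limits the olympic driver uses).
	 */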
1794 if (xl_priv->xl_ring_speed == 4)
1795 max_mtu = 4500 ;
1796 else
1797 max_mtu = 18000 ;
1798
1799 if (mtu > max_mtu)
1800 return -EINVAL ;
1801 if (mtu < 100)
1802 return -EINVAL ;
1803
1804 dev->mtu = mtu ;
1805 xl_priv->pkt_buf_sz = mtu + TR_HLEN ;
1806
1807 return 0 ;
1808}
1809
1810static void __devexit xl_remove_one (struct pci_dev *pdev)
1811{
1812 struct net_device *dev = pci_get_drvdata(pdev);
eda10531 1813 struct xl_private *xl_priv=netdev_priv(dev);
1da177e4 1814
4b6ece97 1815 release_firmware(xl_priv->fw);
1816 unregister_netdev(dev);
1817 iounmap(xl_priv->xl_mmio) ;
1818 pci_release_regions(pdev) ;
1819 pci_set_drvdata(pdev,NULL) ;
1820 free_netdev(dev);
1821 return ;
1822}
1823
1824static struct pci_driver xl_3c359_driver = {
1825 .name = "3c359",
1826 .id_table = xl_pci_tbl,
1827 .probe = xl_probe,
1828 .remove = __devexit_p(xl_remove_one),
1829};
1830
1831static int __init xl_pci_init (void)
1832{
29917620 1833 return pci_register_driver(&xl_3c359_driver);
1834}
1835
1836
1837static void __exit xl_pci_cleanup (void)
1838{
1839 pci_unregister_driver (&xl_3c359_driver);
1840}
1841
1842module_init(xl_pci_init);
1843module_exit(xl_pci_cleanup);
1844
1845MODULE_LICENSE("GPL") ;