]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/skfp/skfddi.c
tree-wide: fix assorted typos all over the place
[net-next-2.6.git] / drivers / net / skfp / skfddi.c
CommitLineData
1da177e4
LT
1/*
2 * File Name:
3 * skfddi.c
4 *
5 * Copyright Information:
6 * Copyright SysKonnect 1998,1999.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * The information in this file is provided "AS IS" without warranty.
14 *
15 * Abstract:
16 * A Linux device driver supporting the SysKonnect FDDI PCI controller
 17 * family.
18 *
19 * Maintainers:
20 * CG Christoph Goos (cgoos@syskonnect.de)
21 *
22 * Contributors:
23 * DM David S. Miller
24 *
 25 * Address all questions to:
26 * linux@syskonnect.de
27 *
28 * The technical manual for the adapters is available from SysKonnect's
29 * web pages: www.syskonnect.com
30 * Goto "Support" and search Knowledge Base for "manual".
31 *
32 * Driver Architecture:
33 * The driver architecture is based on the DEC FDDI driver by
34 * Lawrence V. Stefani and several ethernet drivers.
35 * I also used an existing Windows NT miniport driver.
 36 * All hardware dependent functions are handled by the SysKonnect
37 * Hardware Module.
38 * The only headerfiles that are directly related to this source
39 * are skfddi.c, h/types.h, h/osdef1st.h, h/targetos.h.
40 * The others belong to the SysKonnect FDDI Hardware Module and
41 * should better not be changed.
42 *
43 * Modification History:
44 * Date Name Description
45 * 02-Mar-98 CG Created.
46 *
47 * 10-Mar-99 CG Support for 2.2.x added.
48 * 25-Mar-99 CG Corrected IRQ routing for SMP (APIC)
49 * 26-Oct-99 CG Fixed compilation error on 2.2.13
50 * 12-Nov-99 CG Source code release
51 * 22-Nov-99 CG Included in kernel source.
52 * 07-May-00 DM 64 bit fixes, new dma interface
53 * 31-Jul-03 DB Audit copy_*_user in skfp_ioctl
54 * Daniele Bellucci <bellucda@tiscali.it>
55 * 03-Dec-03 SH Convert to PCI device model
56 *
57 * Compilation options (-Dxxx):
58 * DRIVERDEBUG print lots of messages to log file
59 * DUMPPACKETS print received/transmitted packets to logfile
60 *
61 * Tested cpu architectures:
62 * - i386
63 * - sparc64
64 */
65
66/* Version information string - should be updated prior to */
67/* each new release!!! */
68#define VERSION "2.07"
69
f71e1309 70static const char * const boot_msg =
1da177e4
LT
71 "SysKonnect FDDI PCI Adapter driver v" VERSION " for\n"
72 " SK-55xx/SK-58xx adapters (SK-NET FDDI-FP/UP/LP)";
73
74/* Include files */
75
d43c36dc 76#include <linux/capability.h>
1da177e4
LT
77#include <linux/module.h>
78#include <linux/kernel.h>
79#include <linux/errno.h>
80#include <linux/ioport.h>
81#include <linux/slab.h>
82#include <linux/interrupt.h>
83#include <linux/pci.h>
84#include <linux/netdevice.h>
85#include <linux/fddidevice.h>
86#include <linux/skbuff.h>
87#include <linux/bitops.h>
88
89#include <asm/byteorder.h>
90#include <asm/io.h>
91#include <asm/uaccess.h>
92
93#include "h/types.h"
94#undef ADDR // undo Linux definition
95#include "h/skfbi.h"
96#include "h/fddi.h"
97#include "h/smc.h"
98#include "h/smtstate.h"
99
100
101// Define module-wide (static) routines
102static int skfp_driver_init(struct net_device *dev);
103static int skfp_open(struct net_device *dev);
104static int skfp_close(struct net_device *dev);
7d12e780 105static irqreturn_t skfp_interrupt(int irq, void *dev_id);
1da177e4
LT
106static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev);
107static void skfp_ctl_set_multicast_list(struct net_device *dev);
108static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev);
109static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr);
110static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
61357325
SH
111static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
112 struct net_device *dev);
1da177e4
LT
113static void send_queued_packets(struct s_smc *smc);
114static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr);
115static void ResetAdapter(struct s_smc *smc);
116
117
118// Functions needed by the hardware module
119void *mac_drv_get_space(struct s_smc *smc, u_int size);
120void *mac_drv_get_desc_mem(struct s_smc *smc, u_int size);
121unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt);
122unsigned long dma_master(struct s_smc *smc, void *virt, int len, int flag);
123void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr,
124 int flag);
125void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd);
126void llc_restart_tx(struct s_smc *smc);
127void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
128 int frag_count, int len);
129void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
130 int frag_count);
131void mac_drv_fill_rxd(struct s_smc *smc);
132void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
133 int frag_count);
134int mac_drv_rx_init(struct s_smc *smc, int len, int fc, char *look_ahead,
135 int la_len);
136void dump_data(unsigned char *Data, int length);
137
138// External functions from the hardware module
139extern u_int mac_drv_check_space(void);
1da177e4
LT
140extern int mac_drv_init(struct s_smc *smc);
141extern void hwm_tx_frag(struct s_smc *smc, char far * virt, u_long phys,
142 int len, int frame_status);
143extern int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count,
144 int frame_len, int frame_status);
1da177e4
LT
145extern void fddi_isr(struct s_smc *smc);
146extern void hwm_rx_frag(struct s_smc *smc, char far * virt, u_long phys,
147 int len, int frame_status);
148extern void mac_drv_rx_mode(struct s_smc *smc, int mode);
149extern void mac_drv_clear_rx_queue(struct s_smc *smc);
150extern void enable_tx_irq(struct s_smc *smc, u_short queue);
1da177e4
LT
151
152static struct pci_device_id skfddi_pci_tbl[] = {
153 { PCI_VENDOR_ID_SK, PCI_DEVICE_ID_SK_FP, PCI_ANY_ID, PCI_ANY_ID, },
154 { } /* Terminating entry */
155};
156MODULE_DEVICE_TABLE(pci, skfddi_pci_tbl);
157MODULE_LICENSE("GPL");
158MODULE_AUTHOR("Mirko Lindner <mlindner@syskonnect.de>");
159
160// Define module-wide (static) variables
161
162static int num_boards; /* total number of adapters configured */
163
145186a3
SH
164static const struct net_device_ops skfp_netdev_ops = {
165 .ndo_open = skfp_open,
166 .ndo_stop = skfp_close,
167 .ndo_start_xmit = skfp_send_pkt,
168 .ndo_get_stats = skfp_ctl_get_stats,
169 .ndo_change_mtu = fddi_change_mtu,
170 .ndo_set_multicast_list = skfp_ctl_set_multicast_list,
171 .ndo_set_mac_address = skfp_ctl_set_mac_address,
172 .ndo_do_ioctl = skfp_ioctl,
173};
174
1da177e4
LT
175/*
176 * =================
177 * = skfp_init_one =
178 * =================
179 *
180 * Overview:
181 * Probes for supported FDDI PCI controllers
182 *
183 * Returns:
184 * Condition code
185 *
186 * Arguments:
187 * pdev - pointer to PCI device information
188 *
189 * Functional Description:
190 * This is now called by PCI driver registration process
191 * for each board found.
192 *
193 * Return Codes:
194 * 0 - This device (fddi0, fddi1, etc) configured successfully
195 * -ENODEV - No devices present, or no SysKonnect FDDI PCI device
196 * present for this device name
197 *
198 *
199 * Side Effects:
200 * Device structures for FDDI adapters (fddi0, fddi1, etc) are
201 * initialized and the board resources are read and stored in
202 * the device structure.
203 */
204static int skfp_init_one(struct pci_dev *pdev,
205 const struct pci_device_id *ent)
206{
207 struct net_device *dev;
208 struct s_smc *smc; /* board pointer */
209 void __iomem *mem;
210 int err;
211
ebc06eeb 212 pr_debug(KERN_INFO "entering skfp_init_one\n");
1da177e4
LT
213
214 if (num_boards == 0)
215 printk("%s\n", boot_msg);
216
217 err = pci_enable_device(pdev);
218 if (err)
219 return err;
220
221 err = pci_request_regions(pdev, "skfddi");
222 if (err)
223 goto err_out1;
224
225 pci_set_master(pdev);
226
227#ifdef MEM_MAPPED_IO
228 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
229 printk(KERN_ERR "skfp: region is not an MMIO resource\n");
230 err = -EIO;
231 goto err_out2;
232 }
233
234 mem = ioremap(pci_resource_start(pdev, 0), 0x4000);
235#else
236 if (!(pci_resource_flags(pdev, 1) & IO_RESOURCE_IO)) {
237 printk(KERN_ERR "skfp: region is not PIO resource\n");
238 err = -EIO;
239 goto err_out2;
240 }
241
242 mem = ioport_map(pci_resource_start(pdev, 1), FP_IO_LEN);
243#endif
244 if (!mem) {
245 printk(KERN_ERR "skfp: Unable to map register, "
246 "FDDI adapter will be disabled.\n");
247 err = -EIO;
248 goto err_out2;
249 }
250
251 dev = alloc_fddidev(sizeof(struct s_smc));
252 if (!dev) {
253 printk(KERN_ERR "skfp: Unable to allocate fddi device, "
254 "FDDI adapter will be disabled.\n");
255 err = -ENOMEM;
256 goto err_out3;
257 }
258
259 dev->irq = pdev->irq;
145186a3 260 dev->netdev_ops = &skfp_netdev_ops;
1da177e4 261
1da177e4
LT
262 SET_NETDEV_DEV(dev, &pdev->dev);
263
264 /* Initialize board structure with bus-specific info */
265 smc = netdev_priv(dev);
266 smc->os.dev = dev;
267 smc->os.bus_type = SK_BUS_TYPE_PCI;
268 smc->os.pdev = *pdev;
269 smc->os.QueueSkb = MAX_TX_QUEUE_LEN;
270 smc->os.MaxFrameSize = MAX_FRAME_SIZE;
271 smc->os.dev = dev;
272 smc->hw.slot = -1;
273 smc->hw.iop = mem;
274 smc->os.ResetRequested = FALSE;
275 skb_queue_head_init(&smc->os.SendSkbQueue);
276
277 dev->base_addr = (unsigned long)mem;
278
279 err = skfp_driver_init(dev);
280 if (err)
281 goto err_out4;
282
283 err = register_netdev(dev);
284 if (err)
285 goto err_out5;
286
287 ++num_boards;
288 pci_set_drvdata(pdev, dev);
289
290 if ((pdev->subsystem_device & 0xff00) == 0x5500 ||
291 (pdev->subsystem_device & 0xff00) == 0x5800)
292 printk("%s: SysKonnect FDDI PCI adapter"
293 " found (SK-%04X)\n", dev->name,
294 pdev->subsystem_device);
295 else
296 printk("%s: FDDI PCI adapter found\n", dev->name);
297
298 return 0;
299err_out5:
300 if (smc->os.SharedMemAddr)
301 pci_free_consistent(pdev, smc->os.SharedMemSize,
302 smc->os.SharedMemAddr,
303 smc->os.SharedMemDMA);
304 pci_free_consistent(pdev, MAX_FRAME_SIZE,
305 smc->os.LocalRxBuffer, smc->os.LocalRxBufferDMA);
306err_out4:
307 free_netdev(dev);
308err_out3:
309#ifdef MEM_MAPPED_IO
310 iounmap(mem);
311#else
312 ioport_unmap(mem);
313#endif
314err_out2:
315 pci_release_regions(pdev);
316err_out1:
317 pci_disable_device(pdev);
318 return err;
319}
320
321/*
322 * Called for each adapter board from pci_unregister_driver
323 */
324static void __devexit skfp_remove_one(struct pci_dev *pdev)
325{
326 struct net_device *p = pci_get_drvdata(pdev);
327 struct s_smc *lp = netdev_priv(p);
328
329 unregister_netdev(p);
330
331 if (lp->os.SharedMemAddr) {
332 pci_free_consistent(&lp->os.pdev,
333 lp->os.SharedMemSize,
334 lp->os.SharedMemAddr,
335 lp->os.SharedMemDMA);
336 lp->os.SharedMemAddr = NULL;
337 }
338 if (lp->os.LocalRxBuffer) {
339 pci_free_consistent(&lp->os.pdev,
340 MAX_FRAME_SIZE,
341 lp->os.LocalRxBuffer,
342 lp->os.LocalRxBufferDMA);
343 lp->os.LocalRxBuffer = NULL;
344 }
345#ifdef MEM_MAPPED_IO
346 iounmap(lp->hw.iop);
347#else
348 ioport_unmap(lp->hw.iop);
349#endif
350 pci_release_regions(pdev);
351 free_netdev(p);
352
353 pci_disable_device(pdev);
354 pci_set_drvdata(pdev, NULL);
355}
356
357/*
358 * ====================
359 * = skfp_driver_init =
360 * ====================
361 *
362 * Overview:
363 * Initializes remaining adapter board structure information
364 * and makes sure adapter is in a safe state prior to skfp_open().
365 *
366 * Returns:
367 * Condition code
368 *
369 * Arguments:
370 * dev - pointer to device information
371 *
372 * Functional Description:
373 * This function allocates additional resources such as the host memory
374 * blocks needed by the adapter.
375 * The adapter is also reset. The OS must call skfp_open() to open
376 * the adapter and bring it on-line.
377 *
378 * Return Codes:
379 * 0 - initialization succeeded
380 * -1 - initialization failed
381 */
382static int skfp_driver_init(struct net_device *dev)
383{
384 struct s_smc *smc = netdev_priv(dev);
385 skfddi_priv *bp = &smc->os;
386 int err = -EIO;
387
ebc06eeb 388 pr_debug(KERN_INFO "entering skfp_driver_init\n");
1da177e4
LT
389
390 // set the io address in private structures
391 bp->base_addr = dev->base_addr;
392
393 // Get the interrupt level from the PCI Configuration Table
394 smc->hw.irq = dev->irq;
395
396 spin_lock_init(&bp->DriverLock);
397
398 // Allocate invalid frame
399 bp->LocalRxBuffer = pci_alloc_consistent(&bp->pdev, MAX_FRAME_SIZE, &bp->LocalRxBufferDMA);
400 if (!bp->LocalRxBuffer) {
401 printk("could not allocate mem for ");
402 printk("LocalRxBuffer: %d byte\n", MAX_FRAME_SIZE);
403 goto fail;
404 }
405
406 // Determine the required size of the 'shared' memory area.
407 bp->SharedMemSize = mac_drv_check_space();
ebc06eeb 408 pr_debug(KERN_INFO "Memory for HWM: %ld\n", bp->SharedMemSize);
1da177e4
LT
409 if (bp->SharedMemSize > 0) {
410 bp->SharedMemSize += 16; // for descriptor alignment
411
412 bp->SharedMemAddr = pci_alloc_consistent(&bp->pdev,
413 bp->SharedMemSize,
414 &bp->SharedMemDMA);
415 if (!bp->SharedMemSize) {
416 printk("could not allocate mem for ");
417 printk("hardware module: %ld byte\n",
418 bp->SharedMemSize);
419 goto fail;
420 }
421 bp->SharedMemHeap = 0; // Nothing used yet.
422
423 } else {
424 bp->SharedMemAddr = NULL;
425 bp->SharedMemHeap = 0;
426 } // SharedMemSize > 0
427
428 memset(bp->SharedMemAddr, 0, bp->SharedMemSize);
429
430 card_stop(smc); // Reset adapter.
431
ebc06eeb 432 pr_debug(KERN_INFO "mac_drv_init()..\n");
1da177e4 433 if (mac_drv_init(smc) != 0) {
ebc06eeb 434 pr_debug(KERN_INFO "mac_drv_init() failed.\n");
1da177e4
LT
435 goto fail;
436 }
437 read_address(smc, NULL);
ebc06eeb 438 pr_debug(KERN_INFO "HW-Addr: %02x %02x %02x %02x %02x %02x\n",
1da177e4
LT
439 smc->hw.fddi_canon_addr.a[0],
440 smc->hw.fddi_canon_addr.a[1],
441 smc->hw.fddi_canon_addr.a[2],
442 smc->hw.fddi_canon_addr.a[3],
443 smc->hw.fddi_canon_addr.a[4],
444 smc->hw.fddi_canon_addr.a[5]);
445 memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
446
447 smt_reset_defaults(smc, 0);
448
449 return (0);
450
451fail:
452 if (bp->SharedMemAddr) {
453 pci_free_consistent(&bp->pdev,
454 bp->SharedMemSize,
455 bp->SharedMemAddr,
456 bp->SharedMemDMA);
457 bp->SharedMemAddr = NULL;
458 }
459 if (bp->LocalRxBuffer) {
460 pci_free_consistent(&bp->pdev, MAX_FRAME_SIZE,
461 bp->LocalRxBuffer, bp->LocalRxBufferDMA);
462 bp->LocalRxBuffer = NULL;
463 }
464 return err;
465} // skfp_driver_init
466
467
468/*
469 * =============
470 * = skfp_open =
471 * =============
472 *
473 * Overview:
474 * Opens the adapter
475 *
476 * Returns:
477 * Condition code
478 *
479 * Arguments:
480 * dev - pointer to device information
481 *
482 * Functional Description:
483 * This function brings the adapter to an operational state.
484 *
485 * Return Codes:
486 * 0 - Adapter was successfully opened
487 * -EAGAIN - Could not register IRQ
488 */
489static int skfp_open(struct net_device *dev)
490{
491 struct s_smc *smc = netdev_priv(dev);
492 int err;
493
ebc06eeb 494 pr_debug(KERN_INFO "entering skfp_open\n");
1da177e4 495 /* Register IRQ - support shared interrupts by passing device ptr */
2f220e30 496 err = request_irq(dev->irq, skfp_interrupt, IRQF_SHARED,
1da177e4
LT
497 dev->name, dev);
498 if (err)
499 return err;
500
501 /*
502 * Set current address to factory MAC address
503 *
504 * Note: We've already done this step in skfp_driver_init.
505 * However, it's possible that a user has set a node
506 * address override, then closed and reopened the
507 * adapter. Unless we reset the device address field
508 * now, we'll continue to use the existing modified
509 * address.
510 */
511 read_address(smc, NULL);
512 memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
513
514 init_smt(smc, NULL);
515 smt_online(smc, 1);
516 STI_FBI();
517
518 /* Clear local multicast address tables */
519 mac_clear_multicast(smc);
520
521 /* Disable promiscuous filter settings */
522 mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
523
524 netif_start_queue(dev);
525 return (0);
526} // skfp_open
527
528
529/*
530 * ==============
531 * = skfp_close =
532 * ==============
533 *
534 * Overview:
535 * Closes the device/module.
536 *
537 * Returns:
538 * Condition code
539 *
540 * Arguments:
541 * dev - pointer to device information
542 *
543 * Functional Description:
544 * This routine closes the adapter and brings it to a safe state.
545 * The interrupt service routine is deregistered with the OS.
546 * The adapter can be opened again with another call to skfp_open().
547 *
548 * Return Codes:
549 * Always return 0.
550 *
551 * Assumptions:
552 * No further requests for this adapter are made after this routine is
553 * called. skfp_open() can be called to reset and reinitialize the
554 * adapter.
555 */
/*
 * skfp_close - close the adapter and bring it to a safe state.
 * @dev: the net_device to close
 *
 * Teardown order matters here: adapter interrupts are masked first
 * (CLI_FBI), SMT is reset and the card stopped before the hardware
 * queues are cleared, and only then is the IRQ handler freed and the
 * driver-level send queue purged.  Always returns 0.
 */
static int skfp_close(struct net_device *dev)
{
	struct s_smc *smc = netdev_priv(dev);
	skfddi_priv *bp = &smc->os;

	CLI_FBI();		/* mask adapter interrupts before teardown */
	smt_reset_defaults(smc, 1);
	card_stop(smc);
	mac_drv_clear_tx_queue(smc);
	mac_drv_clear_rx_queue(smc);

	netif_stop_queue(dev);
	/* Deregister (free) IRQ */
	free_irq(dev->irq, dev);

	/* Drop any packets still queued by the driver and restore the
	 * queue credit to its full value for the next open. */
	skb_queue_purge(&bp->SendSkbQueue);
	bp->QueueSkb = MAX_TX_QUEUE_LEN;

	return (0);
}				// skfp_close
576
577
578/*
579 * ==================
580 * = skfp_interrupt =
581 * ==================
582 *
583 * Overview:
584 * Interrupt processing routine
585 *
586 * Returns:
587 * None
588 *
589 * Arguments:
590 * irq - interrupt vector
591 * dev_id - pointer to device information
1da177e4
LT
592 *
593 * Functional Description:
594 * This routine calls the interrupt processing routine for this adapter. It
595 * disables and reenables adapter interrupts, as appropriate. We can support
596 * shared interrupts since the incoming dev_id pointer provides our device
597 * structure context. All the real work is done in the hardware module.
598 *
599 * Return Codes:
600 * None
601 *
602 * Assumptions:
603 * The interrupt acknowledgement at the hardware level (eg. ACKing the PIC
604 * on Intel-based systems) is done by the operating system outside this
605 * routine.
606 *
607 * System interrupts are enabled through this call.
608 *
609 * Side Effects:
610 * Interrupts are disabled, then reenabled at the adapter.
611 */
612
/*
 * skfp_interrupt - top-level interrupt handler.
 * @irq:    interrupt vector (unused)
 * @dev_id: our net_device, as passed to request_irq()
 *
 * Because the IRQ line may be shared, the handler first checks the
 * interrupt mask and status registers to decide whether this adapter
 * actually raised the interrupt; if not, IRQ_NONE is returned so the
 * kernel can try the other handlers on the line.  Real work is done by
 * fddi_isr() in the hardware module, under the driver spinlock and with
 * adapter interrupts masked.
 */
static irqreturn_t skfp_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct s_smc *smc;	/* private board structure pointer */
	skfddi_priv *bp;

	smc = netdev_priv(dev);
	bp = &smc->os;

	// IRQs enabled or disabled ?
	if (inpd(ADDR(B0_IMSK)) == 0) {
		// IRQs are disabled: must be shared interrupt
		return IRQ_NONE;
	}
	// Note: At this point, IRQs are enabled.
	if ((inpd(ISR_A) & smc->hw.is_imask) == 0) {	// IRQ?
		// Adapter did not issue an IRQ: must be shared interrupt
		return IRQ_NONE;
	}
	CLI_FBI();		// Disable IRQs from our adapter.
	spin_lock(&bp->DriverLock);

	// Call interrupt handler in hardware module (HWM).
	fddi_isr(smc);

	// A fatal condition inside the ISR may request a full reset;
	// perform it here, still under the lock.
	if (smc->os.ResetRequested) {
		ResetAdapter(smc);
		smc->os.ResetRequested = FALSE;
	}
	spin_unlock(&bp->DriverLock);
	STI_FBI();		// Enable IRQs from our adapter.

	return IRQ_HANDLED;
}				// skfp_interrupt
647
648
649/*
650 * ======================
651 * = skfp_ctl_get_stats =
652 * ======================
653 *
654 * Overview:
655 * Get statistics for FDDI adapter
656 *
657 * Returns:
658 * Pointer to FDDI statistics structure
659 *
660 * Arguments:
661 * dev - pointer to device information
662 *
663 * Functional Description:
664 * Gets current MIB objects from adapter, then
665 * returns FDDI statistics structure as defined
666 * in if_fddi.h.
667 *
668 * Note: Since the FDDI statistics structure is
669 * still new and the device structure doesn't
670 * have an FDDI-specific get statistics handler,
671 * we'll return the FDDI statistics structure as
672 * a pointer to an Ethernet statistics structure.
673 * That way, at least the first part of the statistics
674 * structure can be decoded properly.
675 * We'll have to pay attention to this routine as the
676 * device structure becomes more mature and LAN media
677 * independent.
678 *
679 */
/*
 * skfp_ctl_get_stats - return the driver statistics structure.
 * @dev: the net_device queried
 *
 * NOTE(review): this is largely a placeholder.  Only the two
 * port_bs_flag entries are filled in, with dummy constants, and the
 * whole SMT-MIB/counter translation below is compiled out with #if 0
 * (see the "goos: need to fill out fddi statistic" marker).  The
 * function returns MacStat cast to a net_device_stats pointer, so only
 * the leading, layout-compatible part of the structure is meaningful
 * to generic callers.
 */
static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev)
{
	struct s_smc *bp = netdev_priv(dev);

	/* Fill the bp->stats structure with driver-maintained counters */

	bp->os.MacStat.port_bs_flag[0] = 0x1234;	/* dummy value */
	bp->os.MacStat.port_bs_flag[1] = 0x5678;	/* dummy value */
// goos: need to fill out fddi statistic
#if 0
	/* Get FDDI SMT MIB objects */

/* Fill the bp->stats structure with the SMT MIB object values */

	memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id));
	bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id;
	bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id;
	bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id;
	memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data));
	bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id;
	bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct;
	bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct;
	bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct;
	bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths;
	bp->stats.smt_config_capabilities = bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities;
	bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy;
	bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy;
	bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify;
	bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy;
	bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration;
	bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present;
	bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state;
	bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state;
	bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag;
	bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status;
	bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag;
	bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls;
	bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls;
	bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions;
	bp->stats.mac_t_max_capability = bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability;
	bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability;
	bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths;
	bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path;
	memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN);
	memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN);
	memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN);
	memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN);
	bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test;
	bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths;
	bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type;
	memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN);
	bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req;
	bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg;
	bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max;
	bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value;
	bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold;
	bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio;
	bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state;
	bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag;
	bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag;
	bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag;
	bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available;
	bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present;
	bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable;
	bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound;
	bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound;
	bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req;
	memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration));
	bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0];
	bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1];
	bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0];
	bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1];
	bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0];
	bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1];
	bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0];
	bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1];
	bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0];
	bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1];
	memcpy(&bp->stats.port_requested_paths[0 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3);
	memcpy(&bp->stats.port_requested_paths[1 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3);
	bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0];
	bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1];
	bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0];
	bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1];
	bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0];
	bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1];
	bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0];
	bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1];
	bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0];
	bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1];
	bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0];
	bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1];
	bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0];
	bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1];
	bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0];
	bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1];
	bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0];
	bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1];
	bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0];
	bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1];
	bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0];
	bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1];
	bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0];
	bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1];
	bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0];
	bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1];


	/* Fill the bp->stats structure with the FDDI counter values */

	bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls;
	bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls;
	bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls;
	bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls;
	bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls;
	bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls;
	bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls;
	bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls;
	bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls;
	bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
	bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;

#endif
	return ((struct net_device_stats *) &bp->os.MacStat);
}				// ctl_get_stat
805
806
807/*
808 * ==============================
809 * = skfp_ctl_set_multicast_list =
810 * ==============================
811 *
812 * Overview:
813 * Enable/Disable LLC frame promiscuous mode reception
814 * on the adapter and/or update multicast address table.
815 *
816 * Returns:
817 * None
818 *
819 * Arguments:
820 * dev - pointer to device information
821 *
822 * Functional Description:
823 * This function acquires the driver lock and only calls
824 * skfp_ctl_set_multicast_list_wo_lock then.
825 * This routine follows a fairly simple algorithm for setting the
826 * adapter filters and CAM:
827 *
828 * if IFF_PROMISC flag is set
829 * enable promiscuous mode
830 * else
831 * disable promiscuous mode
832 * if number of multicast addresses <= max. multicast number
833 * add mc addresses to adapter table
834 * else
835 * enable promiscuous mode
836 * update adapter filters
837 *
838 * Assumptions:
839 * Multicast addresses are presented in canonical (LSB) format.
840 *
841 * Side Effects:
842 * On-board adapter filters are updated.
843 */
844static void skfp_ctl_set_multicast_list(struct net_device *dev)
845{
846 struct s_smc *smc = netdev_priv(dev);
847 skfddi_priv *bp = &smc->os;
848 unsigned long Flags;
849
850 spin_lock_irqsave(&bp->DriverLock, Flags);
851 skfp_ctl_set_multicast_list_wo_lock(dev);
852 spin_unlock_irqrestore(&bp->DriverLock, Flags);
853 return;
854} // skfp_ctl_set_multicast_list
855
856
857
858static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
859{
860 struct s_smc *smc = netdev_priv(dev);
861 struct dev_mc_list *dmi; /* ptr to multicast addr entry */
862 int i;
863
864 /* Enable promiscuous mode, if necessary */
865 if (dev->flags & IFF_PROMISC) {
866 mac_drv_rx_mode(smc, RX_ENABLE_PROMISC);
ebc06eeb 867 pr_debug(KERN_INFO "PROMISCUOUS MODE ENABLED\n");
1da177e4
LT
868 }
869 /* Else, update multicast address table */
870 else {
871 mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
ebc06eeb 872 pr_debug(KERN_INFO "PROMISCUOUS MODE DISABLED\n");
1da177e4
LT
873
874 // Reset all MC addresses
875 mac_clear_multicast(smc);
876 mac_drv_rx_mode(smc, RX_DISABLE_ALLMULTI);
877
878 if (dev->flags & IFF_ALLMULTI) {
879 mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
ebc06eeb 880 pr_debug(KERN_INFO "ENABLE ALL MC ADDRESSES\n");
1da177e4
LT
881 } else if (dev->mc_count > 0) {
882 if (dev->mc_count <= FPMAX_MULTICAST) {
883 /* use exact filtering */
884
885 // point to first multicast addr
886 dmi = dev->mc_list;
887
888 for (i = 0; i < dev->mc_count; i++) {
889 mac_add_multicast(smc,
890 (struct fddi_addr *)dmi->dmi_addr,
891 1);
892
ebc06eeb
AB
893 pr_debug(KERN_INFO "ENABLE MC ADDRESS:");
894 pr_debug(" %02x %02x %02x ",
1da177e4
LT
895 dmi->dmi_addr[0],
896 dmi->dmi_addr[1],
897 dmi->dmi_addr[2]);
ebc06eeb 898 pr_debug("%02x %02x %02x\n",
1da177e4
LT
899 dmi->dmi_addr[3],
900 dmi->dmi_addr[4],
901 dmi->dmi_addr[5]);
902 dmi = dmi->next;
903 } // for
904
905 } else { // more MC addresses than HW supports
906
907 mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
ebc06eeb 908 pr_debug(KERN_INFO "ENABLE ALL MC ADDRESSES\n");
1da177e4
LT
909 }
910 } else { // no MC addresses
911
ebc06eeb 912 pr_debug(KERN_INFO "DISABLE ALL MC ADDRESSES\n");
1da177e4
LT
913 }
914
915 /* Update adapter filters */
916 mac_update_multicast(smc);
917 }
918 return;
919} // skfp_ctl_set_multicast_list_wo_lock
920
921
922/*
923 * ===========================
924 * = skfp_ctl_set_mac_address =
925 * ===========================
926 *
927 * Overview:
928 * set new mac address on adapter and update dev_addr field in device table.
929 *
930 * Returns:
931 * None
932 *
933 * Arguments:
934 * dev - pointer to device information
935 * addr - pointer to sockaddr structure containing unicast address to set
936 *
937 * Assumptions:
938 * The address pointed to by addr->sa_data is a valid unicast
939 * address and is presented in canonical (LSB) format.
940 */
941static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr)
942{
943 struct s_smc *smc = netdev_priv(dev);
944 struct sockaddr *p_sockaddr = (struct sockaddr *) addr;
945 skfddi_priv *bp = &smc->os;
946 unsigned long Flags;
947
948
949 memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN);
950 spin_lock_irqsave(&bp->DriverLock, Flags);
951 ResetAdapter(smc);
952 spin_unlock_irqrestore(&bp->DriverLock, Flags);
953
954 return (0); /* always return zero */
955} // skfp_ctl_set_mac_address
956
957
958/*
959 * ==============
960 * = skfp_ioctl =
961 * ==============
962 *
963 * Overview:
964 *
965 * Perform IOCTL call functions here. Some are privileged operations and the
966 * effective uid is checked in those cases.
967 *
968 * Returns:
969 * status value
970 * 0 - success
971 * other - failure
972 *
973 * Arguments:
974 * dev - pointer to device information
975 * rq - pointer to ioctl request structure
976 * cmd - ?
977 *
978 */
979
980
981static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
982{
983 struct s_smc *smc = netdev_priv(dev);
984 skfddi_priv *lp = &smc->os;
985 struct s_skfp_ioctl ioc;
986 int status = 0;
987
988 if (copy_from_user(&ioc, rq->ifr_data, sizeof(struct s_skfp_ioctl)))
989 return -EFAULT;
990
991 switch (ioc.cmd) {
992 case SKFP_GET_STATS: /* Get the driver statistics */
993 ioc.len = sizeof(lp->MacStat);
994 status = copy_to_user(ioc.data, skfp_ctl_get_stats(dev), ioc.len)
995 ? -EFAULT : 0;
996 break;
997 case SKFP_CLR_STATS: /* Zero out the driver statistics */
998 if (!capable(CAP_NET_ADMIN)) {
1da177e4 999 status = -EPERM;
c25b9abb
RK
1000 } else {
1001 memset(&lp->MacStat, 0, sizeof(lp->MacStat));
1da177e4
LT
1002 }
1003 break;
1004 default:
af901ca1 1005 printk("ioctl for %s: unknown cmd: %04x\n", dev->name, ioc.cmd);
1da177e4
LT
1006 status = -EOPNOTSUPP;
1007
1008 } // switch
1009
1010 return status;
1011} // skfp_ioctl
1012
1013
1014/*
1015 * =====================
1016 * = skfp_send_pkt =
1017 * =====================
1018 *
1019 * Overview:
1020 * Queues a packet for transmission and try to transmit it.
1021 *
1022 * Returns:
1023 * Condition code
1024 *
1025 * Arguments:
1026 * skb - pointer to sk_buff to queue for transmission
1027 * dev - pointer to device information
1028 *
1029 * Functional Description:
1030 * Here we assume that an incoming skb transmit request
1031 * is contained in a single physically contiguous buffer
1032 * in which the virtual address of the start of packet
1033 * (skb->data) can be converted to a physical address
1034 * by using pci_map_single().
1035 *
1036 * We have an internal queue for packets we can not send
1037 * immediately. Packets in this queue can be given to the
1038 * adapter if transmit buffers are freed.
1039 *
1040 * We can't free the skb until after it's been DMA'd
1041 * out by the adapter, so we'll keep it in the driver and
1042 * return it in mac_drv_tx_complete.
1043 *
1044 * Return Codes:
1045 * 0 - driver has queued and/or sent packet
1046 * 1 - caller should requeue the sk_buff for later transmission
1047 *
1048 * Assumptions:
1049 * The entire packet is stored in one physically
1050 * contiguous buffer which is not cached and whose
1051 * 32-bit physical address can be determined.
1052 *
1053 * It's vital that this routine is NOT reentered for the
1054 * same board and that the OS is not in another section of
1055 * code (eg. skfp_interrupt) for the same board on a
1056 * different thread.
1057 *
1058 * Side Effects:
1059 * None
1060 */
61357325
SH
1061static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
1062 struct net_device *dev)
1da177e4
LT
1063{
1064 struct s_smc *smc = netdev_priv(dev);
1065 skfddi_priv *bp = &smc->os;
1066
ebc06eeb 1067 pr_debug(KERN_INFO "skfp_send_pkt\n");
1da177e4
LT
1068
1069 /*
1070 * Verify that incoming transmit request is OK
1071 *
1072 * Note: The packet size check is consistent with other
1073 * Linux device drivers, although the correct packet
1074 * size should be verified before calling the
1075 * transmit routine.
1076 */
1077
1078 if (!(skb->len >= FDDI_K_LLC_ZLEN && skb->len <= FDDI_K_LLC_LEN)) {
1079 bp->MacStat.gen.tx_errors++; /* bump error counter */
1080 // dequeue packets from xmt queue and send them
1081 netif_start_queue(dev);
1082 dev_kfree_skb(skb);
ec634fe3 1083 return NETDEV_TX_OK; /* return "success" */
1da177e4
LT
1084 }
1085 if (bp->QueueSkb == 0) { // return with tbusy set: queue full
1086
1087 netif_stop_queue(dev);
5b548140 1088 return NETDEV_TX_BUSY;
1da177e4
LT
1089 }
1090 bp->QueueSkb--;
1091 skb_queue_tail(&bp->SendSkbQueue, skb);
1092 send_queued_packets(netdev_priv(dev));
1093 if (bp->QueueSkb == 0) {
1094 netif_stop_queue(dev);
1095 }
1096 dev->trans_start = jiffies;
6ed10654 1097 return NETDEV_TX_OK;
1da177e4
LT
1098
1099} // skfp_send_pkt
1100
1101
1102/*
1103 * =======================
1104 * = send_queued_packets =
1105 * =======================
1106 *
1107 * Overview:
1108 * Send packets from the driver queue as long as there are some and
1109 * transmit resources are available.
1110 *
1111 * Returns:
1112 * None
1113 *
1114 * Arguments:
1115 * smc - pointer to smc (adapter) structure
1116 *
1117 * Functional Description:
1118 * Take a packet from queue if there is any. If not, then we are done.
1119 * Check if there are resources to send the packet. If not, requeue it
1120 * and exit.
1121 * Set packet descriptor flags and give packet to adapter.
1122 * Check if any send resources can be freed (we do not use the
1123 * transmit complete interrupt).
1124 */
1125static void send_queued_packets(struct s_smc *smc)
1126{
1127 skfddi_priv *bp = &smc->os;
1128 struct sk_buff *skb;
1129 unsigned char fc;
1130 int queue;
1131 struct s_smt_fp_txd *txd; // Current TxD.
1132 dma_addr_t dma_address;
1133 unsigned long Flags;
1134
1135 int frame_status; // HWM tx frame status.
1136
ebc06eeb 1137 pr_debug(KERN_INFO "send queued packets\n");
1da177e4
LT
1138 for (;;) {
1139 // send first buffer from queue
1140 skb = skb_dequeue(&bp->SendSkbQueue);
1141
1142 if (!skb) {
ebc06eeb 1143 pr_debug(KERN_INFO "queue empty\n");
1da177e4
LT
1144 return;
1145 } // queue empty !
1146
1147 spin_lock_irqsave(&bp->DriverLock, Flags);
1148 fc = skb->data[0];
1149 queue = (fc & FC_SYNC_BIT) ? QUEUE_S : QUEUE_A0;
1150#ifdef ESS
1151 // Check if the frame may/must be sent as a synchronous frame.
1152
1153 if ((fc & ~(FC_SYNC_BIT | FC_LLC_PRIOR)) == FC_ASYNC_LLC) {
1154 // It's an LLC frame.
1155 if (!smc->ess.sync_bw_available)
1156 fc &= ~FC_SYNC_BIT; // No bandwidth available.
1157
1158 else { // Bandwidth is available.
1159
1160 if (smc->mib.fddiESSSynchTxMode) {
1161 // Send as sync. frame.
1162 fc |= FC_SYNC_BIT;
1163 }
1164 }
1165 }
1166#endif // ESS
1167 frame_status = hwm_tx_init(smc, fc, 1, skb->len, queue);
1168
1169 if ((frame_status & (LOC_TX | LAN_TX)) == 0) {
1170 // Unable to send the frame.
1171
1172 if ((frame_status & RING_DOWN) != 0) {
1173 // Ring is down.
ebc06eeb 1174 pr_debug("Tx attempt while ring down.\n");
1da177e4 1175 } else if ((frame_status & OUT_OF_TXD) != 0) {
ebc06eeb 1176 pr_debug("%s: out of TXDs.\n", bp->dev->name);
1da177e4 1177 } else {
ebc06eeb 1178 pr_debug("%s: out of transmit resources",
1da177e4
LT
1179 bp->dev->name);
1180 }
1181
1182 // Note: We will retry the operation as soon as
1183 // transmit resources become available.
1184 skb_queue_head(&bp->SendSkbQueue, skb);
1185 spin_unlock_irqrestore(&bp->DriverLock, Flags);
1186 return; // Packet has been queued.
1187
1188 } // if (unable to send frame)
1189
1190 bp->QueueSkb++; // one packet less in local queue
1191
1192 // source address in packet ?
1193 CheckSourceAddress(skb->data, smc->hw.fddi_canon_addr.a);
1194
1195 txd = (struct s_smt_fp_txd *) HWM_GET_CURR_TXD(smc, queue);
1196
1197 dma_address = pci_map_single(&bp->pdev, skb->data,
1198 skb->len, PCI_DMA_TODEVICE);
1199 if (frame_status & LAN_TX) {
1200 txd->txd_os.skb = skb; // save skb
1201 txd->txd_os.dma_addr = dma_address; // save dma mapping
1202 }
1203 hwm_tx_frag(smc, skb->data, dma_address, skb->len,
1204 frame_status | FIRST_FRAG | LAST_FRAG | EN_IRQ_EOF);
1205
1206 if (!(frame_status & LAN_TX)) { // local only frame
1207 pci_unmap_single(&bp->pdev, dma_address,
1208 skb->len, PCI_DMA_TODEVICE);
1209 dev_kfree_skb_irq(skb);
1210 }
1211 spin_unlock_irqrestore(&bp->DriverLock, Flags);
1212 } // for
1213
1214 return; // never reached
1215
1216} // send_queued_packets
1217
1218
1219/************************
1220 *
1221 * CheckSourceAddress
1222 *
1223 * Verify if the source address is set. Insert it if necessary.
1224 *
1225 ************************/
static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr)
{
	unsigned char sr_bit;

	/* First byte of the source address (offset 1+6, after the FC
	 * byte): any bit set other than the source-routing bit means an
	 * address is already present — leave the frame untouched. */
	if ((frame[1 + 6] & ~0x01) != 0)
		return;
	/* Fifth SA byte must be zero too, or the frame is left alone. */
	if (frame[1 + 10] != 0)
		return;
	/* Preserve the source-routing indication, then insert our MAC. */
	sr_bit = frame[1 + 6] & 0x01;
	memcpy(&frame[1 + 6], hw_addr, 6);
	/* NOTE(review): the preserved bit is OR'ed into frame[8], not
	 * frame[7] where it was read from — kept as in the original;
	 * confirm against the FDDI canonical SA layout. */
	frame[8] |= sr_bit;
}				// CheckSourceAddress
1239
1240
1241/************************
1242 *
1243 * ResetAdapter
1244 *
1245 * Reset the adapter and bring it back to operational mode.
1246 * Args
1247 * smc - A pointer to the SMT context struct.
1248 * Out
1249 * Nothing.
1250 *
1251 ************************/
1252static void ResetAdapter(struct s_smc *smc)
1253{
1254
ebc06eeb 1255 pr_debug(KERN_INFO "[fddi: ResetAdapter]\n");
1da177e4
LT
1256
1257 // Stop the adapter.
1258
1259 card_stop(smc); // Stop all activity.
1260
1261 // Clear the transmit and receive descriptor queues.
1262 mac_drv_clear_tx_queue(smc);
1263 mac_drv_clear_rx_queue(smc);
1264
1265 // Restart the adapter.
1266
1267 smt_reset_defaults(smc, 1); // Initialize the SMT module.
1268
1269 init_smt(smc, (smc->os.dev)->dev_addr); // Initialize the hardware.
1270
1271 smt_online(smc, 1); // Insert into the ring again.
1272 STI_FBI();
1273
1274 // Restore original receive mode (multicasts, promiscuous, etc.).
1275 skfp_ctl_set_multicast_list_wo_lock(smc->os.dev);
1276} // ResetAdapter
1277
1278
1279//--------------- functions called by hardware module ----------------
1280
1281/************************
1282 *
1283 * llc_restart_tx
1284 *
1285 * The hardware driver calls this routine when the transmit complete
1286 * interrupt bits (end of frame) for the synchronous or asynchronous
1287 * queue is set.
1288 *
1289 * NOTE The hardware driver calls this function also if no packets are queued.
1290 * The routine must be able to handle this case.
1291 * Args
1292 * smc - A pointer to the SMT context struct.
1293 * Out
1294 * Nothing.
1295 *
1296 ************************/
1297void llc_restart_tx(struct s_smc *smc)
1298{
1299 skfddi_priv *bp = &smc->os;
1300
ebc06eeb 1301 pr_debug(KERN_INFO "[llc_restart_tx]\n");
1da177e4
LT
1302
1303 // Try to send queued packets
1304 spin_unlock(&bp->DriverLock);
1305 send_queued_packets(smc);
1306 spin_lock(&bp->DriverLock);
1307 netif_start_queue(bp->dev);// system may send again if it was blocked
1308
1309} // llc_restart_tx
1310
1311
1312/************************
1313 *
1314 * mac_drv_get_space
1315 *
1316 * The hardware module calls this function to allocate the memory
1317 * for the SMT MBufs if the define MB_OUTSIDE_SMC is specified.
1318 * Args
1319 * smc - A pointer to the SMT context struct.
1320 *
1321 * size - Size of memory in bytes to allocate.
1322 * Out
1323 * != 0 A pointer to the virtual address of the allocated memory.
1324 * == 0 Allocation error.
1325 *
1326 ************************/
1327void *mac_drv_get_space(struct s_smc *smc, unsigned int size)
1328{
1329 void *virt;
1330
ebc06eeb 1331 pr_debug(KERN_INFO "mac_drv_get_space (%d bytes), ", size);
1da177e4
LT
1332 virt = (void *) (smc->os.SharedMemAddr + smc->os.SharedMemHeap);
1333
1334 if ((smc->os.SharedMemHeap + size) > smc->os.SharedMemSize) {
1335 printk("Unexpected SMT memory size requested: %d\n", size);
1336 return (NULL);
1337 }
1338 smc->os.SharedMemHeap += size; // Move heap pointer.
1339
ebc06eeb
AB
1340 pr_debug(KERN_INFO "mac_drv_get_space end\n");
1341 pr_debug(KERN_INFO "virt addr: %lx\n", (ulong) virt);
1342 pr_debug(KERN_INFO "bus addr: %lx\n", (ulong)
1da177e4
LT
1343 (smc->os.SharedMemDMA +
1344 ((char *) virt - (char *)smc->os.SharedMemAddr)));
1345 return (virt);
1346} // mac_drv_get_space
1347
1348
1349/************************
1350 *
1351 * mac_drv_get_desc_mem
1352 *
1353 * This function is called by the hardware dependent module.
1354 * It allocates the memory for the RxD and TxD descriptors.
1355 *
1356 * This memory must be non-cached, non-movable and non-swappable.
1357 * This memory should start at a physical page boundary.
1358 * Args
1359 * smc - A pointer to the SMT context struct.
1360 *
1361 * size - Size of memory in bytes to allocate.
1362 * Out
1363 * != 0 A pointer to the virtual address of the allocated memory.
1364 * == 0 Allocation error.
1365 *
1366 ************************/
1367void *mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size)
1368{
1369
1370 char *virt;
1371
ebc06eeb 1372 pr_debug(KERN_INFO "mac_drv_get_desc_mem\n");
1da177e4
LT
1373
1374 // Descriptor memory must be aligned on 16-byte boundary.
1375
1376 virt = mac_drv_get_space(smc, size);
1377
1378 size = (u_int) (16 - (((unsigned long) virt) & 15UL));
1379 size = size % 16;
1380
ebc06eeb
AB
1381 pr_debug("Allocate %u bytes alignment gap ", size);
1382 pr_debug("for descriptor memory.\n");
1da177e4
LT
1383
1384 if (!mac_drv_get_space(smc, size)) {
1385 printk("fddi: Unable to align descriptor memory.\n");
1386 return (NULL);
1387 }
1388 return (virt + size);
1389} // mac_drv_get_desc_mem
1390
1391
1392/************************
1393 *
1394 * mac_drv_virt2phys
1395 *
1396 * Get the physical address of a given virtual address.
1397 * Args
1398 * smc - A pointer to the SMT context struct.
1399 *
1400 * virt - A (virtual) pointer into our 'shared' memory area.
1401 * Out
1402 * Physical address of the given virtual address.
1403 *
1404 ************************/
1405unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt)
1406{
1407 return (smc->os.SharedMemDMA +
1408 ((char *) virt - (char *)smc->os.SharedMemAddr));
1409} // mac_drv_virt2phys
1410
1411
1412/************************
1413 *
1414 * dma_master
1415 *
1416 * The HWM calls this function, when the driver leads through a DMA
1417 * transfer. If the OS-specific module must prepare the system hardware
1418 * for the DMA transfer, it should do it in this function.
1419 *
1420 * The hardware module calls this dma_master if it wants to send an SMT
1421 * frame. This means that the virt address passed in here is part of
1422 * the 'shared' memory area.
1423 * Args
1424 * smc - A pointer to the SMT context struct.
1425 *
1426 * virt - The virtual address of the data.
1427 *
1428 * len - The length in bytes of the data.
1429 *
1430 * flag - Indicates the transmit direction and the buffer type:
1431 * DMA_RD (0x01) system RAM ==> adapter buffer memory
1432 * DMA_WR (0x02) adapter buffer memory ==> system RAM
1433 * SMT_BUF (0x80) SMT buffer
1434 *
1435 * >> NOTE: SMT_BUF and DMA_RD are always set for PCI. <<
1436 * Out
1437 * Returns the pyhsical address for the DMA transfer.
1438 *
1439 ************************/
1440u_long dma_master(struct s_smc * smc, void *virt, int len, int flag)
1441{
1442 return (smc->os.SharedMemDMA +
1443 ((char *) virt - (char *)smc->os.SharedMemAddr));
1444} // dma_master
1445
1446
1447/************************
1448 *
1449 * dma_complete
1450 *
1451 * The hardware module calls this routine when it has completed a DMA
1452 * transfer. If the operating system dependent module has set up the DMA
1453 * channel via dma_master() (e.g. Windows NT or AIX) it should clean up
1454 * the DMA channel.
1455 * Args
1456 * smc - A pointer to the SMT context struct.
1457 *
1458 * descr - A pointer to a TxD or RxD, respectively.
1459 *
1460 * flag - Indicates the DMA transfer direction / SMT buffer:
1461 * DMA_RD (0x01) system RAM ==> adapter buffer memory
1462 * DMA_WR (0x02) adapter buffer memory ==> system RAM
1463 * SMT_BUF (0x80) SMT buffer (managed by HWM)
1464 * Out
1465 * Nothing.
1466 *
1467 ************************/
/* DMA-complete callback from the HWM; see the block comment above for
 * the flag semantics.  Only dynamic RX mappings need work here. */
void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr, int flag)
{
	/* For TX buffers, there are two cases. If it is an SMT transmit
	 * buffer, there is nothing to do since we use consistent memory
	 * for the 'shared' memory area. The other case is for normal
	 * transmit packets given to us by the networking stack, and in
	 * that case we cleanup the PCI DMA mapping in mac_drv_tx_complete
	 * below.
	 *
	 * For RX buffers, we have to unmap dynamic PCI DMA mappings here
	 * because the hardware module is about to potentially look at
	 * the contents of the buffer. If we did not call the PCI DMA
	 * unmap first, the hardware module could read inconsistent data.
	 */
	if (flag & DMA_WR) {
		skfddi_priv *bp = &smc->os;
		volatile struct s_smt_fp_rxd *r = &descr->r;

		/* If SKB is NULL, we used the local buffer. */
		if (r->rxd_os.skb && r->rxd_os.dma_addr) {
			int MaxFrameSize = bp->MaxFrameSize;

			pci_unmap_single(&bp->pdev, r->rxd_os.dma_addr,
					 MaxFrameSize, PCI_DMA_FROMDEVICE);
			/* Clear the handle so the unmap cannot be repeated. */
			r->rxd_os.dma_addr = 0;
		}
	}
}				// dma_complete
1496
1497
1498/************************
1499 *
1500 * mac_drv_tx_complete
1501 *
1502 * Transmit of a packet is complete. Release the tx staging buffer.
1503 *
1504 * Args
1505 * smc - A pointer to the SMT context struct.
1506 *
1507 * txd - A pointer to the last TxD which is used by the frame.
1508 * Out
1509 * Returns nothing.
1510 *
1511 ************************/
1512void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd)
1513{
1514 struct sk_buff *skb;
1515
ebc06eeb 1516 pr_debug(KERN_INFO "entering mac_drv_tx_complete\n");
1da177e4
LT
1517 // Check if this TxD points to a skb
1518
1519 if (!(skb = txd->txd_os.skb)) {
ebc06eeb 1520 pr_debug("TXD with no skb assigned.\n");
1da177e4
LT
1521 return;
1522 }
1523 txd->txd_os.skb = NULL;
1524
1525 // release the DMA mapping
1526 pci_unmap_single(&smc->os.pdev, txd->txd_os.dma_addr,
1527 skb->len, PCI_DMA_TODEVICE);
1528 txd->txd_os.dma_addr = 0;
1529
1530 smc->os.MacStat.gen.tx_packets++; // Count transmitted packets.
1531 smc->os.MacStat.gen.tx_bytes+=skb->len; // Count bytes
1532
1533 // free the skb
1534 dev_kfree_skb_irq(skb);
1535
ebc06eeb 1536 pr_debug(KERN_INFO "leaving mac_drv_tx_complete\n");
1da177e4
LT
1537} // mac_drv_tx_complete
1538
1539
1540/************************
1541 *
1542 * dump packets to logfile
1543 *
1544 ************************/
#ifdef DUMPPACKETS
/* Hex-dump at most the first 64 bytes of a packet to the kernel log,
 * eight bytes per line; compiled in only with DUMPPACKETS defined. */
void dump_data(unsigned char *Data, int length)
{
	int i, j;
	unsigned char s[255], sh[10];
	if (length > 64) {
		length = 64;	// cap output at 64 bytes
	}
	printk(KERN_INFO "---Packet start---\n");
	for (i = 0, j = 0; i < length / 8; i++, j += 8)
		printk(KERN_INFO "%02x %02x %02x %02x %02x %02x %02x %02x\n",
		       Data[j + 0], Data[j + 1], Data[j + 2], Data[j + 3],
		       Data[j + 4], Data[j + 5], Data[j + 6], Data[j + 7]);
	// assemble the trailing partial line (length % 8 bytes) in s
	strcpy(s, "");
	for (i = 0; i < length % 8; i++) {
		sprintf(sh, "%02x ", Data[j + i]);
		strcat(s, sh);
	}
	printk(KERN_INFO "%s\n", s);
	printk(KERN_INFO "------------------\n");
}				// dump_data
#else
#define dump_data(data,len)
#endif				// DUMPPACKETS
1569
1570/************************
1571 *
1572 * mac_drv_rx_complete
1573 *
1574 * The hardware module calls this function if an LLC frame is received
1575 * in a receive buffer. Also the SMT, NSA, and directed beacon frames
1576 * from the network will be passed to the LLC layer by this function
1577 * if passing is enabled.
1578 *
1579 * mac_drv_rx_complete forwards the frame to the LLC layer if it should
1580 * be received. It also fills the RxD ring with new receive buffers if
1581 * some can be queued.
1582 * Args
1583 * smc - A pointer to the SMT context struct.
1584 *
1585 * rxd - A pointer to the first RxD which is used by the receive frame.
1586 *
1587 * frag_count - Count of RxDs used by the received frame.
1588 *
1589 * len - Frame length.
1590 * Out
1591 * Nothing.
1592 *
1593 ************************/
/* HWM receive-complete callback; see the block comment above.  The RIF
 * stripping below shifts the MAC header in place, so statement order in
 * that branch matters. */
void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
			 int frag_count, int len)
{
	skfddi_priv *bp = &smc->os;
	struct sk_buff *skb;
	unsigned char *virt, *cp;
	unsigned short ri;
	u_int RifLength;

	pr_debug(KERN_INFO "entering mac_drv_rx_complete (len=%d)\n", len);
	if (frag_count != 1) {	// This is not allowed to happen.

		printk("fddi: Multi-fragment receive!\n");
		goto RequeueRxd;	// Re-use the given RXD(s).

	}
	skb = rxd->rxd_os.skb;
	if (!skb) {
		// Descriptor was filled from the local fallback buffer,
		// so there is no frame to deliver.
		pr_debug(KERN_INFO "No skb in rxd\n");
		smc->os.MacStat.gen.rx_errors++;
		goto RequeueRxd;
	}
	virt = skb->data;

	// The DMA mapping was released in dma_complete above.

	dump_data(skb->data, len);

	/*
	 * FDDI Frame format:
	 * +-------+-------+-------+------------+--------+------------+
	 * | FC[1] | DA[6] | SA[6] | RIF[0..18] | LLC[3] | Data[0..n] |
	 * +-------+-------+-------+------------+--------+------------+
	 *
	 * FC = Frame Control
	 * DA = Destination Address
	 * SA = Source Address
	 * RIF = Routing Information Field
	 * LLC = Logical Link Control
	 */

	// Remove Routing Information Field (RIF), if present.

	if ((virt[1 + 6] & FDDI_RII) == 0)
		RifLength = 0;
	else {
		int n;
// goos: RIF removal has still to be tested
		pr_debug(KERN_INFO "RIF found\n");
		// Get RIF length from Routing Control (RC) field.
		cp = virt + FDDI_MAC_HDR_LEN;	// Point behind MAC header.

		ri = ntohs(*((__be16 *) cp));
		RifLength = ri & FDDI_RCF_LEN_MASK;
		if (len < (int) (FDDI_MAC_HDR_LEN + RifLength)) {
			printk("fddi: Invalid RIF.\n");
			goto RequeueRxd;	// Discard the frame.

		}
		virt[1 + 6] &= ~FDDI_RII;	// Clear RII bit.
		// regions overlap
		// Copy the MAC header backwards (high address first) over
		// the RIF so source and destination may overlap safely.

		virt = cp + RifLength;
		for (n = FDDI_MAC_HDR_LEN; n; n--)
			*--virt = *--cp;
		// adjust sbd->data pointer
		skb_pull(skb, RifLength);
		len -= RifLength;
		RifLength = 0;
	}

	// Count statistics.
	smc->os.MacStat.gen.rx_packets++;	// Count indicated receive
						// packets.
	smc->os.MacStat.gen.rx_bytes+=len;	// Count bytes.

	// virt points to header again
	if (virt[1] & 0x01) {	// Check group (multicast) bit.

		smc->os.MacStat.gen.multicast++;
	}

	// deliver frame to system
	rxd->rxd_os.skb = NULL;	// ownership of the skb moves to the stack
	skb_trim(skb, len);
	skb->protocol = fddi_type_trans(skb, bp->dev);

	netif_rx(skb);

	HWM_RX_CHECK(smc, RX_LOW_WATERMARK);
	return;

      RequeueRxd:
	pr_debug(KERN_INFO "Rx: re-queue RXD.\n");
	mac_drv_requeue_rxd(smc, rxd, frag_count);
	smc->os.MacStat.gen.rx_errors++;	// Count receive packets
						// not indicated.

}				// mac_drv_rx_complete
1693
1694
1695/************************
1696 *
1697 * mac_drv_requeue_rxd
1698 *
1699 * The hardware module calls this function to request the OS-specific
1700 * module to queue the receive buffer(s) represented by the pointer
1701 * to the RxD and the frag_count into the receive queue again. This
1702 * buffer was filled with an invalid frame or an SMT frame.
1703 * Args
1704 * smc - A pointer to the SMT context struct.
1705 *
1706 * rxd - A pointer to the first RxD which is used by the receive frame.
1707 *
1708 * frag_count - Count of RxDs used by the received frame.
1709 * Out
1710 * Nothing.
1711 *
1712 ************************/
/* Return receive buffers to the RxD ring; see the block comment above.
 * Each source descriptor's skb (or the local fallback buffer) is
 * re-mapped for DMA and handed back to the HWM. */
void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
			 int frag_count)
{
	volatile struct s_smt_fp_rxd *next_rxd;
	volatile struct s_smt_fp_rxd *src_rxd;
	struct sk_buff *skb;
	int MaxFrameSize;
	unsigned char *v_addr;
	dma_addr_t b_addr;

	if (frag_count != 1)	// This is not allowed to happen.

		printk("fddi: Multi-fragment requeue!\n");

	MaxFrameSize = smc->os.MaxFrameSize;
	src_rxd = rxd;
	for (; frag_count > 0; frag_count--) {
		// remember the follower before 'rxd' is repointed below
		next_rxd = src_rxd->rxd_next;
		rxd = HWM_GET_CURR_RXD(smc);

		skb = src_rxd->rxd_os.skb;
		if (skb == NULL) {	// this should not happen

			pr_debug("Requeue with no skb in rxd!\n");
			skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
			if (skb) {
				// we got a skb
				rxd->rxd_os.skb = skb;
				skb_reserve(skb, 3);	// align IP header
				skb_put(skb, MaxFrameSize);
				v_addr = skb->data;
				b_addr = pci_map_single(&smc->os.pdev,
							v_addr,
							MaxFrameSize,
							PCI_DMA_FROMDEVICE);
				rxd->rxd_os.dma_addr = b_addr;
			} else {
				// no skb available, use local buffer
				pr_debug("Queueing invalid buffer!\n");
				rxd->rxd_os.skb = NULL;
				v_addr = smc->os.LocalRxBuffer;
				b_addr = smc->os.LocalRxBufferDMA;
			}
		} else {
			// we use skb from old rxd
			rxd->rxd_os.skb = skb;
			v_addr = skb->data;
			b_addr = pci_map_single(&smc->os.pdev,
						v_addr,
						MaxFrameSize,
						PCI_DMA_FROMDEVICE);
			rxd->rxd_os.dma_addr = b_addr;
		}
		hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
			    FIRST_FRAG | LAST_FRAG);

		src_rxd = next_rxd;
	}
}				// mac_drv_requeue_rxd
1772
1773
1774/************************
1775 *
1776 * mac_drv_fill_rxd
1777 *
1778 * The hardware module calls this function at initialization time
1779 * to fill the RxD ring with receive buffers. It is also called by
1780 * mac_drv_rx_complete if rx_free is large enough to queue some new
1781 * receive buffers into the RxD ring. mac_drv_fill_rxd queues new
1782 * receive buffers as long as enough RxDs and receive buffers are
1783 * available.
1784 * Args
1785 * smc - A pointer to the SMT context struct.
1786 * Out
1787 * Nothing.
1788 *
1789 ************************/
1790void mac_drv_fill_rxd(struct s_smc *smc)
1791{
1792 int MaxFrameSize;
1793 unsigned char *v_addr;
1794 unsigned long b_addr;
1795 struct sk_buff *skb;
1796 volatile struct s_smt_fp_rxd *rxd;
1797
ebc06eeb 1798 pr_debug(KERN_INFO "entering mac_drv_fill_rxd\n");
1da177e4
LT
1799
1800 // Walk through the list of free receive buffers, passing receive
1801 // buffers to the HWM as long as RXDs are available.
1802
1803 MaxFrameSize = smc->os.MaxFrameSize;
1804 // Check if there is any RXD left.
1805 while (HWM_GET_RX_FREE(smc) > 0) {
ebc06eeb 1806 pr_debug(KERN_INFO ".\n");
1da177e4
LT
1807
1808 rxd = HWM_GET_CURR_RXD(smc);
1809 skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
1810 if (skb) {
1811 // we got a skb
1812 skb_reserve(skb, 3);
1813 skb_put(skb, MaxFrameSize);
1814 v_addr = skb->data;
1815 b_addr = pci_map_single(&smc->os.pdev,
1816 v_addr,
1817 MaxFrameSize,
1818 PCI_DMA_FROMDEVICE);
1819 rxd->rxd_os.dma_addr = b_addr;
1820 } else {
1821 // no skb available, use local buffer
1822 // System has run out of buffer memory, but we want to
1823 // keep the receiver running in hope of better times.
1824 // Multiple descriptors may point to this local buffer,
1825 // so data in it must be considered invalid.
ebc06eeb 1826 pr_debug("Queueing invalid buffer!\n");
1da177e4
LT
1827 v_addr = smc->os.LocalRxBuffer;
1828 b_addr = smc->os.LocalRxBufferDMA;
1829 }
1830
1831 rxd->rxd_os.skb = skb;
1832
1833 // Pass receive buffer to HWM.
1834 hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
1835 FIRST_FRAG | LAST_FRAG);
1836 }
ebc06eeb 1837 pr_debug(KERN_INFO "leaving mac_drv_fill_rxd\n");
1da177e4
LT
1838} // mac_drv_fill_rxd
1839
1840
1841/************************
1842 *
1843 * mac_drv_clear_rxd
1844 *
1845 * The hardware module calls this function to release unused
1846 * receive buffers.
1847 * Args
1848 * smc - A pointer to the SMT context struct.
1849 *
1850 * rxd - A pointer to the first RxD which is used by the receive buffer.
1851 *
1852 * frag_count - Count of RxDs used by the receive buffer.
1853 * Out
1854 * Nothing.
1855 *
1856 ************************/
1857void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
1858 int frag_count)
1859{
1860
1861 struct sk_buff *skb;
1862
ebc06eeb 1863 pr_debug("entering mac_drv_clear_rxd\n");
1da177e4
LT
1864
1865 if (frag_count != 1) // This is not allowed to happen.
1866
1867 printk("fddi: Multi-fragment clear!\n");
1868
1869 for (; frag_count > 0; frag_count--) {
1870 skb = rxd->rxd_os.skb;
1871 if (skb != NULL) {
1872 skfddi_priv *bp = &smc->os;
1873 int MaxFrameSize = bp->MaxFrameSize;
1874
1875 pci_unmap_single(&bp->pdev, rxd->rxd_os.dma_addr,
1876 MaxFrameSize, PCI_DMA_FROMDEVICE);
1877
1878 dev_kfree_skb(skb);
1879 rxd->rxd_os.skb = NULL;
1880 }
1881 rxd = rxd->rxd_next; // Next RXD.
1882
1883 }
1884} // mac_drv_clear_rxd
1885
1886
1887/************************
1888 *
1889 * mac_drv_rx_init
1890 *
1891 * The hardware module calls this routine when an SMT or NSA frame of the
1892 * local SMT should be delivered to the LLC layer.
1893 *
1894 * It is necessary to have this function, because there is no other way to
1895 * copy the contents of SMT MBufs into receive buffers.
1896 *
1897 * mac_drv_rx_init allocates the required target memory for this frame,
1898 * and receives the frame fragment by fragment by calling mac_drv_rx_frag.
1899 * Args
1900 * smc - A pointer to the SMT context struct.
1901 *
1902 * len - The length (in bytes) of the received frame (FC, DA, SA, Data).
1903 *
1904 * fc - The Frame Control field of the received frame.
1905 *
1906 * look_ahead - A pointer to the lookahead data buffer (may be NULL).
1907 *
1908 * la_len - The length of the lookahead data stored in the lookahead
1909 * buffer (may be zero).
1910 * Out
1911 * Always returns zero (0).
1912 *
1913 ************************/
1914int mac_drv_rx_init(struct s_smc *smc, int len, int fc,
1915 char *look_ahead, int la_len)
1916{
1917 struct sk_buff *skb;
1918
ebc06eeb 1919 pr_debug("entering mac_drv_rx_init(len=%d)\n", len);
1da177e4
LT
1920
1921 // "Received" a SMT or NSA frame of the local SMT.
1922
1923 if (len != la_len || len < FDDI_MAC_HDR_LEN || !look_ahead) {
ebc06eeb
AB
1924 pr_debug("fddi: Discard invalid local SMT frame\n");
1925 pr_debug(" len=%d, la_len=%d, (ULONG) look_ahead=%08lXh.\n",
1da177e4
LT
1926 len, la_len, (unsigned long) look_ahead);
1927 return (0);
1928 }
1929 skb = alloc_skb(len + 3, GFP_ATOMIC);
1930 if (!skb) {
ebc06eeb 1931 pr_debug("fddi: Local SMT: skb memory exhausted.\n");
1da177e4
LT
1932 return (0);
1933 }
1934 skb_reserve(skb, 3);
1935 skb_put(skb, len);
27d7ff46 1936 skb_copy_to_linear_data(skb, look_ahead, len);
1da177e4
LT
1937
1938 // deliver frame to system
1939 skb->protocol = fddi_type_trans(skb, smc->os.dev);
1da177e4
LT
1940 netif_rx(skb);
1941
1942 return (0);
1943} // mac_drv_rx_init
1944
1945
1946/************************
1947 *
1948 * smt_timer_poll
1949 *
1950 * This routine is called periodically by the SMT module to clean up the
1951 * driver.
1952 *
1953 * Return any queued frames back to the upper protocol layers if the ring
1954 * is down.
1955 * Args
1956 * smc - A pointer to the SMT context struct.
1957 * Out
1958 * Nothing.
1959 *
1960 ************************/
void smt_timer_poll(struct s_smc *smc)
{
	/* Intentionally empty: this driver has no periodic cleanup work
	 * to perform on behalf of the SMT module. */
} // smt_timer_poll
1964
1965
1966/************************
1967 *
1968 * ring_status_indication
1969 *
1970 * This function indicates a change of the ring state.
1971 * Args
1972 * smc - A pointer to the SMT context struct.
1973 *
1974 * status - The current ring status.
1975 * Out
1976 * Nothing.
1977 *
1978 ************************/
1979void ring_status_indication(struct s_smc *smc, u_long status)
1980{
ebc06eeb 1981 pr_debug("ring_status_indication( ");
1da177e4 1982 if (status & RS_RES15)
ebc06eeb 1983 pr_debug("RS_RES15 ");
1da177e4 1984 if (status & RS_HARDERROR)
ebc06eeb 1985 pr_debug("RS_HARDERROR ");
1da177e4 1986 if (status & RS_SOFTERROR)
ebc06eeb 1987 pr_debug("RS_SOFTERROR ");
1da177e4 1988 if (status & RS_BEACON)
ebc06eeb 1989 pr_debug("RS_BEACON ");
1da177e4 1990 if (status & RS_PATHTEST)
ebc06eeb 1991 pr_debug("RS_PATHTEST ");
1da177e4 1992 if (status & RS_SELFTEST)
ebc06eeb 1993 pr_debug("RS_SELFTEST ");
1da177e4 1994 if (status & RS_RES9)
ebc06eeb 1995 pr_debug("RS_RES9 ");
1da177e4 1996 if (status & RS_DISCONNECT)
ebc06eeb 1997 pr_debug("RS_DISCONNECT ");
1da177e4 1998 if (status & RS_RES7)
ebc06eeb 1999 pr_debug("RS_RES7 ");
1da177e4 2000 if (status & RS_DUPADDR)
ebc06eeb 2001 pr_debug("RS_DUPADDR ");
1da177e4 2002 if (status & RS_NORINGOP)
ebc06eeb 2003 pr_debug("RS_NORINGOP ");
1da177e4 2004 if (status & RS_VERSION)
ebc06eeb 2005 pr_debug("RS_VERSION ");
1da177e4 2006 if (status & RS_STUCKBYPASSS)
ebc06eeb 2007 pr_debug("RS_STUCKBYPASSS ");
1da177e4 2008 if (status & RS_EVENT)
ebc06eeb 2009 pr_debug("RS_EVENT ");
1da177e4 2010 if (status & RS_RINGOPCHANGE)
ebc06eeb 2011 pr_debug("RS_RINGOPCHANGE ");
1da177e4 2012 if (status & RS_RES0)
ebc06eeb
AB
2013 pr_debug("RS_RES0 ");
2014 pr_debug("]\n");
1da177e4
LT
2015} // ring_status_indication
2016
2017
2018/************************
2019 *
2020 * smt_get_time
2021 *
2022 * Gets the current time from the system.
2023 * Args
2024 * None.
2025 * Out
2026 * The current time in TICKS_PER_SECOND.
2027 *
2028 * TICKS_PER_SECOND has the unit 'count of timer ticks per second'. It is
2029 * defined in "targetos.h". The definition of TICKS_PER_SECOND must comply
2030 * to the time returned by smt_get_time().
2031 *
2032 ************************/
unsigned long smt_get_time(void)
{
	/* jiffies advances at HZ ticks per second, which is what
	 * TICKS_PER_SECOND in targetos.h is defined to match. */
	return jiffies;
} // smt_get_time
2037
2038
2039/************************
2040 *
2041 * smt_stat_counter
2042 *
2043 * Status counter update (ring_op, fifo full).
2044 * Args
2045 * smc - A pointer to the SMT context struct.
2046 *
2047 * stat - = 0: A ring operational change occurred.
2048 * = 1: The FORMAC FIFO buffer is full / FIFO overflow.
2049 * Out
2050 * Nothing.
2051 *
2052 ************************/
2053void smt_stat_counter(struct s_smc *smc, int stat)
2054{
2055// BOOLEAN RingIsUp ;
2056
ebc06eeb 2057 pr_debug(KERN_INFO "smt_stat_counter\n");
1da177e4
LT
2058 switch (stat) {
2059 case 0:
ebc06eeb 2060 pr_debug(KERN_INFO "Ring operational change.\n");
1da177e4
LT
2061 break;
2062 case 1:
ebc06eeb 2063 pr_debug(KERN_INFO "Receive fifo overflow.\n");
1da177e4
LT
2064 smc->os.MacStat.gen.rx_errors++;
2065 break;
2066 default:
ebc06eeb 2067 pr_debug(KERN_INFO "Unknown status (%d).\n", stat);
1da177e4
LT
2068 break;
2069 }
2070} // smt_stat_counter
2071
2072
2073/************************
2074 *
2075 * cfm_state_change
2076 *
2077 * Sets CFM state in custom statistics.
2078 * Args
2079 * smc - A pointer to the SMT context struct.
2080 *
2081 * c_state - Possible values are:
2082 *
 *	SC0_ISOLATED, SC1_WRAP_A, SC2_WRAP_B, SC4_THRU_A, SC5_THRU_B,
 *	SC7_WRAP_S, SC9_C_WRAP_A, SC10_C_WRAP_B, SC11_C_WRAP_S
2085 * Out
2086 * Nothing.
2087 *
2088 ************************/
void cfm_state_change(struct s_smc *smc, int c_state)
{
#ifdef DRIVERDEBUG
	char *s;

	switch (c_state) {
	case SC0_ISOLATED:
		s = "SC0_ISOLATED";
		break;
	case SC1_WRAP_A:
		s = "SC1_WRAP_A";
		break;
	case SC2_WRAP_B:
		s = "SC2_WRAP_B";
		break;
	case SC4_THRU_A:
		s = "SC4_THRU_A";
		break;
	case SC5_THRU_B:
		s = "SC5_THRU_B";
		break;
	case SC7_WRAP_S:
		s = "SC7_WRAP_S";
		break;
	case SC9_C_WRAP_A:
		s = "SC9_C_WRAP_A";
		break;
	case SC10_C_WRAP_B:
		s = "SC10_C_WRAP_B";
		break;
	case SC11_C_WRAP_S:
		s = "SC11_C_WRAP_S";
		break;
	default:
		/* pr_debug() already emits at KERN_DEBUG; an embedded
		 * KERN_INFO prefix would print as literal bytes. */
		pr_debug("cfm_state_change: unknown %d\n", c_state);
		return;
	}
	pr_debug("cfm_state_change: %s\n", s);
#endif				// DRIVERDEBUG
} // cfm_state_change
2129
2130
2131/************************
2132 *
2133 * ecm_state_change
2134 *
2135 * Sets ECM state in custom statistics.
2136 * Args
2137 * smc - A pointer to the SMT context struct.
2138 *
2139 * e_state - Possible values are:
2140 *
 *	EC0_OUT, EC1_IN, EC2_TRACE, EC3_LEAVE, EC4_PATH_TEST,
 *	EC5_INSERT, EC6_CHECK, EC7_DEINSERT
2143 * Out
2144 * Nothing.
2145 *
2146 ************************/
void ecm_state_change(struct s_smc *smc, int e_state)
{
#ifdef DRIVERDEBUG
	char *s;

	switch (e_state) {
	case EC0_OUT:
		s = "EC0_OUT";
		break;
	case EC1_IN:
		s = "EC1_IN";
		break;
	case EC2_TRACE:
		s = "EC2_TRACE";
		break;
	case EC3_LEAVE:
		s = "EC3_LEAVE";
		break;
	case EC4_PATH_TEST:
		s = "EC4_PATH_TEST";
		break;
	case EC5_INSERT:
		s = "EC5_INSERT";
		break;
	case EC6_CHECK:
		s = "EC6_CHECK";
		break;
	case EC7_DEINSERT:
		s = "EC7_DEINSERT";
		break;
	default:
		s = "unknown";
		break;
	}
	/* pr_debug() already emits at KERN_DEBUG; the former embedded
	 * KERN_INFO prefix printed as literal bytes, so it was dropped. */
	pr_debug("ecm_state_change: %s\n", s);
#endif //DRIVERDEBUG
} // ecm_state_change
2184
2185
2186/************************
2187 *
2188 * rmt_state_change
2189 *
2190 * Sets RMT state in custom statistics.
2191 * Args
2192 * smc - A pointer to the SMT context struct.
2193 *
2194 * r_state - Possible values are:
2195 *
2196 * RM0_ISOLATED, RM1_NON_OP, RM2_RING_OP, RM3_DETECT,
2197 * RM4_NON_OP_DUP, RM5_RING_OP_DUP, RM6_DIRECTED, RM7_TRACE
2198 * Out
2199 * Nothing.
2200 *
2201 ************************/
void rmt_state_change(struct s_smc *smc, int r_state)
{
#ifdef DRIVERDEBUG
	char *s;

	switch (r_state) {
	case RM0_ISOLATED:
		s = "RM0_ISOLATED";
		break;
	case RM1_NON_OP:
		s = "RM1_NON_OP - not operational";
		break;
	case RM2_RING_OP:
		s = "RM2_RING_OP - ring operational";
		break;
	case RM3_DETECT:
		s = "RM3_DETECT - detect dupl addresses";
		break;
	case RM4_NON_OP_DUP:
		s = "RM4_NON_OP_DUP - dupl. addr detected";
		break;
	case RM5_RING_OP_DUP:
		s = "RM5_RING_OP_DUP - ring oper. with dupl. addr";
		break;
	case RM6_DIRECTED:
		s = "RM6_DIRECTED - sending directed beacons";
		break;
	case RM7_TRACE:
		s = "RM7_TRACE - trace initiated";
		break;
	default:
		s = "unknown";
		break;
	}
	/* pr_debug() already emits at KERN_DEBUG; the former embedded
	 * KERN_INFO prefix printed as literal bytes, so it was dropped. */
	pr_debug("[rmt_state_change: %s]\n", s);
#endif // DRIVERDEBUG
} // rmt_state_change
2239
2240
2241/************************
2242 *
2243 * drv_reset_indication
2244 *
2245 * This function is called by the SMT when it has detected a severe
2246 * hardware problem. The driver should perform a reset on the adapter
2247 * as soon as possible, but not from within this function.
2248 * Args
2249 * smc - A pointer to the SMT context struct.
2250 * Out
2251 * Nothing.
2252 *
2253 ************************/
2254void drv_reset_indication(struct s_smc *smc)
2255{
ebc06eeb 2256 pr_debug(KERN_INFO "entering drv_reset_indication\n");
1da177e4
LT
2257
2258 smc->os.ResetRequested = TRUE; // Set flag.
2259
2260} // drv_reset_indication
2261
/* PCI driver glue: binds the probe/remove callbacks to the SysKonnect
 * FDDI adapters listed in skfddi_pci_tbl. */
static struct pci_driver skfddi_pci_driver = {
	.name = "skfddi",
	.id_table = skfddi_pci_tbl,
	.probe = skfp_init_one,
	.remove = __devexit_p(skfp_remove_one),
};
2268
/* Module entry point: register the PCI driver with the PCI core.
 * Returns 0 on success or the negative errno from pci_register_driver(). */
static int __init skfd_init(void)
{
	return pci_register_driver(&skfddi_pci_driver);
}
2273
/* Module exit point: unregister the PCI driver from the PCI core. */
static void __exit skfd_exit(void)
{
	pci_unregister_driver(&skfddi_pci_driver);
}
2278
module_init(skfd_init);		/* run at module load / boot */
module_exit(skfd_exit);		/* run at module unload */