/*
 * File Name:
 *	skfddi.c
 *
 * Copyright Information:
 *	Copyright SysKonnect 1998,1999.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * The information in this file is provided "AS IS" without warranty.
 *
 * Abstract:
 *	A Linux device driver supporting the SysKonnect FDDI PCI controller
 *	family.
 *
 * Maintainers:
 *	CG	Christoph Goos (cgoos@syskonnect.de)
 *
 * Contributors:
 *	DM	David S. Miller
 *
 * Address all questions to:
 *	linux@syskonnect.de
 *
 * The technical manual for the adapters is available from SysKonnect's
 * web pages: www.syskonnect.com
 * Go to "Support" and search the Knowledge Base for "manual".
 *
 * Driver Architecture:
 *	The driver architecture is based on the DEC FDDI driver by
 *	Lawrence V. Stefani and several Ethernet drivers.
 *	I also used an existing Windows NT miniport driver.
 *	All hardware dependent functions are handled by the SysKonnect
 *	Hardware Module.
 *	Apart from skfddi.c itself, the only header files directly related
 *	to this source are h/types.h, h/osdef1st.h and h/targetos.h.
 *	The others belong to the SysKonnect FDDI Hardware Module and
 *	should not be changed.
 *
 * Modification History:
 *	Date		Name	Description
 *	02-Mar-98	CG	Created.
 *
 *	10-Mar-99	CG	Support for 2.2.x added.
 *	25-Mar-99	CG	Corrected IRQ routing for SMP (APIC)
 *	26-Oct-99	CG	Fixed compilation error on 2.2.13
 *	12-Nov-99	CG	Source code release
 *	22-Nov-99	CG	Included in kernel source.
 *	07-May-00	DM	64 bit fixes, new dma interface
 *	31-Jul-03	DB	Audit copy_*_user in skfp_ioctl
 *				  Daniele Bellucci <bellucda@tiscali.it>
 *	03-Dec-03	SH	Convert to PCI device model
 *
 * Compilation options (-Dxxx):
 *	DRIVERDEBUG	print lots of messages to log file
 *	DUMPPACKETS	print received/transmitted packets to logfile
 *
 * Tested CPU architectures:
 *	- i386
 *	- sparc64
 */
65
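/*
 * Example only (not from the original sources): DRIVERDEBUG and
 * DUMPPACKETS are ordinary preprocessor symbols, so a debug build can
 * enable them on the compiler command line (-DDRIVERDEBUG -DDUMPPACKETS)
 * or, equivalently, by defining them here before the include section.
 */
#if 0				/* change to 1 for a hypothetical debug build */
#define DRIVERDEBUG
#define DUMPPACKETS
#endif
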
66/* Version information string - should be updated prior to */
67/* each new release!!! */
68#define VERSION "2.07"
69
static const char * const boot_msg =
71 "SysKonnect FDDI PCI Adapter driver v" VERSION " for\n"
72 " SK-55xx/SK-58xx adapters (SK-NET FDDI-FP/UP/LP)";
73
74/* Include files */
75
76#include <linux/module.h>
77#include <linux/kernel.h>
78#include <linux/errno.h>
79#include <linux/ioport.h>
80#include <linux/slab.h>
81#include <linux/interrupt.h>
82#include <linux/pci.h>
83#include <linux/netdevice.h>
84#include <linux/fddidevice.h>
85#include <linux/skbuff.h>
86#include <linux/bitops.h>
87
88#include <asm/byteorder.h>
89#include <asm/io.h>
90#include <asm/uaccess.h>
91
92#include "h/types.h"
93#undef ADDR // undo Linux definition
94#include "h/skfbi.h"
95#include "h/fddi.h"
96#include "h/smc.h"
97#include "h/smtstate.h"
98
99
100// Define module-wide (static) routines
101static int skfp_driver_init(struct net_device *dev);
102static int skfp_open(struct net_device *dev);
103static int skfp_close(struct net_device *dev);
static irqreturn_t skfp_interrupt(int irq, void *dev_id);
105static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev);
106static void skfp_ctl_set_multicast_list(struct net_device *dev);
107static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev);
108static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr);
109static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
110static int skfp_send_pkt(struct sk_buff *skb, struct net_device *dev);
111static void send_queued_packets(struct s_smc *smc);
112static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr);
113static void ResetAdapter(struct s_smc *smc);
114
115
116// Functions needed by the hardware module
117void *mac_drv_get_space(struct s_smc *smc, u_int size);
118void *mac_drv_get_desc_mem(struct s_smc *smc, u_int size);
119unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt);
120unsigned long dma_master(struct s_smc *smc, void *virt, int len, int flag);
121void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr,
122 int flag);
123void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd);
124void llc_restart_tx(struct s_smc *smc);
125void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
126 int frag_count, int len);
127void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
128 int frag_count);
129void mac_drv_fill_rxd(struct s_smc *smc);
130void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
131 int frag_count);
132int mac_drv_rx_init(struct s_smc *smc, int len, int fc, char *look_ahead,
133 int la_len);
134void dump_data(unsigned char *Data, int length);
135
136// External functions from the hardware module
137extern u_int mac_drv_check_space(void);
138extern int mac_drv_init(struct s_smc *smc);
139extern void hwm_tx_frag(struct s_smc *smc, char far * virt, u_long phys,
140 int len, int frame_status);
141extern int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count,
142 int frame_len, int frame_status);
143extern void fddi_isr(struct s_smc *smc);
144extern void hwm_rx_frag(struct s_smc *smc, char far * virt, u_long phys,
145 int len, int frame_status);
146extern void mac_drv_rx_mode(struct s_smc *smc, int mode);
147extern void mac_drv_clear_rx_queue(struct s_smc *smc);
148extern void enable_tx_irq(struct s_smc *smc, u_short queue);
149
150static struct pci_device_id skfddi_pci_tbl[] = {
151 { PCI_VENDOR_ID_SK, PCI_DEVICE_ID_SK_FP, PCI_ANY_ID, PCI_ANY_ID, },
152 { } /* Terminating entry */
153};
154MODULE_DEVICE_TABLE(pci, skfddi_pci_tbl);
155MODULE_LICENSE("GPL");
156MODULE_AUTHOR("Mirko Lindner <mlindner@syskonnect.de>");
157
158// Define module-wide (static) variables
159
160static int num_boards; /* total number of adapters configured */
161
162static const struct net_device_ops skfp_netdev_ops = {
163 .ndo_open = skfp_open,
164 .ndo_stop = skfp_close,
165 .ndo_start_xmit = skfp_send_pkt,
166 .ndo_get_stats = skfp_ctl_get_stats,
167 .ndo_change_mtu = fddi_change_mtu,
168 .ndo_set_multicast_list = skfp_ctl_set_multicast_list,
169 .ndo_set_mac_address = skfp_ctl_set_mac_address,
170 .ndo_do_ioctl = skfp_ioctl,
171};
172
173/*
174 * =================
175 * = skfp_init_one =
176 * =================
177 *
178 * Overview:
179 * Probes for supported FDDI PCI controllers
180 *
181 * Returns:
182 * Condition code
183 *
184 * Arguments:
185 * pdev - pointer to PCI device information
186 *
187 * Functional Description:
188 * This is now called by PCI driver registration process
189 * for each board found.
190 *
191 * Return Codes:
192 * 0 - This device (fddi0, fddi1, etc) configured successfully
193 * -ENODEV - No devices present, or no SysKonnect FDDI PCI device
194 * present for this device name
195 *
196 *
197 * Side Effects:
198 * Device structures for FDDI adapters (fddi0, fddi1, etc) are
199 * initialized and the board resources are read and stored in
200 * the device structure.
201 */
202static int skfp_init_one(struct pci_dev *pdev,
203 const struct pci_device_id *ent)
204{
205 struct net_device *dev;
206 struct s_smc *smc; /* board pointer */
207 void __iomem *mem;
208 int err;
209
	pr_debug(KERN_INFO "entering skfp_init_one\n");
211
212 if (num_boards == 0)
213 printk("%s\n", boot_msg);
214
215 err = pci_enable_device(pdev);
216 if (err)
217 return err;
218
219 err = pci_request_regions(pdev, "skfddi");
220 if (err)
221 goto err_out1;
222
223 pci_set_master(pdev);
224
225#ifdef MEM_MAPPED_IO
226 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
227 printk(KERN_ERR "skfp: region is not an MMIO resource\n");
228 err = -EIO;
229 goto err_out2;
230 }
231
232 mem = ioremap(pci_resource_start(pdev, 0), 0x4000);
233#else
	if (!(pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
235 printk(KERN_ERR "skfp: region is not PIO resource\n");
236 err = -EIO;
237 goto err_out2;
238 }
239
240 mem = ioport_map(pci_resource_start(pdev, 1), FP_IO_LEN);
241#endif
242 if (!mem) {
243 printk(KERN_ERR "skfp: Unable to map register, "
244 "FDDI adapter will be disabled.\n");
245 err = -EIO;
246 goto err_out2;
247 }
248
249 dev = alloc_fddidev(sizeof(struct s_smc));
250 if (!dev) {
251 printk(KERN_ERR "skfp: Unable to allocate fddi device, "
252 "FDDI adapter will be disabled.\n");
253 err = -ENOMEM;
254 goto err_out3;
255 }
256
257 dev->irq = pdev->irq;
	dev->netdev_ops = &skfp_netdev_ops;

260 SET_NETDEV_DEV(dev, &pdev->dev);
261
262 /* Initialize board structure with bus-specific info */
263 smc = netdev_priv(dev);
264 smc->os.dev = dev;
265 smc->os.bus_type = SK_BUS_TYPE_PCI;
266 smc->os.pdev = *pdev;
267 smc->os.QueueSkb = MAX_TX_QUEUE_LEN;
268 smc->os.MaxFrameSize = MAX_FRAME_SIZE;
269 smc->os.dev = dev;
270 smc->hw.slot = -1;
271 smc->hw.iop = mem;
272 smc->os.ResetRequested = FALSE;
273 skb_queue_head_init(&smc->os.SendSkbQueue);
274
275 dev->base_addr = (unsigned long)mem;
276
277 err = skfp_driver_init(dev);
278 if (err)
279 goto err_out4;
280
281 err = register_netdev(dev);
282 if (err)
283 goto err_out5;
284
285 ++num_boards;
286 pci_set_drvdata(pdev, dev);
287
288 if ((pdev->subsystem_device & 0xff00) == 0x5500 ||
289 (pdev->subsystem_device & 0xff00) == 0x5800)
290 printk("%s: SysKonnect FDDI PCI adapter"
291 " found (SK-%04X)\n", dev->name,
292 pdev->subsystem_device);
293 else
294 printk("%s: FDDI PCI adapter found\n", dev->name);
295
296 return 0;
297err_out5:
298 if (smc->os.SharedMemAddr)
299 pci_free_consistent(pdev, smc->os.SharedMemSize,
300 smc->os.SharedMemAddr,
301 smc->os.SharedMemDMA);
302 pci_free_consistent(pdev, MAX_FRAME_SIZE,
303 smc->os.LocalRxBuffer, smc->os.LocalRxBufferDMA);
304err_out4:
305 free_netdev(dev);
306err_out3:
307#ifdef MEM_MAPPED_IO
308 iounmap(mem);
309#else
310 ioport_unmap(mem);
311#endif
312err_out2:
313 pci_release_regions(pdev);
314err_out1:
315 pci_disable_device(pdev);
316 return err;
317}
318
319/*
320 * Called for each adapter board from pci_unregister_driver
321 */
322static void __devexit skfp_remove_one(struct pci_dev *pdev)
323{
324 struct net_device *p = pci_get_drvdata(pdev);
325 struct s_smc *lp = netdev_priv(p);
326
327 unregister_netdev(p);
328
329 if (lp->os.SharedMemAddr) {
330 pci_free_consistent(&lp->os.pdev,
331 lp->os.SharedMemSize,
332 lp->os.SharedMemAddr,
333 lp->os.SharedMemDMA);
334 lp->os.SharedMemAddr = NULL;
335 }
336 if (lp->os.LocalRxBuffer) {
337 pci_free_consistent(&lp->os.pdev,
338 MAX_FRAME_SIZE,
339 lp->os.LocalRxBuffer,
340 lp->os.LocalRxBufferDMA);
341 lp->os.LocalRxBuffer = NULL;
342 }
343#ifdef MEM_MAPPED_IO
344 iounmap(lp->hw.iop);
345#else
346 ioport_unmap(lp->hw.iop);
347#endif
348 pci_release_regions(pdev);
349 free_netdev(p);
350
351 pci_disable_device(pdev);
352 pci_set_drvdata(pdev, NULL);
353}
354
355/*
356 * ====================
357 * = skfp_driver_init =
358 * ====================
359 *
360 * Overview:
361 * Initializes remaining adapter board structure information
362 * and makes sure adapter is in a safe state prior to skfp_open().
363 *
364 * Returns:
365 * Condition code
366 *
367 * Arguments:
368 * dev - pointer to device information
369 *
370 * Functional Description:
371 * This function allocates additional resources such as the host memory
372 * blocks needed by the adapter.
373 * The adapter is also reset. The OS must call skfp_open() to open
374 * the adapter and bring it on-line.
375 *
376 * Return Codes:
377 * 0 - initialization succeeded
378 * -1 - initialization failed
379 */
380static int skfp_driver_init(struct net_device *dev)
381{
382 struct s_smc *smc = netdev_priv(dev);
383 skfddi_priv *bp = &smc->os;
384 int err = -EIO;
385
	pr_debug(KERN_INFO "entering skfp_driver_init\n");
387
388 // set the io address in private structures
389 bp->base_addr = dev->base_addr;
390
391 // Get the interrupt level from the PCI Configuration Table
392 smc->hw.irq = dev->irq;
393
394 spin_lock_init(&bp->DriverLock);
395
396 // Allocate invalid frame
397 bp->LocalRxBuffer = pci_alloc_consistent(&bp->pdev, MAX_FRAME_SIZE, &bp->LocalRxBufferDMA);
398 if (!bp->LocalRxBuffer) {
399 printk("could not allocate mem for ");
400 printk("LocalRxBuffer: %d byte\n", MAX_FRAME_SIZE);
401 goto fail;
402 }
403
404 // Determine the required size of the 'shared' memory area.
405 bp->SharedMemSize = mac_drv_check_space();
	pr_debug(KERN_INFO "Memory for HWM: %ld\n", bp->SharedMemSize);
407 if (bp->SharedMemSize > 0) {
408 bp->SharedMemSize += 16; // for descriptor alignment
409
410 bp->SharedMemAddr = pci_alloc_consistent(&bp->pdev,
411 bp->SharedMemSize,
412 &bp->SharedMemDMA);
		if (!bp->SharedMemAddr) {
414 printk("could not allocate mem for ");
415 printk("hardware module: %ld byte\n",
416 bp->SharedMemSize);
417 goto fail;
418 }
419 bp->SharedMemHeap = 0; // Nothing used yet.
420
421 } else {
422 bp->SharedMemAddr = NULL;
423 bp->SharedMemHeap = 0;
424 } // SharedMemSize > 0
425
426 memset(bp->SharedMemAddr, 0, bp->SharedMemSize);
427
428 card_stop(smc); // Reset adapter.
429
	pr_debug(KERN_INFO "mac_drv_init()..\n");
	if (mac_drv_init(smc) != 0) {
		pr_debug(KERN_INFO "mac_drv_init() failed.\n");
433 goto fail;
434 }
435 read_address(smc, NULL);
	pr_debug(KERN_INFO "HW-Addr: %02x %02x %02x %02x %02x %02x\n",
437 smc->hw.fddi_canon_addr.a[0],
438 smc->hw.fddi_canon_addr.a[1],
439 smc->hw.fddi_canon_addr.a[2],
440 smc->hw.fddi_canon_addr.a[3],
441 smc->hw.fddi_canon_addr.a[4],
442 smc->hw.fddi_canon_addr.a[5]);
443 memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
444
445 smt_reset_defaults(smc, 0);
446
447 return (0);
448
449fail:
450 if (bp->SharedMemAddr) {
451 pci_free_consistent(&bp->pdev,
452 bp->SharedMemSize,
453 bp->SharedMemAddr,
454 bp->SharedMemDMA);
455 bp->SharedMemAddr = NULL;
456 }
457 if (bp->LocalRxBuffer) {
458 pci_free_consistent(&bp->pdev, MAX_FRAME_SIZE,
459 bp->LocalRxBuffer, bp->LocalRxBufferDMA);
460 bp->LocalRxBuffer = NULL;
461 }
462 return err;
463} // skfp_driver_init
464
465
466/*
467 * =============
468 * = skfp_open =
469 * =============
470 *
471 * Overview:
472 * Opens the adapter
473 *
474 * Returns:
475 * Condition code
476 *
477 * Arguments:
478 * dev - pointer to device information
479 *
480 * Functional Description:
481 * This function brings the adapter to an operational state.
482 *
483 * Return Codes:
484 * 0 - Adapter was successfully opened
485 * -EAGAIN - Could not register IRQ
486 */
487static int skfp_open(struct net_device *dev)
488{
489 struct s_smc *smc = netdev_priv(dev);
490 int err;
491
	pr_debug(KERN_INFO "entering skfp_open\n");
	/* Register IRQ - support shared interrupts by passing device ptr */
	err = request_irq(dev->irq, skfp_interrupt, IRQF_SHARED,
495 dev->name, dev);
496 if (err)
497 return err;
498
499 /*
500 * Set current address to factory MAC address
501 *
502 * Note: We've already done this step in skfp_driver_init.
503 * However, it's possible that a user has set a node
504 * address override, then closed and reopened the
505 * adapter. Unless we reset the device address field
506 * now, we'll continue to use the existing modified
507 * address.
508 */
509 read_address(smc, NULL);
510 memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
511
512 init_smt(smc, NULL);
513 smt_online(smc, 1);
514 STI_FBI();
515
516 /* Clear local multicast address tables */
517 mac_clear_multicast(smc);
518
519 /* Disable promiscuous filter settings */
520 mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
521
522 netif_start_queue(dev);
523 return (0);
524} // skfp_open
525
526
527/*
528 * ==============
529 * = skfp_close =
530 * ==============
531 *
532 * Overview:
533 * Closes the device/module.
534 *
535 * Returns:
536 * Condition code
537 *
538 * Arguments:
539 * dev - pointer to device information
540 *
541 * Functional Description:
542 * This routine closes the adapter and brings it to a safe state.
543 * The interrupt service routine is deregistered with the OS.
544 * The adapter can be opened again with another call to skfp_open().
545 *
546 * Return Codes:
547 * Always return 0.
548 *
549 * Assumptions:
550 * No further requests for this adapter are made after this routine is
551 * called. skfp_open() can be called to reset and reinitialize the
552 * adapter.
553 */
554static int skfp_close(struct net_device *dev)
555{
556 struct s_smc *smc = netdev_priv(dev);
557 skfddi_priv *bp = &smc->os;
558
559 CLI_FBI();
560 smt_reset_defaults(smc, 1);
561 card_stop(smc);
562 mac_drv_clear_tx_queue(smc);
563 mac_drv_clear_rx_queue(smc);
564
565 netif_stop_queue(dev);
566 /* Deregister (free) IRQ */
567 free_irq(dev->irq, dev);
568
569 skb_queue_purge(&bp->SendSkbQueue);
570 bp->QueueSkb = MAX_TX_QUEUE_LEN;
571
572 return (0);
573} // skfp_close
574
575
576/*
577 * ==================
578 * = skfp_interrupt =
579 * ==================
580 *
581 * Overview:
582 * Interrupt processing routine
583 *
584 * Returns:
585 * None
586 *
587 * Arguments:
588 * irq - interrupt vector
589 * dev_id - pointer to device information
590 *
591 * Functional Description:
592 * This routine calls the interrupt processing routine for this adapter. It
593 * disables and reenables adapter interrupts, as appropriate. We can support
594 * shared interrupts since the incoming dev_id pointer provides our device
595 * structure context. All the real work is done in the hardware module.
596 *
597 * Return Codes:
598 * None
599 *
600 * Assumptions:
601 * The interrupt acknowledgement at the hardware level (eg. ACKing the PIC
602 * on Intel-based systems) is done by the operating system outside this
603 * routine.
604 *
605 * System interrupts are enabled through this call.
606 *
607 * Side Effects:
608 * Interrupts are disabled, then reenabled at the adapter.
609 */
610
static irqreturn_t skfp_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
614 struct s_smc *smc; /* private board structure pointer */
615 skfddi_priv *bp;
616
617 smc = netdev_priv(dev);
618 bp = &smc->os;
619
620 // IRQs enabled or disabled ?
621 if (inpd(ADDR(B0_IMSK)) == 0) {
622 // IRQs are disabled: must be shared interrupt
623 return IRQ_NONE;
624 }
625 // Note: At this point, IRQs are enabled.
626 if ((inpd(ISR_A) & smc->hw.is_imask) == 0) { // IRQ?
627 // Adapter did not issue an IRQ: must be shared interrupt
628 return IRQ_NONE;
629 }
630 CLI_FBI(); // Disable IRQs from our adapter.
631 spin_lock(&bp->DriverLock);
632
633 // Call interrupt handler in hardware module (HWM).
634 fddi_isr(smc);
635
636 if (smc->os.ResetRequested) {
637 ResetAdapter(smc);
638 smc->os.ResetRequested = FALSE;
639 }
640 spin_unlock(&bp->DriverLock);
641 STI_FBI(); // Enable IRQs from our adapter.
642
643 return IRQ_HANDLED;
644} // skfp_interrupt
645
646
647/*
648 * ======================
649 * = skfp_ctl_get_stats =
650 * ======================
651 *
652 * Overview:
653 * Get statistics for FDDI adapter
654 *
655 * Returns:
656 * Pointer to FDDI statistics structure
657 *
658 * Arguments:
659 * dev - pointer to device information
660 *
661 * Functional Description:
662 * Gets current MIB objects from adapter, then
663 * returns FDDI statistics structure as defined
664 * in if_fddi.h.
665 *
666 * Note: Since the FDDI statistics structure is
667 * still new and the device structure doesn't
668 * have an FDDI-specific get statistics handler,
669 * we'll return the FDDI statistics structure as
670 * a pointer to an Ethernet statistics structure.
671 * That way, at least the first part of the statistics
672 * structure can be decoded properly.
673 * We'll have to pay attention to this routine as the
674 * device structure becomes more mature and LAN media
675 * independent.
676 *
677 */
static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev)
679{
680 struct s_smc *bp = netdev_priv(dev);
681
682 /* Fill the bp->stats structure with driver-maintained counters */
683
684 bp->os.MacStat.port_bs_flag[0] = 0x1234;
685 bp->os.MacStat.port_bs_flag[1] = 0x5678;
686// goos: need to fill out fddi statistic
687#if 0
688 /* Get FDDI SMT MIB objects */
689
690/* Fill the bp->stats structure with the SMT MIB object values */
691
692 memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id));
693 bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id;
694 bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id;
695 bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id;
696 memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data));
697 bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id;
698 bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct;
699 bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct;
700 bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct;
701 bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths;
702 bp->stats.smt_config_capabilities = bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities;
703 bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy;
704 bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy;
705 bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify;
706 bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy;
707 bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration;
708 bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present;
709 bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state;
710 bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state;
711 bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag;
712 bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status;
713 bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag;
714 bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls;
715 bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls;
716 bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions;
717 bp->stats.mac_t_max_capability = bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability;
718 bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability;
719 bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths;
720 bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path;
721 memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN);
722 memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN);
723 memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN);
724 memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN);
725 bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test;
726 bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths;
727 bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type;
728 memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN);
729 bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req;
730 bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg;
731 bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max;
732 bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value;
733 bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold;
734 bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio;
735 bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state;
736 bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag;
737 bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag;
738 bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag;
739 bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available;
740 bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present;
741 bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable;
742 bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound;
743 bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound;
744 bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req;
745 memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration));
746 bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0];
747 bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1];
748 bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0];
749 bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1];
750 bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0];
751 bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1];
752 bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0];
753 bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1];
754 bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0];
755 bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1];
756 memcpy(&bp->stats.port_requested_paths[0 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3);
757 memcpy(&bp->stats.port_requested_paths[1 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3);
758 bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0];
759 bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1];
760 bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0];
761 bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1];
762 bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0];
763 bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1];
764 bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0];
765 bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1];
766 bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0];
767 bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1];
768 bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0];
769 bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1];
770 bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0];
771 bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1];
772 bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0];
773 bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1];
774 bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0];
775 bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1];
776 bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0];
777 bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1];
778 bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0];
779 bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1];
780 bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0];
781 bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1];
782 bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0];
783 bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1];
784
785
786 /* Fill the bp->stats structure with the FDDI counter values */
787
788 bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls;
789 bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls;
790 bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls;
791 bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls;
792 bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls;
793 bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls;
794 bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls;
795 bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls;
796 bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls;
797 bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
798 bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
799
800#endif
801 return ((struct net_device_stats *) &bp->os.MacStat);
802} // ctl_get_stat
803
804
805/*
806 * ==============================
807 * = skfp_ctl_set_multicast_list =
808 * ==============================
809 *
810 * Overview:
811 * Enable/Disable LLC frame promiscuous mode reception
812 * on the adapter and/or update multicast address table.
813 *
814 * Returns:
815 * None
816 *
817 * Arguments:
818 * dev - pointer to device information
819 *
820 * Functional Description:
821 * This function acquires the driver lock and only calls
822 * skfp_ctl_set_multicast_list_wo_lock then.
823 * This routine follows a fairly simple algorithm for setting the
824 * adapter filters and CAM:
825 *
826 * if IFF_PROMISC flag is set
827 * enable promiscuous mode
828 * else
829 * disable promiscuous mode
830 * if number of multicast addresses <= max. multicast number
831 * add mc addresses to adapter table
832 * else
833 * enable promiscuous mode
834 * update adapter filters
835 *
836 * Assumptions:
837 * Multicast addresses are presented in canonical (LSB) format.
838 *
839 * Side Effects:
840 * On-board adapter filters are updated.
841 */
842static void skfp_ctl_set_multicast_list(struct net_device *dev)
843{
844 struct s_smc *smc = netdev_priv(dev);
845 skfddi_priv *bp = &smc->os;
846 unsigned long Flags;
847
848 spin_lock_irqsave(&bp->DriverLock, Flags);
849 skfp_ctl_set_multicast_list_wo_lock(dev);
850 spin_unlock_irqrestore(&bp->DriverLock, Flags);
851 return;
852} // skfp_ctl_set_multicast_list
853
854
855
856static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
857{
858 struct s_smc *smc = netdev_priv(dev);
859 struct dev_mc_list *dmi; /* ptr to multicast addr entry */
860 int i;
861
862 /* Enable promiscuous mode, if necessary */
863 if (dev->flags & IFF_PROMISC) {
864 mac_drv_rx_mode(smc, RX_ENABLE_PROMISC);
		pr_debug(KERN_INFO "PROMISCUOUS MODE ENABLED\n");
866 }
867 /* Else, update multicast address table */
868 else {
869 mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
		pr_debug(KERN_INFO "PROMISCUOUS MODE DISABLED\n");
871
872 // Reset all MC addresses
873 mac_clear_multicast(smc);
874 mac_drv_rx_mode(smc, RX_DISABLE_ALLMULTI);
875
876 if (dev->flags & IFF_ALLMULTI) {
877 mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
			pr_debug(KERN_INFO "ENABLE ALL MC ADDRESSES\n");
879 } else if (dev->mc_count > 0) {
880 if (dev->mc_count <= FPMAX_MULTICAST) {
881 /* use exact filtering */
882
883 // point to first multicast addr
884 dmi = dev->mc_list;
885
886 for (i = 0; i < dev->mc_count; i++) {
887 mac_add_multicast(smc,
888 (struct fddi_addr *)dmi->dmi_addr,
889 1);
890
					pr_debug(KERN_INFO "ENABLE MC ADDRESS:");
					pr_debug(" %02x %02x %02x ",
893 dmi->dmi_addr[0],
894 dmi->dmi_addr[1],
895 dmi->dmi_addr[2]);
					pr_debug("%02x %02x %02x\n",
897 dmi->dmi_addr[3],
898 dmi->dmi_addr[4],
899 dmi->dmi_addr[5]);
900 dmi = dmi->next;
901 } // for
902
903 } else { // more MC addresses than HW supports
904
905 mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
				pr_debug(KERN_INFO "ENABLE ALL MC ADDRESSES\n");
907 }
908 } else { // no MC addresses
909
			pr_debug(KERN_INFO "DISABLE ALL MC ADDRESSES\n");
911 }
912
913 /* Update adapter filters */
914 mac_update_multicast(smc);
915 }
916 return;
917} // skfp_ctl_set_multicast_list_wo_lock
918
919
920/*
921 * ===========================
922 * = skfp_ctl_set_mac_address =
923 * ===========================
924 *
925 * Overview:
926 * set new mac address on adapter and update dev_addr field in device table.
927 *
928 * Returns:
929 * None
930 *
931 * Arguments:
932 * dev - pointer to device information
933 * addr - pointer to sockaddr structure containing unicast address to set
934 *
935 * Assumptions:
936 * The address pointed to by addr->sa_data is a valid unicast
937 * address and is presented in canonical (LSB) format.
938 */
939static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr)
940{
941 struct s_smc *smc = netdev_priv(dev);
942 struct sockaddr *p_sockaddr = (struct sockaddr *) addr;
943 skfddi_priv *bp = &smc->os;
944 unsigned long Flags;
945
946
947 memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN);
948 spin_lock_irqsave(&bp->DriverLock, Flags);
949 ResetAdapter(smc);
950 spin_unlock_irqrestore(&bp->DriverLock, Flags);
951
952 return (0); /* always return zero */
953} // skfp_ctl_set_mac_address
954
955
956/*
957 * ==============
958 * = skfp_ioctl =
959 * ==============
960 *
961 * Overview:
962 *
963 * Perform IOCTL call functions here. Some are privileged operations and the
964 * effective uid is checked in those cases.
965 *
966 * Returns:
967 * status value
968 * 0 - success
969 * other - failure
970 *
971 * Arguments:
972 * dev - pointer to device information
973 * rq - pointer to ioctl request structure
974 * cmd - ?
975 *
976 */
977
978
979static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
980{
981 struct s_smc *smc = netdev_priv(dev);
982 skfddi_priv *lp = &smc->os;
983 struct s_skfp_ioctl ioc;
984 int status = 0;
985
986 if (copy_from_user(&ioc, rq->ifr_data, sizeof(struct s_skfp_ioctl)))
987 return -EFAULT;
988
989 switch (ioc.cmd) {
990 case SKFP_GET_STATS: /* Get the driver statistics */
991 ioc.len = sizeof(lp->MacStat);
992 status = copy_to_user(ioc.data, skfp_ctl_get_stats(dev), ioc.len)
993 ? -EFAULT : 0;
994 break;
995 case SKFP_CLR_STATS: /* Zero out the driver statistics */
996 if (!capable(CAP_NET_ADMIN)) {
			status = -EPERM;
998 } else {
999 memset(&lp->MacStat, 0, sizeof(lp->MacStat));
1000 }
1001 break;
1002 default:
		printk("ioctl for %s: unknown cmd: %04x\n", dev->name, ioc.cmd);
1004 status = -EOPNOTSUPP;
1005
1006 } // switch
1007
1008 return status;
1009} // skfp_ioctl
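
/*
 * Hypothetical user-space sketch (not part of the driver): how an
 * application might reach skfp_ioctl() above through a private device
 * ioctl. The request mirrors the cmd/len/data fields used above; the
 * real definitions of struct s_skfp_ioctl, SKFP_GET_STATS and
 * SKFP_CLR_STATS live in the driver's private headers, and both the
 * header path and the use of SIOCDEVPRIVATE are assumptions here.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include "h/skfbi.h"		/* assumed location of struct s_skfp_ioctl */

/* sock is any open socket, e.g. socket(AF_INET, SOCK_DGRAM, 0). */
static int skfp_clear_stats(int sock, const char *ifname)
{
	struct s_skfp_ioctl ioc;
	struct ifreq ifr;

	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = SKFP_CLR_STATS;	/* driver checks CAP_NET_ADMIN, see above */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *) &ioc;

	return ioctl(sock, SIOCDEVPRIVATE, &ifr);
}
#endif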
1010
1011
1012/*
1013 * =====================
1014 * = skfp_send_pkt =
1015 * =====================
1016 *
1017 * Overview:
1018 * Queues a packet for transmission and try to transmit it.
1019 *
1020 * Returns:
1021 * Condition code
1022 *
1023 * Arguments:
1024 * skb - pointer to sk_buff to queue for transmission
1025 * dev - pointer to device information
1026 *
1027 * Functional Description:
1028 * Here we assume that an incoming skb transmit request
1029 * is contained in a single physically contiguous buffer
1030 * in which the virtual address of the start of packet
1031 * (skb->data) can be converted to a physical address
1032 * by using pci_map_single().
1033 *
1034 * We have an internal queue for packets we can not send
1035 * immediately. Packets in this queue can be given to the
1036 * adapter if transmit buffers are freed.
1037 *
1038 * We can't free the skb until after it's been DMA'd
1039 * out by the adapter, so we'll keep it in the driver and
1040 * return it in mac_drv_tx_complete.
1041 *
1042 * Return Codes:
1043 * 0 - driver has queued and/or sent packet
1044 * 1 - caller should requeue the sk_buff for later transmission
1045 *
1046 * Assumptions:
1047 * The entire packet is stored in one physically
1048 * contiguous buffer which is not cached and whose
1049 * 32-bit physical address can be determined.
1050 *
1051 * It's vital that this routine is NOT reentered for the
1052 * same board and that the OS is not in another section of
1053 * code (eg. skfp_interrupt) for the same board on a
1054 * different thread.
1055 *
1056 * Side Effects:
1057 * None
1058 */
1059static int skfp_send_pkt(struct sk_buff *skb, struct net_device *dev)
1060{
1061 struct s_smc *smc = netdev_priv(dev);
1062 skfddi_priv *bp = &smc->os;
1063
	pr_debug(KERN_INFO "skfp_send_pkt\n");
1065
1066 /*
1067 * Verify that incoming transmit request is OK
1068 *
1069 * Note: The packet size check is consistent with other
1070 * Linux device drivers, although the correct packet
1071 * size should be verified before calling the
1072 * transmit routine.
1073 */
1074
1075 if (!(skb->len >= FDDI_K_LLC_ZLEN && skb->len <= FDDI_K_LLC_LEN)) {
1076 bp->MacStat.gen.tx_errors++; /* bump error counter */
1077 // dequeue packets from xmt queue and send them
1078 netif_start_queue(dev);
1079 dev_kfree_skb(skb);
1080 return (0); /* return "success" */
1081 }
1082 if (bp->QueueSkb == 0) { // return with tbusy set: queue full
1083
1084 netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
1086 }
1087 bp->QueueSkb--;
1088 skb_queue_tail(&bp->SendSkbQueue, skb);
1089 send_queued_packets(netdev_priv(dev));
1090 if (bp->QueueSkb == 0) {
1091 netif_stop_queue(dev);
1092 }
1093 dev->trans_start = jiffies;
1094 return 0;
1095
1096} // skfp_send_pkt
1097
1098
1099/*
1100 * =======================
1101 * = send_queued_packets =
1102 * =======================
1103 *
1104 * Overview:
1105 * Send packets from the driver queue as long as there are some and
1106 * transmit resources are available.
1107 *
1108 * Returns:
1109 * None
1110 *
1111 * Arguments:
1112 * smc - pointer to smc (adapter) structure
1113 *
1114 * Functional Description:
1115 * Take a packet from queue if there is any. If not, then we are done.
1116 * Check if there are resources to send the packet. If not, requeue it
1117 * and exit.
1118 * Set packet descriptor flags and give packet to adapter.
1119 * Check if any send resources can be freed (we do not use the
1120 * transmit complete interrupt).
1121 */
1122static void send_queued_packets(struct s_smc *smc)
1123{
1124 skfddi_priv *bp = &smc->os;
1125 struct sk_buff *skb;
1126 unsigned char fc;
1127 int queue;
1128 struct s_smt_fp_txd *txd; // Current TxD.
1129 dma_addr_t dma_address;
1130 unsigned long Flags;
1131
1132 int frame_status; // HWM tx frame status.
1133
	pr_debug(KERN_INFO "send queued packets\n");
1135 for (;;) {
1136 // send first buffer from queue
1137 skb = skb_dequeue(&bp->SendSkbQueue);
1138
1139 if (!skb) {
			pr_debug(KERN_INFO "queue empty\n");
1141 return;
1142 } // queue empty !
1143
1144 spin_lock_irqsave(&bp->DriverLock, Flags);
1145 fc = skb->data[0];
1146 queue = (fc & FC_SYNC_BIT) ? QUEUE_S : QUEUE_A0;
1147#ifdef ESS
1148 // Check if the frame may/must be sent as a synchronous frame.
1149
1150 if ((fc & ~(FC_SYNC_BIT | FC_LLC_PRIOR)) == FC_ASYNC_LLC) {
1151 // It's an LLC frame.
1152 if (!smc->ess.sync_bw_available)
1153 fc &= ~FC_SYNC_BIT; // No bandwidth available.
1154
1155 else { // Bandwidth is available.
1156
1157 if (smc->mib.fddiESSSynchTxMode) {
1158 // Send as sync. frame.
1159 fc |= FC_SYNC_BIT;
1160 }
1161 }
1162 }
1163#endif // ESS
1164 frame_status = hwm_tx_init(smc, fc, 1, skb->len, queue);
1165
1166 if ((frame_status & (LOC_TX | LAN_TX)) == 0) {
1167 // Unable to send the frame.
1168
1169 if ((frame_status & RING_DOWN) != 0) {
1170 // Ring is down.
				pr_debug("Tx attempt while ring down.\n");
			} else if ((frame_status & OUT_OF_TXD) != 0) {
				pr_debug("%s: out of TXDs.\n", bp->dev->name);
			} else {
				pr_debug("%s: out of transmit resources",
1176 bp->dev->name);
1177 }
1178
1179 // Note: We will retry the operation as soon as
1180 // transmit resources become available.
1181 skb_queue_head(&bp->SendSkbQueue, skb);
1182 spin_unlock_irqrestore(&bp->DriverLock, Flags);
1183 return; // Packet has been queued.
1184
1185 } // if (unable to send frame)
1186
1187 bp->QueueSkb++; // one packet less in local queue
1188
1189 // source address in packet ?
1190 CheckSourceAddress(skb->data, smc->hw.fddi_canon_addr.a);
1191
1192 txd = (struct s_smt_fp_txd *) HWM_GET_CURR_TXD(smc, queue);
1193
1194 dma_address = pci_map_single(&bp->pdev, skb->data,
1195 skb->len, PCI_DMA_TODEVICE);
1196 if (frame_status & LAN_TX) {
1197 txd->txd_os.skb = skb; // save skb
1198 txd->txd_os.dma_addr = dma_address; // save dma mapping
1199 }
1200 hwm_tx_frag(smc, skb->data, dma_address, skb->len,
1201 frame_status | FIRST_FRAG | LAST_FRAG | EN_IRQ_EOF);
1202
1203 if (!(frame_status & LAN_TX)) { // local only frame
1204 pci_unmap_single(&bp->pdev, dma_address,
1205 skb->len, PCI_DMA_TODEVICE);
1206 dev_kfree_skb_irq(skb);
1207 }
1208 spin_unlock_irqrestore(&bp->DriverLock, Flags);
1209 } // for
1210
1211 return; // never reached
1212
1213} // send_queued_packets
1214
1215
1216/************************
1217 *
1218 * CheckSourceAddress
1219 *
1220 * Verify if the source address is set. Insert it if necessary.
1221 *
1222 ************************/
static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr)
1224{
1225 unsigned char SRBit;
1226
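	/*
	 * Frame layout (see the FDDI frame format sketch in
	 * mac_drv_rx_complete below): frame[0] = FC, frame[1..6] = DA,
	 * frame[7..12] = SA.  The checks below look at the first SA byte
	 * (ignoring the source-routing bit, kept in SRBit) and at frame[11]
	 * to decide whether the caller already filled in a source address;
	 * only then is the adapter's canonical address inserted.
	 */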
1227 if ((((unsigned long) frame[1 + 6]) & ~0x01) != 0) // source routing bit
1228
1229 return;
1230 if ((unsigned short) frame[1 + 10] != 0)
1231 return;
1232 SRBit = frame[1 + 6] & 0x01;
1233 memcpy(&frame[1 + 6], hw_addr, 6);
1234 frame[8] |= SRBit;
1235} // CheckSourceAddress
1236
1237
1238/************************
1239 *
1240 * ResetAdapter
1241 *
1242 * Reset the adapter and bring it back to operational mode.
1243 * Args
1244 * smc - A pointer to the SMT context struct.
1245 * Out
1246 * Nothing.
1247 *
1248 ************************/
1249static void ResetAdapter(struct s_smc *smc)
1250{
1251
	pr_debug(KERN_INFO "[fddi: ResetAdapter]\n");
1253
1254 // Stop the adapter.
1255
1256 card_stop(smc); // Stop all activity.
1257
1258 // Clear the transmit and receive descriptor queues.
1259 mac_drv_clear_tx_queue(smc);
1260 mac_drv_clear_rx_queue(smc);
1261
1262 // Restart the adapter.
1263
1264 smt_reset_defaults(smc, 1); // Initialize the SMT module.
1265
1266 init_smt(smc, (smc->os.dev)->dev_addr); // Initialize the hardware.
1267
1268 smt_online(smc, 1); // Insert into the ring again.
1269 STI_FBI();
1270
1271 // Restore original receive mode (multicasts, promiscuous, etc.).
1272 skfp_ctl_set_multicast_list_wo_lock(smc->os.dev);
1273} // ResetAdapter
1274
1275
1276//--------------- functions called by hardware module ----------------
1277
1278/************************
1279 *
1280 * llc_restart_tx
1281 *
1282 * The hardware driver calls this routine when the transmit complete
1283 * interrupt bits (end of frame) for the synchronous or asynchronous
1284 * queue is set.
1285 *
1286 * NOTE The hardware driver calls this function also if no packets are queued.
1287 * The routine must be able to handle this case.
1288 * Args
1289 * smc - A pointer to the SMT context struct.
1290 * Out
1291 * Nothing.
1292 *
1293 ************************/
1294void llc_restart_tx(struct s_smc *smc)
1295{
1296 skfddi_priv *bp = &smc->os;
1297
	pr_debug(KERN_INFO "[llc_restart_tx]\n");
1299
1300 // Try to send queued packets
1301 spin_unlock(&bp->DriverLock);
1302 send_queued_packets(smc);
1303 spin_lock(&bp->DriverLock);
1304 netif_start_queue(bp->dev);// system may send again if it was blocked
1305
1306} // llc_restart_tx
1307
1308
1309/************************
1310 *
1311 * mac_drv_get_space
1312 *
1313 * The hardware module calls this function to allocate the memory
1314 * for the SMT MBufs if the define MB_OUTSIDE_SMC is specified.
1315 * Args
1316 * smc - A pointer to the SMT context struct.
1317 *
1318 * size - Size of memory in bytes to allocate.
1319 * Out
1320 * != 0 A pointer to the virtual address of the allocated memory.
1321 * == 0 Allocation error.
1322 *
1323 ************************/
1324void *mac_drv_get_space(struct s_smc *smc, unsigned int size)
1325{
1326 void *virt;
1327
	pr_debug(KERN_INFO "mac_drv_get_space (%d bytes), ", size);
1329 virt = (void *) (smc->os.SharedMemAddr + smc->os.SharedMemHeap);
1330
1331 if ((smc->os.SharedMemHeap + size) > smc->os.SharedMemSize) {
1332 printk("Unexpected SMT memory size requested: %d\n", size);
1333 return (NULL);
1334 }
1335 smc->os.SharedMemHeap += size; // Move heap pointer.
1336
	pr_debug(KERN_INFO "mac_drv_get_space end\n");
	pr_debug(KERN_INFO "virt addr: %lx\n", (ulong) virt);
	pr_debug(KERN_INFO "bus addr: %lx\n", (ulong)
1340 (smc->os.SharedMemDMA +
1341 ((char *) virt - (char *)smc->os.SharedMemAddr)));
1342 return (virt);
1343} // mac_drv_get_space
1344
1345
1346/************************
1347 *
1348 * mac_drv_get_desc_mem
1349 *
1350 * This function is called by the hardware dependent module.
1351 * It allocates the memory for the RxD and TxD descriptors.
1352 *
1353 * This memory must be non-cached, non-movable and non-swappable.
1354 * This memory should start at a physical page boundary.
1355 * Args
1356 * smc - A pointer to the SMT context struct.
1357 *
1358 * size - Size of memory in bytes to allocate.
1359 * Out
1360 * != 0 A pointer to the virtual address of the allocated memory.
1361 * == 0 Allocation error.
1362 *
1363 ************************/
1364void *mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size)
1365{
1366
1367 char *virt;
1368
	pr_debug(KERN_INFO "mac_drv_get_desc_mem\n");
1370
1371 // Descriptor memory must be aligned on 16-byte boundary.
1372
1373 virt = mac_drv_get_space(smc, size);
1374
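	/*
	 * Gap needed to round virt up to the next 16-byte boundary; the
	 * "% 16" turns a full 16 into 0 when virt is already aligned.
	 */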
1375 size = (u_int) (16 - (((unsigned long) virt) & 15UL));
1376 size = size % 16;
1377
	pr_debug("Allocate %u bytes alignment gap ", size);
	pr_debug("for descriptor memory.\n");
1380
1381 if (!mac_drv_get_space(smc, size)) {
1382 printk("fddi: Unable to align descriptor memory.\n");
1383 return (NULL);
1384 }
1385 return (virt + size);
1386} // mac_drv_get_desc_mem
1387
1388
1389/************************
1390 *
1391 * mac_drv_virt2phys
1392 *
1393 * Get the physical address of a given virtual address.
1394 * Args
1395 * smc - A pointer to the SMT context struct.
1396 *
1397 * virt - A (virtual) pointer into our 'shared' memory area.
1398 * Out
1399 * Physical address of the given virtual address.
1400 *
1401 ************************/
1402unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt)
1403{
1404 return (smc->os.SharedMemDMA +
1405 ((char *) virt - (char *)smc->os.SharedMemAddr));
1406} // mac_drv_virt2phys
1407
1408
1409/************************
1410 *
1411 * dma_master
1412 *
 * The HWM calls this function when the driver performs a DMA
1414 * transfer. If the OS-specific module must prepare the system hardware
1415 * for the DMA transfer, it should do it in this function.
1416 *
1417 * The hardware module calls this dma_master if it wants to send an SMT
1418 * frame. This means that the virt address passed in here is part of
1419 * the 'shared' memory area.
1420 * Args
1421 * smc - A pointer to the SMT context struct.
1422 *
1423 * virt - The virtual address of the data.
1424 *
1425 * len - The length in bytes of the data.
1426 *
1427 * flag - Indicates the transmit direction and the buffer type:
1428 * DMA_RD (0x01) system RAM ==> adapter buffer memory
1429 * DMA_WR (0x02) adapter buffer memory ==> system RAM
1430 * SMT_BUF (0x80) SMT buffer
1431 *
1432 * >> NOTE: SMT_BUF and DMA_RD are always set for PCI. <<
1433 * Out
 *	Returns the physical address for the DMA transfer.
1435 *
1436 ************************/
1437u_long dma_master(struct s_smc * smc, void *virt, int len, int flag)
1438{
1439 return (smc->os.SharedMemDMA +
1440 ((char *) virt - (char *)smc->os.SharedMemAddr));
1441} // dma_master
1442
1443
1444/************************
1445 *
1446 * dma_complete
1447 *
1448 * The hardware module calls this routine when it has completed a DMA
1449 * transfer. If the operating system dependent module has set up the DMA
1450 * channel via dma_master() (e.g. Windows NT or AIX) it should clean up
1451 * the DMA channel.
1452 * Args
1453 * smc - A pointer to the SMT context struct.
1454 *
1455 * descr - A pointer to a TxD or RxD, respectively.
1456 *
1457 * flag - Indicates the DMA transfer direction / SMT buffer:
1458 * DMA_RD (0x01) system RAM ==> adapter buffer memory
1459 * DMA_WR (0x02) adapter buffer memory ==> system RAM
1460 * SMT_BUF (0x80) SMT buffer (managed by HWM)
1461 * Out
1462 * Nothing.
1463 *
1464 ************************/
1465void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr, int flag)
1466{
1467 /* For TX buffers, there are two cases. If it is an SMT transmit
1468 * buffer, there is nothing to do since we use consistent memory
1469 * for the 'shared' memory area. The other case is for normal
1470 * transmit packets given to us by the networking stack, and in
1471 * that case we cleanup the PCI DMA mapping in mac_drv_tx_complete
1472 * below.
1473 *
1474 * For RX buffers, we have to unmap dynamic PCI DMA mappings here
1475 * because the hardware module is about to potentially look at
1476 * the contents of the buffer. If we did not call the PCI DMA
1477 * unmap first, the hardware module could read inconsistent data.
1478 */
1479 if (flag & DMA_WR) {
1480 skfddi_priv *bp = &smc->os;
1481 volatile struct s_smt_fp_rxd *r = &descr->r;
1482
1483 /* If SKB is NULL, we used the local buffer. */
1484 if (r->rxd_os.skb && r->rxd_os.dma_addr) {
1485 int MaxFrameSize = bp->MaxFrameSize;
1486
1487 pci_unmap_single(&bp->pdev, r->rxd_os.dma_addr,
1488 MaxFrameSize, PCI_DMA_FROMDEVICE);
1489 r->rxd_os.dma_addr = 0;
1490 }
1491 }
1492} // dma_complete
1493
1494
1495/************************
1496 *
1497 * mac_drv_tx_complete
1498 *
1499 * Transmit of a packet is complete. Release the tx staging buffer.
1500 *
1501 * Args
1502 * smc - A pointer to the SMT context struct.
1503 *
1504 * txd - A pointer to the last TxD which is used by the frame.
1505 * Out
1506 * Returns nothing.
1507 *
1508 ************************/
1509void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd)
1510{
1511 struct sk_buff *skb;
1512
	pr_debug(KERN_INFO "entering mac_drv_tx_complete\n");
1514 // Check if this TxD points to a skb
1515
1516 if (!(skb = txd->txd_os.skb)) {
		pr_debug("TXD with no skb assigned.\n");
1518 return;
1519 }
1520 txd->txd_os.skb = NULL;
1521
1522 // release the DMA mapping
1523 pci_unmap_single(&smc->os.pdev, txd->txd_os.dma_addr,
1524 skb->len, PCI_DMA_TODEVICE);
1525 txd->txd_os.dma_addr = 0;
1526
1527 smc->os.MacStat.gen.tx_packets++; // Count transmitted packets.
1528 smc->os.MacStat.gen.tx_bytes+=skb->len; // Count bytes
1529
1530 // free the skb
1531 dev_kfree_skb_irq(skb);
1532
	pr_debug(KERN_INFO "leaving mac_drv_tx_complete\n");
1534} // mac_drv_tx_complete
1535
1536
1537/************************
1538 *
1539 * dump packets to logfile
1540 *
1541 ************************/
1542#ifdef DUMPPACKETS
1543void dump_data(unsigned char *Data, int length)
1544{
1545 int i, j;
1546 unsigned char s[255], sh[10];
1547 if (length > 64) {
1548 length = 64;
1549 }
1550 printk(KERN_INFO "---Packet start---\n");
1551 for (i = 0, j = 0; i < length / 8; i++, j += 8)
1552 printk(KERN_INFO "%02x %02x %02x %02x %02x %02x %02x %02x\n",
1553 Data[j + 0], Data[j + 1], Data[j + 2], Data[j + 3],
1554 Data[j + 4], Data[j + 5], Data[j + 6], Data[j + 7]);
1555 strcpy(s, "");
1556 for (i = 0; i < length % 8; i++) {
1557 sprintf(sh, "%02x ", Data[j + i]);
1558 strcat(s, sh);
1559 }
1560 printk(KERN_INFO "%s\n", s);
1561 printk(KERN_INFO "------------------\n");
1562} // dump_data
1563#else
1564#define dump_data(data,len)
1565#endif // DUMPPACKETS
1566
1567/************************
1568 *
1569 * mac_drv_rx_complete
1570 *
1571 * The hardware module calls this function if an LLC frame is received
1572 * in a receive buffer. Also the SMT, NSA, and directed beacon frames
1573 * from the network will be passed to the LLC layer by this function
1574 * if passing is enabled.
1575 *
1576 * mac_drv_rx_complete forwards the frame to the LLC layer if it should
1577 * be received. It also fills the RxD ring with new receive buffers if
1578 * some can be queued.
1579 * Args
1580 * smc - A pointer to the SMT context struct.
1581 *
1582 * rxd - A pointer to the first RxD which is used by the receive frame.
1583 *
1584 * frag_count - Count of RxDs used by the received frame.
1585 *
1586 * len - Frame length.
1587 * Out
1588 * Nothing.
1589 *
1590 ************************/
1591void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
1592 int frag_count, int len)
1593{
1594 skfddi_priv *bp = &smc->os;
1595 struct sk_buff *skb;
1596 unsigned char *virt, *cp;
1597 unsigned short ri;
1598 u_int RifLength;
1599
	pr_debug(KERN_INFO "entering mac_drv_rx_complete (len=%d)\n", len);
1601 if (frag_count != 1) { // This is not allowed to happen.
1602
1603 printk("fddi: Multi-fragment receive!\n");
1604 goto RequeueRxd; // Re-use the given RXD(s).
1605
1606 }
1607 skb = rxd->rxd_os.skb;
1608 if (!skb) {
		pr_debug(KERN_INFO "No skb in rxd\n");
1610 smc->os.MacStat.gen.rx_errors++;
1611 goto RequeueRxd;
1612 }
1613 virt = skb->data;
1614
1615 // The DMA mapping was released in dma_complete above.
1616
1617 dump_data(skb->data, len);
1618
1619 /*
1620 * FDDI Frame format:
1621 * +-------+-------+-------+------------+--------+------------+
1622 * | FC[1] | DA[6] | SA[6] | RIF[0..18] | LLC[3] | Data[0..n] |
1623 * +-------+-------+-------+------------+--------+------------+
1624 *
1625 * FC = Frame Control
1626 * DA = Destination Address
1627 * SA = Source Address
1628 * RIF = Routing Information Field
1629 * LLC = Logical Link Control
1630 */
1631
1632 // Remove Routing Information Field (RIF), if present.
1633
1634 if ((virt[1 + 6] & FDDI_RII) == 0)
1635 RifLength = 0;
1636 else {
1637 int n;
1638// goos: RIF removal has still to be tested
		pr_debug(KERN_INFO "RIF found\n");
1640 // Get RIF length from Routing Control (RC) field.
1641 cp = virt + FDDI_MAC_HDR_LEN; // Point behind MAC header.
1642
		ri = ntohs(*((__be16 *) cp));
1644 RifLength = ri & FDDI_RCF_LEN_MASK;
1645 if (len < (int) (FDDI_MAC_HDR_LEN + RifLength)) {
1646 printk("fddi: Invalid RIF.\n");
1647 goto RequeueRxd; // Discard the frame.
1648
1649 }
1650 virt[1 + 6] &= ~FDDI_RII; // Clear RII bit.
1651 // regions overlap
1652
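		/*
		 * Move the FDDI_MAC_HDR_LEN header bytes (FC + DA + SA) up by
		 * RifLength so they become adjacent to the LLC data; the copy
		 * runs backwards because source and destination overlap.
		 * skb_pull() below then drops the now unused leading bytes.
		 */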
1653 virt = cp + RifLength;
1654 for (n = FDDI_MAC_HDR_LEN; n; n--)
1655 *--virt = *--cp;
1656 // adjust sbd->data pointer
1657 skb_pull(skb, RifLength);
1658 len -= RifLength;
1659 RifLength = 0;
1660 }
1661
1662 // Count statistics.
1663 smc->os.MacStat.gen.rx_packets++; // Count indicated receive
1664 // packets.
1665 smc->os.MacStat.gen.rx_bytes+=len; // Count bytes.
1666
1667 // virt points to header again
1668 if (virt[1] & 0x01) { // Check group (multicast) bit.
1669
1670 smc->os.MacStat.gen.multicast++;
1671 }
1672
1673 // deliver frame to system
1674 rxd->rxd_os.skb = NULL;
1675 skb_trim(skb, len);
1676 skb->protocol = fddi_type_trans(skb, bp->dev);
1677
1678 netif_rx(skb);
1679
1680 HWM_RX_CHECK(smc, RX_LOW_WATERMARK);
1681 return;
1682
1683 RequeueRxd:
1684 	pr_debug("Rx: re-queue RXD.\n");
1685 mac_drv_requeue_rxd(smc, rxd, frag_count);
1686 smc->os.MacStat.gen.rx_errors++; // Count receive packets
1687 // not indicated.
1688
1689} // mac_drv_rx_complete
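/*
 * Illustrative sketch (not built): the RIF removal above slides the
 * 13-byte FDDI MAC header (FC + DA + SA) forward in memory so that it
 * ends up directly in front of the LLC header.  Source and destination
 * overlap, which is why the driver copies backwards; memmove() expresses
 * the same operation.  The helper name and the rif_len parameter are
 * illustrative only.
 */
#if 0
static void strip_rif_sketch(unsigned char *frame, int *frame_len,
			     u_int rif_len)
{
	/* Move FC/DA/SA up over the routing information field. */
	memmove(frame + rif_len, frame, FDDI_MAC_HDR_LEN);
	/* The caller then advances its data pointer by rif_len bytes
	 * (skb_pull() in mac_drv_rx_complete) and shortens the length. */
	*frame_len -= rif_len;
}
#endif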
1690
1691
1692/************************
1693 *
1694 * mac_drv_requeue_rxd
1695 *
1696 * The hardware module calls this function to ask the OS-specific
1697 * module to put the receive buffer(s) described by the RxD pointer
1698 * and frag_count back into the receive queue. Such a buffer was
1699 * filled with an invalid frame or with an SMT frame.
1700 * Args
1701 * smc - A pointer to the SMT context struct.
1702 *
1703 * rxd - A pointer to the first RxD which is used by the receive frame.
1704 *
1705 * frag_count - Count of RxDs used by the received frame.
1706 * Out
1707 * Nothing.
1708 *
1709 ************************/
1710void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
1711 int frag_count)
1712{
1713 volatile struct s_smt_fp_rxd *next_rxd;
1714 volatile struct s_smt_fp_rxd *src_rxd;
1715 struct sk_buff *skb;
1716 int MaxFrameSize;
1717 unsigned char *v_addr;
1718 dma_addr_t b_addr;
1719
1720 if (frag_count != 1) // This is not allowed to happen.
1721
1722 printk("fddi: Multi-fragment requeue!\n");
1723
1724 MaxFrameSize = smc->os.MaxFrameSize;
1725 src_rxd = rxd;
1726 for (; frag_count > 0; frag_count--) {
1727 next_rxd = src_rxd->rxd_next;
1728 rxd = HWM_GET_CURR_RXD(smc);
1729
1730 skb = src_rxd->rxd_os.skb;
1731 if (skb == NULL) { // this should not happen
1732
1733 			pr_debug("Requeue with no skb in rxd!\n");
1734 skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
1735 if (skb) {
1736 // we got a skb
1737 rxd->rxd_os.skb = skb;
1738 skb_reserve(skb, 3);
1739 skb_put(skb, MaxFrameSize);
1740 v_addr = skb->data;
1741 b_addr = pci_map_single(&smc->os.pdev,
1742 v_addr,
1743 MaxFrameSize,
1744 PCI_DMA_FROMDEVICE);
1745 rxd->rxd_os.dma_addr = b_addr;
1746 } else {
1747 // no skb available, use local buffer
1748 				pr_debug("Queueing invalid buffer!\n");
1749 rxd->rxd_os.skb = NULL;
1750 v_addr = smc->os.LocalRxBuffer;
1751 b_addr = smc->os.LocalRxBufferDMA;
1752 }
1753 } else {
1754 // we use skb from old rxd
1755 rxd->rxd_os.skb = skb;
1756 v_addr = skb->data;
1757 b_addr = pci_map_single(&smc->os.pdev,
1758 v_addr,
1759 MaxFrameSize,
1760 PCI_DMA_FROMDEVICE);
1761 rxd->rxd_os.dma_addr = b_addr;
1762 }
1763 hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
1764 FIRST_FRAG | LAST_FRAG);
1765
1766 src_rxd = next_rxd;
1767 }
1768} // mac_drv_requeue_rxd
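/*
 * Illustrative sketch (not built): every buffer handed to the adapter by
 * mac_drv_requeue_rxd()/mac_drv_fill_rxd() is streaming-mapped with
 * pci_map_single(..., PCI_DMA_FROMDEVICE) and must be unmapped exactly
 * once before the CPU inspects the data (the receive path relies on the
 * mapping having been released in dma_complete, see the comment in
 * mac_drv_rx_complete(); mac_drv_clear_rxd() unmaps buffers it frees).
 * The helper names below are hypothetical.
 */
#if 0
static dma_addr_t rx_buffer_map_sketch(struct s_smc *smc, struct sk_buff *skb)
{
	/* Buffer now belongs to the adapter; the CPU must not touch it. */
	return pci_map_single(&smc->os.pdev, skb->data,
			      smc->os.MaxFrameSize, PCI_DMA_FROMDEVICE);
}

static void rx_buffer_unmap_sketch(struct s_smc *smc, dma_addr_t dma)
{
	/* Buffer is handed back to the CPU before the frame is parsed. */
	pci_unmap_single(&smc->os.pdev, dma,
			 smc->os.MaxFrameSize, PCI_DMA_FROMDEVICE);
}
#endif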
1769
1770
1771/************************
1772 *
1773 * mac_drv_fill_rxd
1774 *
1775 * The hardware module calls this function at initialization time
1776 * to fill the RxD ring with receive buffers. It is also called by
1777 * mac_drv_rx_complete if rx_free is large enough to queue some new
1778 * receive buffers into the RxD ring. mac_drv_fill_rxd queues new
1779 * receive buffers as long as enough RxDs and receive buffers are
1780 * available.
1781 * Args
1782 * smc - A pointer to the SMT context struct.
1783 * Out
1784 * Nothing.
1785 *
1786 ************************/
1787void mac_drv_fill_rxd(struct s_smc *smc)
1788{
1789 int MaxFrameSize;
1790 unsigned char *v_addr;
1791 unsigned long b_addr;
1792 struct sk_buff *skb;
1793 volatile struct s_smt_fp_rxd *rxd;
1794
1795 	pr_debug("entering mac_drv_fill_rxd\n");
1796
1797 // Walk through the list of free receive buffers, passing receive
1798 // buffers to the HWM as long as RXDs are available.
1799
1800 MaxFrameSize = smc->os.MaxFrameSize;
1801 // Check if there is any RXD left.
1802 while (HWM_GET_RX_FREE(smc) > 0) {
1803 		pr_debug(".\n");
1804
1805 rxd = HWM_GET_CURR_RXD(smc);
1806 skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
1807 if (skb) {
1808 // we got a skb
1809 skb_reserve(skb, 3);
1810 skb_put(skb, MaxFrameSize);
1811 v_addr = skb->data;
1812 b_addr = pci_map_single(&smc->os.pdev,
1813 v_addr,
1814 MaxFrameSize,
1815 PCI_DMA_FROMDEVICE);
1816 rxd->rxd_os.dma_addr = b_addr;
1817 } else {
1818 // no skb available, use local buffer
1819 // System has run out of buffer memory, but we want to
1820 // keep the receiver running in hope of better times.
1821 // Multiple descriptors may point to this local buffer,
1822 // so data in it must be considered invalid.
1823 			pr_debug("Queueing invalid buffer!\n");
1824 v_addr = smc->os.LocalRxBuffer;
1825 b_addr = smc->os.LocalRxBufferDMA;
1826 }
1827
1828 rxd->rxd_os.skb = skb;
1829
1830 // Pass receive buffer to HWM.
1831 hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
1832 FIRST_FRAG | LAST_FRAG);
1833 }
1834 	pr_debug("leaving mac_drv_fill_rxd\n");
1835} // mac_drv_fill_rxd
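/*
 * Illustrative sketch (not built): the alloc_skb(MaxFrameSize + 3) /
 * skb_reserve(skb, 3) pattern used above appears intended to align the
 * payload: 3 reserved bytes plus the 13-byte FDDI header (FC + DA + SA)
 * place the LLC/network data 16 bytes past the buffer start, i.e. on a
 * 4-byte boundary.  The helper name is hypothetical.
 */
#if 0
static struct sk_buff *alloc_rx_skb_sketch(int max_frame_size)
{
	struct sk_buff *skb = alloc_skb(max_frame_size + 3, GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_reserve(skb, 3);		/* 3 + FDDI_MAC_HDR_LEN == 16	 */
	skb_put(skb, max_frame_size);	/* expose the whole receive area */
	return skb;
}
#endif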
1836
1837
1838/************************
1839 *
1840 * mac_drv_clear_rxd
1841 *
1842 * The hardware module calls this function to release unused
1843 * receive buffers.
1844 * Args
1845 * smc - A pointer to the SMT context struct.
1846 *
1847 * rxd - A pointer to the first RxD which is used by the receive buffer.
1848 *
1849 * frag_count - Count of RxDs used by the receive buffer.
1850 * Out
1851 * Nothing.
1852 *
1853 ************************/
1854void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
1855 int frag_count)
1856{
1857
1858 struct sk_buff *skb;
1859
1860 	pr_debug("entering mac_drv_clear_rxd\n");
1861
1862 if (frag_count != 1) // This is not allowed to happen.
1863
1864 printk("fddi: Multi-fragment clear!\n");
1865
1866 for (; frag_count > 0; frag_count--) {
1867 skb = rxd->rxd_os.skb;
1868 if (skb != NULL) {
1869 skfddi_priv *bp = &smc->os;
1870 int MaxFrameSize = bp->MaxFrameSize;
1871
1872 pci_unmap_single(&bp->pdev, rxd->rxd_os.dma_addr,
1873 MaxFrameSize, PCI_DMA_FROMDEVICE);
1874
1875 dev_kfree_skb(skb);
1876 rxd->rxd_os.skb = NULL;
1877 }
1878 rxd = rxd->rxd_next; // Next RXD.
1879
1880 }
1881} // mac_drv_clear_rxd
1882
1883
1884/************************
1885 *
1886 * mac_drv_rx_init
1887 *
1888 * The hardware module calls this routine when an SMT or NSA frame of the
1889 * local SMT should be delivered to the LLC layer.
1890 *
1891 * It is necessary to have this function, because there is no other way to
1892 * copy the contents of SMT MBufs into receive buffers.
1893 *
1894 * mac_drv_rx_init allocates the required target memory for this frame,
1895 * and receives the frame fragment by fragment by calling mac_drv_rx_frag.
1896 * Args
1897 * smc - A pointer to the SMT context struct.
1898 *
1899 * len - The length (in bytes) of the received frame (FC, DA, SA, Data).
1900 *
1901 * fc - The Frame Control field of the received frame.
1902 *
1903 * look_ahead - A pointer to the lookahead data buffer (may be NULL).
1904 *
1905 * la_len - The length of the lookahead data stored in the lookahead
1906 * buffer (may be zero).
1907 * Out
1908 * Always returns zero (0).
1909 *
1910 ************************/
1911int mac_drv_rx_init(struct s_smc *smc, int len, int fc,
1912 char *look_ahead, int la_len)
1913{
1914 struct sk_buff *skb;
1915
1916 	pr_debug("entering mac_drv_rx_init(len=%d)\n", len);
1917
1918 // "Received" a SMT or NSA frame of the local SMT.
1919
1920 if (len != la_len || len < FDDI_MAC_HDR_LEN || !look_ahead) {
1921 pr_debug("fddi: Discard invalid local SMT frame\n");
1922 pr_debug(" len=%d, la_len=%d, (ULONG) look_ahead=%08lXh.\n",
1923 len, la_len, (unsigned long) look_ahead);
1924 return (0);
1925 }
1926 skb = alloc_skb(len + 3, GFP_ATOMIC);
1927 if (!skb) {
1928 		pr_debug("fddi: Local SMT: skb memory exhausted.\n");
1929 return (0);
1930 }
1931 skb_reserve(skb, 3);
1932 skb_put(skb, len);
1933 	skb_copy_to_linear_data(skb, look_ahead, len);
1934
1935 // deliver frame to system
1936 skb->protocol = fddi_type_trans(skb, smc->os.dev);
1937 netif_rx(skb);
1938
1939 return (0);
1940} // mac_drv_rx_init
1941
1942
1943/************************
1944 *
1945 * smt_timer_poll
1946 *
1947 * This routine is called periodically by the SMT module to clean up the
1948 * driver.
1949 *
1950 * Return any queued frames back to the upper protocol layers if the ring
1951 * is down.
1952 * Args
1953 * smc - A pointer to the SMT context struct.
1954 * Out
1955 * Nothing.
1956 *
1957 ************************/
1958void smt_timer_poll(struct s_smc *smc)
1959{
1960} // smt_timer_poll
1961
1962
1963/************************
1964 *
1965 * ring_status_indication
1966 *
1967 * This function indicates a change of the ring state.
1968 * Args
1969 * smc - A pointer to the SMT context struct.
1970 *
1971 * status - The current ring status.
1972 * Out
1973 * Nothing.
1974 *
1975 ************************/
1976void ring_status_indication(struct s_smc *smc, u_long status)
1977{
1978 	pr_debug("ring_status_indication( ");
1979 	if (status & RS_RES15)
1980 		pr_debug("RS_RES15 ");
1981 	if (status & RS_HARDERROR)
1982 		pr_debug("RS_HARDERROR ");
1983 	if (status & RS_SOFTERROR)
1984 		pr_debug("RS_SOFTERROR ");
1985 	if (status & RS_BEACON)
1986 		pr_debug("RS_BEACON ");
1987 	if (status & RS_PATHTEST)
1988 		pr_debug("RS_PATHTEST ");
1989 	if (status & RS_SELFTEST)
1990 		pr_debug("RS_SELFTEST ");
1991 	if (status & RS_RES9)
1992 		pr_debug("RS_RES9 ");
1993 	if (status & RS_DISCONNECT)
1994 		pr_debug("RS_DISCONNECT ");
1995 	if (status & RS_RES7)
1996 		pr_debug("RS_RES7 ");
1997 	if (status & RS_DUPADDR)
1998 		pr_debug("RS_DUPADDR ");
1999 	if (status & RS_NORINGOP)
2000 		pr_debug("RS_NORINGOP ");
2001 	if (status & RS_VERSION)
2002 		pr_debug("RS_VERSION ");
2003 	if (status & RS_STUCKBYPASSS)
2004 		pr_debug("RS_STUCKBYPASSS ");
2005 	if (status & RS_EVENT)
2006 		pr_debug("RS_EVENT ");
2007 	if (status & RS_RINGOPCHANGE)
2008 		pr_debug("RS_RINGOPCHANGE ");
2009 	if (status & RS_RES0)
2010 		pr_debug("RS_RES0 ");
2011 	pr_debug(")\n");
2012} // ring_status_indication
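/*
 * Illustrative sketch (not built): the bit-by-bit if-chain above could
 * also be written as a walk over a {mask, name} table.  This is only an
 * alternative formulation; the RS_* masks are the ones tested above and
 * the function name is hypothetical.
 */
#if 0
static void ring_status_indication_alt(struct s_smc *smc, u_long status)
{
	static const struct { u_long mask; const char *name; } rs_bits[] = {
		{ RS_RES15, "RS_RES15" }, { RS_HARDERROR, "RS_HARDERROR" },
		{ RS_SOFTERROR, "RS_SOFTERROR" }, { RS_BEACON, "RS_BEACON" },
		{ RS_PATHTEST, "RS_PATHTEST" }, { RS_SELFTEST, "RS_SELFTEST" },
		{ RS_RES9, "RS_RES9" }, { RS_DISCONNECT, "RS_DISCONNECT" },
		{ RS_RES7, "RS_RES7" }, { RS_DUPADDR, "RS_DUPADDR" },
		{ RS_NORINGOP, "RS_NORINGOP" }, { RS_VERSION, "RS_VERSION" },
		{ RS_STUCKBYPASSS, "RS_STUCKBYPASSS" }, { RS_EVENT, "RS_EVENT" },
		{ RS_RINGOPCHANGE, "RS_RINGOPCHANGE" }, { RS_RES0, "RS_RES0" },
	};
	unsigned int i;

	pr_debug("ring_status_indication( ");
	for (i = 0; i < ARRAY_SIZE(rs_bits); i++)
		if (status & rs_bits[i].mask)
			pr_debug("%s ", rs_bits[i].name);
	pr_debug(")\n");
}
#endif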
2013
2014
2015/************************
2016 *
2017 * smt_get_time
2018 *
2019 * Gets the current time from the system.
2020 * Args
2021 * None.
2022 * Out
2023 * The current time in TICKS_PER_SECOND.
2024 *
2025 * TICKS_PER_SECOND has the unit 'count of timer ticks per second'. It is
2026 * defined in "targetos.h". The definition of TICKS_PER_SECOND must comply
2027 * to the time returned by smt_get_time().
2028 *
2029 ************************/
2030unsigned long smt_get_time(void)
2031{
2032 return jiffies;
2033} // smt_get_time
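/*
 * Illustrative sketch (not built): since smt_get_time() returns jiffies,
 * TICKS_PER_SECOND is presumably defined as HZ in h/targetos.h (an
 * assumption, not verified here).  Converting an interval given in
 * seconds into the tick unit expected by the SMT code would then look
 * like this:
 */
#if 0
static unsigned long smt_seconds_to_ticks_sketch(unsigned long seconds)
{
	/* e.g. a 2-second timeout becomes 2 * HZ ticks (jiffies) */
	return seconds * TICKS_PER_SECOND;
}
#endif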
2034
2035
2036/************************
2037 *
2038 * smt_stat_counter
2039 *
2040 * Status counter update (ring_op, fifo full).
2041 * Args
2042 * smc - A pointer to the SMT context struct.
2043 *
2044 * stat - = 0: A ring operational change occurred.
2045 * = 1: The FORMAC FIFO buffer is full / FIFO overflow.
2046 * Out
2047 * Nothing.
2048 *
2049 ************************/
2050void smt_stat_counter(struct s_smc *smc, int stat)
2051{
2052// BOOLEAN RingIsUp ;
2053
2054 	pr_debug("smt_stat_counter\n");
2055 switch (stat) {
2056 case 0:
2057 		pr_debug("Ring operational change.\n");
2058 break;
2059 case 1:
2060 		pr_debug("Receive fifo overflow.\n");
2061 smc->os.MacStat.gen.rx_errors++;
2062 break;
2063 default:
2064 		pr_debug("Unknown status (%d).\n", stat);
2065 break;
2066 }
2067} // smt_stat_counter
2068
2069
2070/************************
2071 *
2072 * cfm_state_change
2073 *
2074 * Sets CFM state in custom statistics.
2075 * Args
2076 * smc - A pointer to the SMT context struct.
2077 *
2078 * c_state - Possible values are:
2079 *
2080 * SC0_ISOLATED, SC1_WRAP_A, SC2_WRAP_B, SC4_THRU_A, SC5_THRU_B,
2081 * SC7_WRAP_S, SC9_C_WRAP_A, SC10_C_WRAP_B, SC11_C_WRAP_S
2082 * Out
2083 * Nothing.
2084 *
2085 ************************/
2086void cfm_state_change(struct s_smc *smc, int c_state)
2087{
2088#ifdef DRIVERDEBUG
2089 char *s;
2090
2091 switch (c_state) {
2092 case SC0_ISOLATED:
2093 s = "SC0_ISOLATED";
2094 break;
2095 case SC1_WRAP_A:
2096 s = "SC1_WRAP_A";
2097 break;
2098 case SC2_WRAP_B:
2099 s = "SC2_WRAP_B";
2100 break;
2101 case SC4_THRU_A:
2102 s = "SC4_THRU_A";
2103 break;
2104 case SC5_THRU_B:
2105 s = "SC5_THRU_B";
2106 break;
2107 case SC7_WRAP_S:
2108 s = "SC7_WRAP_S";
2109 break;
2110 case SC9_C_WRAP_A:
2111 s = "SC9_C_WRAP_A";
2112 break;
2113 case SC10_C_WRAP_B:
2114 s = "SC10_C_WRAP_B";
2115 break;
2116 case SC11_C_WRAP_S:
2117 s = "SC11_C_WRAP_S";
2118 break;
2119 default:
2120 		pr_debug("cfm_state_change: unknown %d\n", c_state);
2121 return;
2122 }
2123 	pr_debug("cfm_state_change: %s\n", s);
2124#endif // DRIVERDEBUG
2125} // cfm_state_change
2126
2127
2128/************************
2129 *
2130 * ecm_state_change
2131 *
2132 * Sets ECM state in custom statistics.
2133 * Args
2134 * smc - A pointer to the SMT context struct.
2135 *
2136 * e_state - Possible values are:
2137 *
2138 * EC0_OUT, EC1_IN, EC2_TRACE, EC3_LEAVE, EC4_PATH_TEST,
2139 * EC5_INSERT, EC6_CHECK, EC7_DEINSERT
2140 * Out
2141 * Nothing.
2142 *
2143 ************************/
2144void ecm_state_change(struct s_smc *smc, int e_state)
2145{
2146#ifdef DRIVERDEBUG
2147 char *s;
2148
2149 switch (e_state) {
2150 case EC0_OUT:
2151 s = "EC0_OUT";
2152 break;
2153 case EC1_IN:
2154 s = "EC1_IN";
2155 break;
2156 case EC2_TRACE:
2157 s = "EC2_TRACE";
2158 break;
2159 case EC3_LEAVE:
2160 s = "EC3_LEAVE";
2161 break;
2162 case EC4_PATH_TEST:
2163 s = "EC4_PATH_TEST";
2164 break;
2165 case EC5_INSERT:
2166 s = "EC5_INSERT";
2167 break;
2168 case EC6_CHECK:
2169 s = "EC6_CHECK";
2170 break;
2171 case EC7_DEINSERT:
2172 s = "EC7_DEINSERT";
2173 break;
2174 default:
2175 s = "unknown";
2176 break;
2177 }
2178 	pr_debug("ecm_state_change: %s\n", s);
2179#endif //DRIVERDEBUG
2180} // ecm_state_change
2181
2182
2183/************************
2184 *
2185 * rmt_state_change
2186 *
2187 * Sets RMT state in custom statistics.
2188 * Args
2189 * smc - A pointer to the SMT context struct.
2190 *
2191 * r_state - Possible values are:
2192 *
2193 * RM0_ISOLATED, RM1_NON_OP, RM2_RING_OP, RM3_DETECT,
2194 * RM4_NON_OP_DUP, RM5_RING_OP_DUP, RM6_DIRECTED, RM7_TRACE
2195 * Out
2196 * Nothing.
2197 *
2198 ************************/
2199void rmt_state_change(struct s_smc *smc, int r_state)
2200{
2201#ifdef DRIVERDEBUG
2202 char *s;
2203
2204 switch (r_state) {
2205 case RM0_ISOLATED:
2206 s = "RM0_ISOLATED";
2207 break;
2208 case RM1_NON_OP:
2209 s = "RM1_NON_OP - not operational";
2210 break;
2211 case RM2_RING_OP:
2212 s = "RM2_RING_OP - ring operational";
2213 break;
2214 case RM3_DETECT:
2215 s = "RM3_DETECT - detect dupl addresses";
2216 break;
2217 case RM4_NON_OP_DUP:
2218 s = "RM4_NON_OP_DUP - dupl. addr detected";
2219 break;
2220 case RM5_RING_OP_DUP:
2221 s = "RM5_RING_OP_DUP - ring oper. with dupl. addr";
2222 break;
2223 case RM6_DIRECTED:
2224 s = "RM6_DIRECTED - sending directed beacons";
2225 break;
2226 case RM7_TRACE:
2227 s = "RM7_TRACE - trace initiated";
2228 break;
2229 default:
2230 s = "unknown";
2231 break;
2232 }
2233 	pr_debug("[rmt_state_change: %s]\n", s);
2234#endif // DRIVERDEBUG
2235} // rmt_state_change
2236
2237
2238/************************
2239 *
2240 * drv_reset_indication
2241 *
2242 * This function is called by the SMT when it has detected a severe
2243 * hardware problem. The driver should perform a reset on the adapter
2244 * as soon as possible, but not from within this function.
2245 * Args
2246 * smc - A pointer to the SMT context struct.
2247 * Out
2248 * Nothing.
2249 *
2250 ************************/
2251void drv_reset_indication(struct s_smc *smc)
2252{
2253 	pr_debug("entering drv_reset_indication\n");
2254
2255 smc->os.ResetRequested = TRUE; // Set flag.
2256
2257} // drv_reset_indication
2258
2259static struct pci_driver skfddi_pci_driver = {
2260 .name = "skfddi",
2261 .id_table = skfddi_pci_tbl,
2262 .probe = skfp_init_one,
2263 .remove = __devexit_p(skfp_remove_one),
2264};
2265
2266static int __init skfd_init(void)
2267{
2268 	return pci_register_driver(&skfddi_pci_driver);
2269}
2270
2271static void __exit skfd_exit(void)
2272{
2273 pci_unregister_driver(&skfddi_pci_driver);
2274}
2275
2276module_init(skfd_init);
2277module_exit(skfd_exit);