/*
 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
 *
 * Copyright 2008 JMicron Technology Corporation
 * http://www.jmicron.com/
 *
 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * TODO:
 *	- Implement MSI-X.
 *	  Along with multiple RX queues, for CPU load balancing.
 *	- Decode register dump for ethtool.
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_vlan.h>
#include "jme.h"

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
static struct net_device_stats *
jme_get_stats(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	return &jme->stats;
}
#endif

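/*
 * MII management (SMI) access helpers.
 * A read or write request is posted to the JME_SMI register, then the
 * SMI_OP_REQ bit is polled until the hardware clears it or the
 * JME_PHY_TIMEOUT poll budget is exhausted.
 */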
static int
jme_mdio_read(struct net_device *netdev, int phy, int reg)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i, val;

	jwrite32(jme, JME_SMI, SMI_OP_REQ |
		smi_phy_addr(phy) | smi_reg_addr(reg));

	for (i = JME_PHY_TIMEOUT ; i > 0 ; --i) {
		udelay(1);
		val = jread32(jme, JME_SMI);
		if ((val & SMI_OP_REQ) == 0)
			break;
	}

	if (i == 0)
		jeprintk(netdev->name, "phy read timeout : %d\n", reg);

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}

static void
jme_mdio_write(struct net_device *netdev,
		int phy, int reg, int val)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i;

	jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
		((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
		smi_phy_addr(phy) | smi_reg_addr(reg));

	for (i = JME_PHY_TIMEOUT ; i > 0 ; --i) {
		udelay(1);
		val = jread32(jme, JME_SMI);
		if ((val & SMI_OP_REQ) == 0)
			break;
	}

	if (i == 0)
		jeprintk(netdev->name, "phy write timeout : %d\n", reg);
}

__always_inline static void
jme_reset_phy_processor(struct jme_adapter *jme)
{
	__u32 val;

	jme_mdio_write(jme->dev, jme->mii_if.phy_id,
			MII_ADVERTISE, ADVERTISE_ALL |
			ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

	jme_mdio_write(jme->dev, jme->mii_if.phy_id,
			MII_CTRL1000,
			ADVERTISE_1000FULL | ADVERTISE_1000HALF);

	val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);

	jme_mdio_write(jme->dev, jme->mii_if.phy_id,
			MII_BMCR, val | BMCR_RESET);
}

static void
jme_setup_wakeup_frame(struct jme_adapter *jme,
		__u32 *mask, __u32 crc, int fnr)
{
	int i;

	jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL));
	wmb();
	jwrite32(jme, JME_WFODP, crc);
	wmb();

	for (i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) {
		jwrite32(jme, JME_WFOI,
			((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) |
			(fnr & WFOI_FRAME_SEL));
		wmb();
		jwrite32(jme, JME_WFODP, mask[i]);
		wmb();
	}
}

__always_inline static void
jme_reset_mac_processor(struct jme_adapter *jme)
{
	__u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
	__u32 crc = 0xCDCDCDCD;
	int i;

	jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
	udelay(2);
	jwrite32(jme, JME_GHC, jme->reg_ghc);
	jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
	jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
	for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i)
		jme_setup_wakeup_frame(jme, mask, crc, i);
	jwrite32(jme, JME_GPREG0, GPREG0_DEFAULT);
	jwrite32(jme, JME_GPREG1, 0);
}

__always_inline static void
jme_clear_pm(struct jme_adapter *jme)
{
	jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs);
	pci_set_power_state(jme->pdev, PCI_D0);
	pci_enable_wake(jme->pdev, PCI_D0, false);
}

static int
jme_reload_eeprom(struct jme_adapter *jme)
{
	__u32 val;
	int i;

	val = jread32(jme, JME_SMBCSR);
	if (!(val & SMBCSR_EEPROMD))
		return -EIO;

	jwrite32(jme, JME_SMBCSR, val);
	val |= SMBCSR_RELOAD;
	jwrite32(jme, JME_SMBCSR, val);
	mdelay(12);

	for (i = JME_SMB_TIMEOUT; i > 0; --i) {
		mdelay(1);
		if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
			break;
	}

	if (i == 0) {
		jeprintk(jme->dev->name, "eeprom reload timeout\n");
		return -EIO;
	}

	return 0;
}

static void
jme_load_macaddr(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	unsigned char macaddr[6];
	__u32 val;

	spin_lock(&jme->macaddr_lock);
	val = jread32(jme, JME_RXUMA_LO);
	macaddr[0] = (val >>  0) & 0xFF;
	macaddr[1] = (val >>  8) & 0xFF;
	macaddr[2] = (val >> 16) & 0xFF;
	macaddr[3] = (val >> 24) & 0xFF;
	val = jread32(jme, JME_RXUMA_HI);
	macaddr[4] = (val >>  0) & 0xFF;
	macaddr[5] = (val >>  8) & 0xFF;
	memcpy(netdev->dev_addr, macaddr, 6);
	spin_unlock(&jme->macaddr_lock);
}

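/*
 * Packet Completion Coalescing (PCC): the RX interrupt fires once
 * either a packet-count or a timeout threshold is reached.  PCC_OFF
 * disables coalescing (used while NAPI polling); P1..P3 trade
 * interrupt rate against latency.
 */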
__always_inline static void
jme_set_rx_pcc(struct jme_adapter *jme, int p)
{
	switch (p) {
	case PCC_OFF:
		jwrite32(jme, JME_PCCRX0,
			((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	case PCC_P1:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	case PCC_P2:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	case PCC_P3:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	default:
		break;
	}

	if (!(jme->flags & JME_FLAG_POLL))
		dprintk(jme->dev->name, "Switched to PCC_P%d\n", p);
}

static void
jme_start_irq(struct jme_adapter *jme)
{
	register struct dynpcc_info *dpi = &(jme->dpi);

	jme_set_rx_pcc(jme, PCC_P1);
	dpi->cur	= PCC_P1;
	dpi->attempt	= PCC_P1;
	dpi->cnt	= 0;

	jwrite32(jme, JME_PCCTX,
		((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
		((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) |
		PCCTXQ0_EN);

	/*
	 * Enable Interrupts
	 */
	jwrite32(jme, JME_IENS, INTR_ENABLE);
}

__always_inline static void
jme_stop_irq(struct jme_adapter *jme)
{
	/*
	 * Disable Interrupts
	 */
	jwrite32(jme, JME_IENC, INTR_ENABLE);
}

__always_inline static void
jme_enable_shadow(struct jme_adapter *jme)
{
	jwrite32(jme, JME_SHBA_LO,
		((__u32)jme->shadow_dma & ~((__u32)0x1F)) | SHBA_POSTEN);
}

__always_inline static void
jme_disable_shadow(struct jme_adapter *jme)
{
	jwrite32(jme, JME_SHBA_LO, 0x0);
}

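/*
 * Read the PHY link state, resolve speed/duplex (from SMI when the
 * link is forced, from the speed/duplex-resolved status when
 * autonegotiated), then program GHC/TXMCS/TXTRHD to match and update
 * the carrier state.  With testonly set, only the "did the link
 * change" test is performed.
 */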
static int
jme_check_link(struct net_device *netdev, int testonly)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	__u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr;
	char linkmsg[64];
	int rc = 0;

	linkmsg[0] = '\0';
	phylink = jread32(jme, JME_PHY_LINK);

	if (phylink & PHY_LINK_UP) {
		if (!(phylink & PHY_LINK_AUTONEG_COMPLETE)) {
			/*
			 * If we did not enable AN,
			 * Speed/Duplex Info should be obtained from SMI
			 */
			phylink = PHY_LINK_UP;

			bmcr = jme_mdio_read(jme->dev,
						jme->mii_if.phy_id,
						MII_BMCR);

			phylink |= ((bmcr & BMCR_SPEED1000) &&
					(bmcr & BMCR_SPEED100) == 0) ?
					PHY_LINK_SPEED_1000M :
					(bmcr & BMCR_SPEED100) ?
					PHY_LINK_SPEED_100M :
					PHY_LINK_SPEED_10M;

			phylink |= (bmcr & BMCR_FULLDPLX) ?
					PHY_LINK_DUPLEX : 0;

			strcat(linkmsg, "Forced: ");
		}
		else {
			/*
			 * Keep polling for speed/duplex resolve complete
			 */
			while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
				--cnt) {
				udelay(1);
				phylink = jread32(jme, JME_PHY_LINK);
			}

			if (!cnt)
				jeprintk(netdev->name,
					"Waiting for speed resolve timed out.\n");

			strcat(linkmsg, "ANed: ");
		}

		if (jme->phylink == phylink) {
			rc = 1;
			goto out;
		}
		if (testonly)
			goto out;

		jme->phylink = phylink;

		switch (phylink & PHY_LINK_SPEED_MASK) {
		case PHY_LINK_SPEED_10M:
			ghc = GHC_SPEED_10M;
			strcat(linkmsg, "10 Mbps, ");
			break;
		case PHY_LINK_SPEED_100M:
			ghc = GHC_SPEED_100M;
			strcat(linkmsg, "100 Mbps, ");
			break;
		case PHY_LINK_SPEED_1000M:
			ghc = GHC_SPEED_1000M;
			strcat(linkmsg, "1000 Mbps, ");
			break;
		default:
			ghc = 0;
			break;
		}
		ghc |= (phylink & PHY_LINK_DUPLEX) ? GHC_DPX : 0;

		strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
					"Full-Duplex, " :
					"Half-Duplex, ");

		if (phylink & PHY_LINK_MDI_STAT)
			strcat(linkmsg, "MDI-X");
		else
			strcat(linkmsg, "MDI");

		if (phylink & PHY_LINK_DUPLEX)
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
		else {
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
						TXMCS_BACKOFF |
						TXMCS_CARRIERSENSE |
						TXMCS_COLLISION);
			jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN |
				((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
				TXTRHD_TXREN |
				((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
		}

		jme->reg_ghc = ghc;
		jwrite32(jme, JME_GHC, ghc);

		jprintk(netdev->name, "Link is up at %s.\n", linkmsg);
		netif_carrier_on(netdev);
	}
	else {
		if (testonly)
			goto out;

		jprintk(netdev->name, "Link is down.\n");
		jme->phylink = 0;
		netif_carrier_off(netdev);
	}

out:
	return rc;
}

static int
jme_setup_tx_resources(struct jme_adapter *jme)
{
	struct jme_ring *txring = &(jme->txring[0]);

	txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
				TX_RING_ALLOC_SIZE(jme->tx_ring_size),
				&(txring->dmaalloc),
				GFP_ATOMIC);
	if (!txring->alloc) {
		txring->desc = NULL;
		txring->dmaalloc = 0;
		txring->dma = 0;
		return -ENOMEM;
	}

	/*
	 * 16-byte alignment
	 */
	txring->desc = (void *)ALIGN((unsigned long)(txring->alloc),
					RING_DESC_ALIGN);
	txring->dma = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
	txring->next_to_use = 0;
	txring->next_to_clean = 0;
	atomic_set(&txring->nr_free, jme->tx_ring_size);

	/*
	 * Initialize Transmit Descriptors
	 */
	memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size));
	memset(txring->bufinf, 0,
		sizeof(struct jme_buffer_info) * jme->tx_ring_size);

	return 0;
}

static void
jme_free_tx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *txring = &(jme->txring[0]);
	struct jme_buffer_info *txbi = txring->bufinf;

	if (txring->alloc) {
		for (i = 0 ; i < jme->tx_ring_size ; ++i) {
			txbi = txring->bufinf + i;
			if (txbi->skb) {
				dev_kfree_skb(txbi->skb);
				txbi->skb = NULL;
			}
			txbi->mapping = 0;
			txbi->len = 0;
			txbi->nr_desc = 0;
		}

		dma_free_coherent(&(jme->pdev->dev),
				TX_RING_ALLOC_SIZE(jme->tx_ring_size),
				txring->alloc,
				txring->dmaalloc);
		txring->alloc = NULL;
		txring->desc = NULL;
		txring->dmaalloc = 0;
		txring->dma = 0;
	}
	txring->next_to_use = 0;
	txring->next_to_clean = 0;
	atomic_set(&txring->nr_free, 0);
}

__always_inline static void
jme_enable_tx_engine(struct jme_adapter *jme)
{
	/*
	 * Select Queue 0
	 */
	jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);

	/*
	 * Setup TX Queue 0 DMA Base Address
	 */
	jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
	jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
	jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);

	/*
	 * Setup TX Descriptor Count
	 */
	jwrite32(jme, JME_TXQDC, jme->tx_ring_size);

	/*
	 * Enable TX Engine
	 */
	wmb();
	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_ENABLE);
}

__always_inline static void
jme_restart_tx_engine(struct jme_adapter *jme)
{
	/*
	 * Restart TX Engine if stopped
	 */
	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_ENABLE);
}

__always_inline static void
jme_disable_tx_engine(struct jme_adapter *jme)
{
	int i;
	__u32 val;

	/*
	 * Disable TX Engine
	 */
	jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);

	val = jread32(jme, JME_TXCS);
	for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) {
		mdelay(1);
		val = jread32(jme, JME_TXCS);
	}

	if (!i) {
		jeprintk(jme->dev->name, "Disable TX engine timeout.\n");
		jme_reset_mac_processor(jme);
	}
}

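/*
 * RX ring handling: each descriptor owns one pre-mapped skb buffer.
 * jme_set_clean_rxdesc() rewrites a descriptor with its buffer address
 * and hands it back to the NIC by setting RXFLAG_OWN last.
 */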
static void
jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = jme->rxring;
	register volatile struct rxdesc *rxdesc = rxring->desc;
	struct jme_buffer_info *rxbi = rxring->bufinf;

	rxdesc += i;
	rxbi += i;

	rxdesc->dw[0] = 0;
	rxdesc->dw[1] = 0;
	rxdesc->desc1.bufaddrh = cpu_to_le32((__u64)rxbi->mapping >> 32);
	rxdesc->desc1.bufaddrl = cpu_to_le32(
					(__u64)rxbi->mapping & 0xFFFFFFFFUL);
	rxdesc->desc1.datalen = cpu_to_le16(rxbi->len);
	if (jme->dev->features & NETIF_F_HIGHDMA)
		rxdesc->desc1.flags = RXFLAG_64BIT;
	wmb();
	rxdesc->desc1.flags |= RXFLAG_OWN | RXFLAG_INT;
}

static int
jme_make_new_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf + i;
	unsigned long offset;
	struct sk_buff *skb;

	skb = netdev_alloc_skb(jme->dev,
			jme->dev->mtu + RX_EXTRA_LEN);
	if (unlikely(!skb))
		return -ENOMEM;

	if (unlikely(offset =
			(unsigned long)(skb->data)
			& ((unsigned long)RX_BUF_DMA_ALIGN - 1)))
		skb_reserve(skb, RX_BUF_DMA_ALIGN - offset);

	rxbi->skb = skb;
	rxbi->len = skb_tailroom(skb);
	rxbi->mapping = pci_map_page(jme->pdev,
				virt_to_page(skb->data),
				offset_in_page(skb->data),
				rxbi->len,
				PCI_DMA_FROMDEVICE);

	return 0;
}

static void
jme_free_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf + i;

	if (rxbi->skb) {
		pci_unmap_page(jme->pdev,
				rxbi->mapping, rxbi->len, PCI_DMA_FROMDEVICE);
		dev_kfree_skb(rxbi->skb);
		rxbi->skb = NULL;
		rxbi->mapping = 0;
		rxbi->len = 0;
	}
}

static void
jme_free_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	if (rxring->alloc) {
		for (i = 0 ; i < jme->rx_ring_size ; ++i)
			jme_free_rx_buf(jme, i);

		dma_free_coherent(&(jme->pdev->dev),
				RX_RING_ALLOC_SIZE(jme->rx_ring_size),
				rxring->alloc,
				rxring->dmaalloc);
		rxring->alloc = NULL;
		rxring->desc = NULL;
		rxring->dmaalloc = 0;
		rxring->dma = 0;
	}
	rxring->next_to_use = 0;
	rxring->next_to_clean = 0;
}

static int
jme_setup_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
				RX_RING_ALLOC_SIZE(jme->rx_ring_size),
				&(rxring->dmaalloc),
				GFP_ATOMIC);
	if (!rxring->alloc) {
		rxring->desc = NULL;
		rxring->dmaalloc = 0;
		rxring->dma = 0;
		return -ENOMEM;
	}

	/*
	 * 16-byte alignment
	 */
	rxring->desc = (void *)ALIGN((unsigned long)(rxring->alloc),
					RING_DESC_ALIGN);
	rxring->dma = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
	rxring->next_to_use = 0;
	rxring->next_to_clean = 0;

	/*
	 * Initialize Receive Descriptors
	 */
	for (i = 0 ; i < jme->rx_ring_size ; ++i) {
		if (unlikely(jme_make_new_rx_buf(jme, i))) {
			jme_free_rx_resources(jme);
			return -ENOMEM;
		}

		jme_set_clean_rxdesc(jme, i);
	}

	return 0;
}

__always_inline static void
jme_enable_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Setup RX DMA Base Address
	 */
	jwrite32(jme, JME_RXDBA_LO, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);
	jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
	jwrite32(jme, JME_RXNDA, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);

	/*
	 * Setup RX Descriptor Count
	 */
	jwrite32(jme, JME_RXQDC, jme->rx_ring_size);

	/*
	 * Setup Unicast Filter
	 */
	jme_set_multi(jme->dev);

	/*
	 * Enable RX Engine
	 */
	wmb();
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
}

__always_inline static void
jme_restart_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Start RX Engine
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
}

__always_inline static void
jme_disable_rx_engine(struct jme_adapter *jme)
{
	int i;
	__u32 val;

	/*
	 * Disable RX Engine
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs);

	val = jread32(jme, JME_RXCS);
	for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) {
		mdelay(1);
		val = jread32(jme, JME_RXCS);
	}

	if (!i)
		jeprintk(jme->dev->name, "Disable RX engine timeout.\n");
}

static int
jme_rxsum_ok(struct jme_adapter *jme, __u16 flags)
{
	if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
		return false;

	if (unlikely((flags & RXWBFLAG_TCPON) &&
	!(flags & RXWBFLAG_TCPCS))) {
		csum_dbg(jme->dev->name, "TCP Checksum error.\n");
		return false;
	}

	if (unlikely((flags & RXWBFLAG_UDPON) &&
	!(flags & RXWBFLAG_UDPCS))) {
		csum_dbg(jme->dev->name, "UDP Checksum error.\n");
		return false;
	}

	if (unlikely((flags & RXWBFLAG_IPV4) &&
	!(flags & RXWBFLAG_IPCS))) {
		csum_dbg(jme->dev->name, "IPv4 Checksum error.\n");
		return false;
	}

	return true;
}

static void
jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	volatile struct rxdesc *rxdesc = rxring->desc;
	struct jme_buffer_info *rxbi = rxring->bufinf;
	struct sk_buff *skb;
	int framesize;

	rxdesc += idx;
	rxbi += idx;

	skb = rxbi->skb;
	pci_dma_sync_single_for_cpu(jme->pdev,
					rxbi->mapping,
					rxbi->len,
					PCI_DMA_FROMDEVICE);

	if (unlikely(jme_make_new_rx_buf(jme, idx))) {
		pci_dma_sync_single_for_device(jme->pdev,
						rxbi->mapping,
						rxbi->len,
						PCI_DMA_FROMDEVICE);

		++(NET_STAT(jme).rx_dropped);
	}
	else {
		framesize = le16_to_cpu(rxdesc->descwb.framesize)
				- RX_PREPAD_SIZE;

		skb_reserve(skb, RX_PREPAD_SIZE);
		skb_put(skb, framesize);
		skb->protocol = eth_type_trans(skb, jme->dev);

		if (jme_rxsum_ok(jme, rxdesc->descwb.flags))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		if (rxdesc->descwb.flags & RXWBFLAG_TAGON) {
			vlan_dbg(jme->dev->name, "VLAN: %04x\n",
					rxdesc->descwb.vlan);
			if (jme->vlgrp) {
				vlan_dbg(jme->dev->name,
					"VLAN Passed to kernel.\n");
				vlan_hwaccel_rx(skb, jme->vlgrp,
					le32_to_cpu(rxdesc->descwb.vlan));
				NET_STAT(jme).rx_bytes += 4;
			}
		}
		else {
			/* No VLAN tag: pass the frame up normally */
			netif_rx(skb);
		}

		if ((le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST) ==
				RXWBFLAG_DEST_MUL)
			++(NET_STAT(jme).multicast);

		jme->dev->last_rx = jiffies;
		NET_STAT(jme).rx_bytes += framesize;
		++(NET_STAT(jme).rx_packets);
	}

	jme_set_clean_rxdesc(jme, idx);
}

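/*
 * RX completion loop: walk the ring from next_to_clean and stop at the
 * first descriptor still owned by the NIC or not yet written back.
 * Error frames (and frames spanning several descriptors) are dropped
 * by recycling their descriptors; good frames are passed up via
 * jme_alloc_and_feed_skb().
 */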
static int
jme_process_receive(struct jme_adapter *jme, int limit)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	volatile struct rxdesc *rxdesc = rxring->desc;
	int i, j, ccnt, desccnt, mask = jme->rx_ring_mask;

	if (unlikely(!atomic_dec_and_test(&jme->rx_cleaning)))
		goto out_inc;

	if (unlikely(atomic_read(&jme->link_changing) != 1))
		goto out_inc;

	if (unlikely(!netif_carrier_ok(jme->dev)))
		goto out_inc;

	i = rxring->next_to_clean;
	while (limit-- > 0) {
		rxdesc = rxring->desc;
		rxdesc += i;

		if ((rxdesc->descwb.flags & RXWBFLAG_OWN) ||
		!(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
			goto out;

		desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;

		rx_dbg(jme->dev->name, "RX: Cleaning %d\n", i);

		if (unlikely(desccnt > 1 ||
		rxdesc->descwb.errstat & RXWBERR_ALLERR)) {
			if (rxdesc->descwb.errstat & RXWBERR_CRCERR)
				++(NET_STAT(jme).rx_crc_errors);
			else if (rxdesc->descwb.errstat & RXWBERR_OVERUN)
				++(NET_STAT(jme).rx_fifo_errors);
			else
				++(NET_STAT(jme).rx_errors);

			if (desccnt > 1) {
				rx_dbg(jme->dev->name,
					"RX: More than one(%d) descriptor, "
					"framesize=%d\n",
					desccnt, le16_to_cpu(rxdesc->descwb.framesize));
				limit -= desccnt - 1;
			}

			for (j = i, ccnt = desccnt ; ccnt-- ; ) {
				jme_set_clean_rxdesc(jme, j);
				j = (j + 1) & (mask);
			}
		}
		else {
			jme_alloc_and_feed_skb(jme, i);
		}

		i = (i + desccnt) & (mask);
	}

out:
	rx_dbg(jme->dev->name, "RX: Stop at %d\n", i);
	rx_dbg(jme->dev->name, "RX: RXNDA offset %d\n",
		(jread32(jme, JME_RXNDA) - jread32(jme, JME_RXDBA_LO))
			>> 4);

	rxring->next_to_clean = i;

out_inc:
	atomic_inc(&jme->rx_cleaning);

	return limit > 0 ? limit : 0;
}

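/*
 * Dynamic PCC tuning: pick a coalescing level from the RX byte/packet
 * rate observed since the last PCC timer tick (P3 for bulk transfer,
 * P2 for high packet or interrupt rates, P1 otherwise), and only
 * commit a switch after the same level has been attempted several
 * times in a row, to avoid oscillation.
 */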
static void
jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
{
	if (likely(atmp == dpi->cur)) {
		dpi->cnt = 0;
		return;
	}

	if (dpi->attempt == atmp) {
		++(dpi->cnt);
	}
	else {
		dpi->attempt = atmp;
		dpi->cnt = 0;
	}
}

static void
jme_dynamic_pcc(struct jme_adapter *jme)
{
	register struct dynpcc_info *dpi = &(jme->dpi);

	if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
		jme_attempt_pcc(dpi, PCC_P3);
	else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD
	|| dpi->intr_cnt > PCC_INTR_THRESHOLD)
		jme_attempt_pcc(dpi, PCC_P2);
	else
		jme_attempt_pcc(dpi, PCC_P1);

	if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
		jme_set_rx_pcc(jme, dpi->attempt);
		dpi->cur = dpi->attempt;
		dpi->cnt = 0;
	}
}

static void
jme_start_pcc_timer(struct jme_adapter *jme)
{
	struct dynpcc_info *dpi = &(jme->dpi);
	dpi->last_bytes	= NET_STAT(jme).rx_bytes;
	dpi->last_pkts	= NET_STAT(jme).rx_packets;
	dpi->intr_cnt	= 0;
	jwrite32(jme, JME_TMCSR,
		TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT));
}

__always_inline static void
jme_stop_pcc_timer(struct jme_adapter *jme)
{
	jwrite32(jme, JME_TMCSR, 0);
}

static void
jme_pcc_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct net_device *netdev = jme->dev;

	if (unlikely(!netif_carrier_ok(netdev) ||
		(atomic_read(&jme->link_changing) != 1)
	)) {
		jme_stop_pcc_timer(jme);
		return;
	}

	if (!(jme->flags & JME_FLAG_POLL))
		jme_dynamic_pcc(jme);

	jme_start_pcc_timer(jme);
}

__always_inline static void
jme_polling_mode(struct jme_adapter *jme)
{
	jme_set_rx_pcc(jme, PCC_OFF);
}

__always_inline static void
jme_interrupt_mode(struct jme_adapter *jme)
{
	jme_set_rx_pcc(jme, PCC_P1);
}

static void
jme_link_change_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct net_device *netdev = jme->dev;
	int timeout = WAIT_TASKLET_TIMEOUT;
	int rc;

	if (!atomic_dec_and_test(&jme->link_changing))
		goto out;

	if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
		goto out;

	jme->old_mtu = netdev->mtu;
	netif_stop_queue(netdev);

	while (--timeout > 0 &&
		(
		atomic_read(&jme->rx_cleaning) != 1 ||
		atomic_read(&jme->tx_cleaning) != 1
		)) {
		mdelay(1);
	}

	if (netif_carrier_ok(netdev)) {
		jme_stop_pcc_timer(jme);
		jme_reset_mac_processor(jme);
		jme_free_rx_resources(jme);
		jme_free_tx_resources(jme);

		if (jme->flags & JME_FLAG_POLL) {
			jme_polling_mode(jme);
			napi_disable(&jme->napi);
		}
	}

	jme_check_link(netdev, 0);
	if (netif_carrier_ok(netdev)) {
		rc = jme_setup_rx_resources(jme);
		if (rc) {
			jeprintk(netdev->name,
				"Allocating resources for RX error"
				", Device STOPPED!\n");
			goto out;
		}

		rc = jme_setup_tx_resources(jme);
		if (rc) {
			jeprintk(netdev->name,
				"Allocating resources for TX error"
				", Device STOPPED!\n");
			goto err_out_free_rx_resources;
		}

		jme_enable_rx_engine(jme);
		jme_enable_tx_engine(jme);

		netif_start_queue(netdev);

		if (jme->flags & JME_FLAG_POLL) {
			napi_enable(&jme->napi);
			jme_interrupt_mode(jme);
		}

		jme_start_pcc_timer(jme);
	}

	goto out;

err_out_free_rx_resources:
	jme_free_rx_resources(jme);
out:
	atomic_inc(&jme->link_changing);
}

static void
jme_rx_clean_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct dynpcc_info *dpi = &(jme->dpi);

	jme_process_receive(jme, jme->rx_ring_size);
	++(dpi->intr_cnt);
}

static int
jme_poll(struct napi_struct *napi, int budget)
{
	struct jme_adapter *jme = container_of(napi, struct jme_adapter, napi);
	struct net_device *netdev = jme->dev;
	int rest;

	rest = jme_process_receive(jme, budget);

	while (!atomic_dec_and_test(&jme->rx_empty)) {
		++(NET_STAT(jme).rx_dropped);
		jme_restart_rx_engine(jme);
	}
	atomic_inc(&jme->rx_empty);

	if (rest) {
		netif_rx_complete(netdev, napi);
		jme_interrupt_mode(jme);
	}

	return budget - rest;
}

static void
jme_rx_empty_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;

	if (unlikely(atomic_read(&jme->link_changing) != 1))
		return;

	if (unlikely(!netif_carrier_ok(jme->dev)))
		return;

	queue_dbg(jme->dev->name, "RX Queue Full!\n");

	jme_rx_clean_tasklet(arg);
	jme_restart_rx_engine(jme);
}

static void
jme_wake_queue_if_stopped(struct jme_adapter *jme)
{
	struct jme_ring *txring = jme->txring;

	smp_wmb();
	if (unlikely(netif_queue_stopped(jme->dev) &&
	atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
		queue_dbg(jme->dev->name, "TX Queue Woken.\n");
		netif_wake_queue(jme->dev);
	}
}

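/*
 * TX completion: reclaim descriptors the NIC has finished with
 * (TXWBFLAG_OWN cleared), unmap the fragment pages, free the skb, and
 * return the slots to nr_free so the queue can be woken again.
 */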
static void
jme_tx_clean_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct jme_ring *txring = &(jme->txring[0]);
	volatile struct txdesc *txdesc = txring->desc;
	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
	int i, j, cnt = 0, max, err, mask;

	if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
		goto out;

	if (unlikely(atomic_read(&jme->link_changing) != 1))
		goto out;

	if (unlikely(!netif_carrier_ok(jme->dev)))
		goto out;

	max = jme->tx_ring_size - atomic_read(&txring->nr_free);
	mask = jme->tx_ring_mask;

	tx_dbg(jme->dev->name, "Tx Tasklet: In\n");

	for (i = txring->next_to_clean ; cnt < max ; ) {
		ctxbi = txbi + i;

		if (likely(ctxbi->skb &&
		!(txdesc[i].descwb.flags & TXWBFLAG_OWN))) {
			err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;

			tx_dbg(jme->dev->name,
				"Tx Tasklet: Clean %d+%d\n",
				i, ctxbi->nr_desc);

			for (j = 1 ; j < ctxbi->nr_desc ; ++j) {
				ttxbi = txbi + ((i + j) & (mask));
				txdesc[(i + j) & (mask)].dw[0] = 0;

				pci_unmap_page(jme->pdev,
						ttxbi->mapping,
						ttxbi->len,
						PCI_DMA_TODEVICE);
				ttxbi->mapping = 0;
				ttxbi->len = 0;
			}

			dev_kfree_skb(ctxbi->skb);
			ctxbi->skb = NULL;

			cnt += ctxbi->nr_desc;

			if (unlikely(err))
				++(NET_STAT(jme).tx_carrier_errors);
			else {
				++(NET_STAT(jme).tx_packets);
				NET_STAT(jme).tx_bytes += ctxbi->len;
			}
		}
		else {
			if (!ctxbi->skb)
				tx_dbg(jme->dev->name,
					"Tx Tasklet:"
					" Stopped due to no skb.\n");
			else
				tx_dbg(jme->dev->name,
					"Tx Tasklet:"
					" Stopped due to not done.\n");
			break;
		}

		i = (i + ctxbi->nr_desc) & mask;

		ctxbi->len = 0;
		ctxbi->nr_desc = 0;
	}

	tx_dbg(jme->dev->name,
		"Tx Tasklet: Stop %d Jiffies %lu\n",
		i, jiffies);
	txring->next_to_clean = i;

	atomic_add(cnt, &txring->nr_free);

	jme_wake_queue_if_stopped(jme);

out:
	atomic_inc(&jme->tx_cleaning);
}

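/*
 * Common interrupt dispatch for INTx and MSI: interrupts are masked,
 * the status is cleared (write-1-to-clear), and the actual work is
 * deferred to tasklets (or to NAPI polling when JME_FLAG_POLL is set)
 * before interrupts are re-enabled.
 */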
static void
jme_intr_msi(struct jme_adapter *jme, __u32 intrstat)
{
	/*
	 * Disable interrupt
	 */
	jwrite32f(jme, JME_IENC, INTR_ENABLE);

	/*
	 * Write 1 clear interrupt status
	 */
	jwrite32f(jme, JME_IEVE, intrstat);

	if (intrstat & (INTR_LINKCH | INTR_SWINTR)) {
		tasklet_schedule(&jme->linkch_task);
		goto out_reenable;
	}

	if (intrstat & INTR_TMINTR)
		tasklet_schedule(&jme->pcc_task);

	if (intrstat & (INTR_PCCTXTO | INTR_PCCTX))
		tasklet_schedule(&jme->txclean_task);

	if (jme->flags & JME_FLAG_POLL) {
		if (intrstat & INTR_RX0EMP)
			atomic_inc(&jme->rx_empty);

		if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
			if (likely(
			netif_rx_schedule_prep(jme->dev, &jme->napi))) {
				jme_polling_mode(jme);
				__netif_rx_schedule(jme->dev, &jme->napi);
			}
		}
	}
	else {
		if (intrstat & INTR_RX0EMP)
			tasklet_schedule(&jme->rxempty_task);

		if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0))
			tasklet_schedule(&jme->rxclean_task);
	}

out_reenable:
	/*
	 * Re-enable interrupt
	 */
	jwrite32f(jme, JME_IENS, INTR_ENABLE);
}

static irqreturn_t
jme_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct jme_adapter *jme = netdev_priv(netdev);
	__u32 intrstat;

	intrstat = jread32(jme, JME_IEVE);

	/*
	 * Check if it's really an interrupt for us
	 */
	if (unlikely(intrstat == 0))
		return IRQ_NONE;

	/*
	 * Check if the device still exists
	 */
	if (unlikely(intrstat == ~((typeof(intrstat))0)))
		return IRQ_NONE;

	jme_intr_msi(jme, intrstat);

	return IRQ_HANDLED;
}

static irqreturn_t
jme_msi(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct jme_adapter *jme = netdev_priv(netdev);
	__u32 intrstat;

	pci_dma_sync_single_for_cpu(jme->pdev,
				jme->shadow_dma,
				sizeof(__u32) * SHADOW_REG_NR,
				PCI_DMA_FROMDEVICE);
	intrstat = jme->shadow_regs[SHADOW_IEVE];
	jme->shadow_regs[SHADOW_IEVE] = 0;

	jme_intr_msi(jme, intrstat);

	return IRQ_HANDLED;
}

static void
jme_reset_link(struct jme_adapter *jme)
{
	jwrite32(jme, JME_TMCSR, TMCSR_SWIT);
}

static void
jme_restart_an(struct jme_adapter *jme)
{
	__u32 bmcr;
	unsigned long flags;

	spin_lock_irqsave(&jme->phy_lock, flags);
	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
	spin_unlock_irqrestore(&jme->phy_lock, flags);
}

static int
jme_request_irq(struct jme_adapter *jme)
{
	int rc;
	struct net_device *netdev = jme->dev;
	irq_handler_t handler = jme_intr;
	int irq_flags = IRQF_SHARED;

	if (!pci_enable_msi(jme->pdev)) {
		jme->flags |= JME_FLAG_MSI;
		handler = jme_msi;
		irq_flags = 0;
	}

	rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
			netdev);
	if (rc) {
		jeprintk(netdev->name,
			"Unable to request %s interrupt (return: %d)\n",
			jme->flags & JME_FLAG_MSI ? "MSI" : "INTx", rc);

		if (jme->flags & JME_FLAG_MSI) {
			pci_disable_msi(jme->pdev);
			jme->flags &= ~JME_FLAG_MSI;
		}
	}
	else {
		netdev->irq = jme->pdev->irq;
	}

	return rc;
}

static void
jme_free_irq(struct jme_adapter *jme)
{
	free_irq(jme->pdev->irq, jme->dev);
	if (jme->flags & JME_FLAG_MSI) {
		pci_disable_msi(jme->pdev);
		jme->flags &= ~JME_FLAG_MSI;
		jme->dev->irq = jme->pdev->irq;
	}
}

static int
jme_open(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc, timeout = 100;

	while (--timeout > 0 &&
		(
		atomic_read(&jme->link_changing) != 1 ||
		atomic_read(&jme->rx_cleaning) != 1 ||
		atomic_read(&jme->tx_cleaning) != 1
		))
		msleep(10);

	if (!timeout) {
		rc = -EBUSY;
		goto err_out;
	}

	jme_clear_pm(jme);
	jme_reset_mac_processor(jme);

	rc = jme_request_irq(jme);
	if (rc)
		goto err_out;

	jme_enable_shadow(jme);
	jme_start_irq(jme);

	if (jme->flags & JME_FLAG_SSET)
		jme_set_settings(netdev, &jme->old_ecmd);
	else
		jme_reset_phy_processor(jme);

	jme_reset_link(jme);

	return 0;

err_out:
	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	return rc;
}

static void
jme_set_100m_half(struct jme_adapter *jme)
{
	__u32 bmcr, tmp;

	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
	tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
			BMCR_SPEED1000 | BMCR_FULLDPLX);
	tmp |= BMCR_SPEED100;

	if (bmcr != tmp)
		jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp);

	jwrite32(jme, JME_GHC, GHC_SPEED_100M);
}

static void
jme_phy_off(struct jme_adapter *jme)
{
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
}

static int
jme_close(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);

	jme_stop_irq(jme);
	jme_disable_shadow(jme);
	jme_free_irq(jme);

	if (jme->flags & JME_FLAG_POLL)
		napi_disable(&jme->napi);

	tasklet_kill(&jme->linkch_task);
	tasklet_kill(&jme->txclean_task);
	tasklet_kill(&jme->rxclean_task);
	tasklet_kill(&jme->rxempty_task);

	jme_reset_mac_processor(jme);
	jme_free_rx_resources(jme);
	jme_free_tx_resources(jme);
	jme->phylink = 0;
	jme_phy_off(jme);

	return 0;
}

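/*
 * TX ring layout: each packet takes nr_frags + 2 descriptors.  The
 * first carries the packet-wide flags (OWN/INT, TSO MSS, checksum,
 * VLAN tag), the second maps the linear part of the skb, and one more
 * follows per page fragment.
 */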
static int
jme_alloc_txdesc(struct jme_adapter *jme,
			struct sk_buff *skb)
{
	struct jme_ring *txring = jme->txring;
	int idx, nr_alloc, mask = jme->tx_ring_mask;

	idx = txring->next_to_use;
	nr_alloc = skb_shinfo(skb)->nr_frags + 2;

	if (unlikely(atomic_read(&txring->nr_free) < nr_alloc))
		return -1;

	atomic_sub(nr_alloc, &txring->nr_free);

	txring->next_to_use = (txring->next_to_use + nr_alloc) & mask;

	return idx;
}

static void
jme_fill_tx_map(struct pci_dev *pdev,
		volatile struct txdesc *txdesc,
		struct jme_buffer_info *txbi,
		struct page *page, __u32 page_offset,
		__u32 len, __u8 hidma)
{
	dma_addr_t dmaaddr;

	dmaaddr = pci_map_page(pdev, page, page_offset, len,
				PCI_DMA_TODEVICE);

	pci_dma_sync_single_for_device(pdev, dmaaddr, len,
					PCI_DMA_TODEVICE);

	txdesc->dw[0] = 0;
	txdesc->dw[1] = 0;
	txdesc->desc2.flags	= TXFLAG_OWN;
	txdesc->desc2.flags	|= (hidma) ? TXFLAG_64BIT : 0;
	txdesc->desc2.datalen	= cpu_to_le16(len);
	txdesc->desc2.bufaddrh	= cpu_to_le32((__u64)dmaaddr >> 32);
	txdesc->desc2.bufaddrl	= cpu_to_le32(
					(__u64)dmaaddr & 0xFFFFFFFFUL);

	txbi->mapping = dmaaddr;
	txbi->len = len;
}

static void
jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
{
	struct jme_ring *txring = jme->txring;
	volatile struct txdesc *txdesc = txring->desc, *ctxdesc;
	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
	__u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
	int i, nr_frags = skb_shinfo(skb)->nr_frags;
	int mask = jme->tx_ring_mask;
	struct skb_frag_struct *frag;
	__u32 len;

	for (i = 0 ; i < nr_frags ; ++i) {
		frag = &skb_shinfo(skb)->frags[i];
		ctxdesc = txdesc + ((idx + i + 2) & (mask));
		ctxbi = txbi + ((idx + i + 2) & (mask));

		jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, frag->page,
				frag->page_offset, frag->size, hidma);
	}

	len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
	ctxdesc = txdesc + ((idx + 1) & (mask));
	ctxbi = txbi + ((idx + 1) & (mask));
	jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
			offset_in_page(skb->data), len, hidma);
}

static int
jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
{
	if (unlikely(skb_shinfo(skb)->gso_size &&
			skb_header_cloned(skb) &&
			pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
		dev_kfree_skb(skb);
		return -1;
	}

	return 0;
}

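/*
 * For LSO/TSO the hardware expects the TCP pseudo-header checksum
 * (computed without the length field) to be pre-seeded into the TCP
 * header; the MSS is carried in the first descriptor.
 */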
static int
jme_tx_tso(struct sk_buff *skb,
		volatile __u16 *mss, __u8 *flags)
{
	if ((*mss = (skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT))) {
		*flags |= TXFLAG_LSEN;

		if (skb->protocol == __constant_htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);

			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								iph->daddr, 0,
								IPPROTO_TCP,
								0);
		}
		else {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr,
								&ip6h->daddr, 0,
								IPPROTO_TCP,
								0);
		}

		return 0;
	}

	return 1;
}

static void
jme_tx_csum(struct sk_buff *skb, __u8 *flags)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		__u8 ip_proto;

		switch (skb->protocol) {
		case __constant_htons(ETH_P_IP):
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case __constant_htons(ETH_P_IPV6):
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			ip_proto = 0;
			break;
		}

		switch (ip_proto) {
		case IPPROTO_TCP:
			*flags |= TXFLAG_TCPCS;
			break;
		case IPPROTO_UDP:
			*flags |= TXFLAG_UDPCS;
			break;
		default:
			jeprintk("jme", "Unsupported upper layer protocol.\n");
			break;
		}
	}
}

__always_inline static void
jme_tx_vlan(struct sk_buff *skb, volatile __u16 *vlan, __u8 *flags)
{
	if (vlan_tx_tag_present(skb)) {
		vlan_dbg("jme", "Tag found!(%04x)\n", vlan_tx_tag_get(skb));
		*flags |= TXFLAG_TAGON;
		*vlan = vlan_tx_tag_get(skb);
	}
}

static void
jme_fill_first_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
{
	struct jme_ring *txring = jme->txring;
	volatile struct txdesc *txdesc;
	struct jme_buffer_info *txbi;
	__u8 flags;

	txdesc = (volatile struct txdesc *)txring->desc + idx;
	txbi = txring->bufinf + idx;

	txdesc->dw[0] = 0;
	txdesc->dw[1] = 0;
	txdesc->dw[2] = 0;
	txdesc->dw[3] = 0;
	txdesc->desc1.pktsize = cpu_to_le16(skb->len);
	/*
	 * Set the OWN bit last: if the kernel fills descriptors faster
	 * than the NIC sends them, the NIC must not try to send this
	 * descriptor before we tell it to start this TX queue.
	 * All other fields are already filled in correctly.
	 */
	wmb();
	flags = TXFLAG_OWN | TXFLAG_INT;
	/*
	 * Set checksum flags when not doing TSO
	 */
	if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
		jme_tx_csum(skb, &flags);
	jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
	txdesc->desc1.flags = flags;
	/*
	 * Set the TX buffer info after setting OWN,
	 * for better tx_clean timing
	 */
	wmb();
	txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2;
	txbi->skb = skb;
	txbi->len = skb->len;
}

static void
jme_stop_queue_if_full(struct jme_adapter *jme)
{
	struct jme_ring *txring = jme->txring;

	smp_wmb();
	if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
		netif_stop_queue(jme->dev);
		queue_dbg(jme->dev->name, "TX Queue Paused.\n");
		smp_wmb();
		if (atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold)) {
			netif_wake_queue(jme->dev);
			queue_dbg(jme->dev->name, "TX Queue Fast Woken.\n");
		}
	}
}

/*
 * This function is already protected by netif_tx_lock()
 */
static int
jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int idx;

	if (skb_shinfo(skb)->nr_frags) {
		tx_dbg(netdev->name,
			"Frags: %d Headlen: %d Len: %d MSS: %d Sum: %d\n",
			skb_shinfo(skb)->nr_frags,
			skb_headlen(skb),
			skb->len,
			skb_shinfo(skb)->gso_size,
			skb->ip_summed);
	}

	if (unlikely(jme_expand_header(jme, skb))) {
		++(NET_STAT(jme).tx_dropped);
		return NETDEV_TX_OK;
	}

	idx = jme_alloc_txdesc(jme, skb);

	if (unlikely(idx < 0)) {
		netif_stop_queue(netdev);
		jeprintk(netdev->name,
			"BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}

	jme_map_tx_skb(jme, skb, idx);
	jme_fill_first_tx_desc(jme, skb, idx);

	tx_dbg(jme->dev->name, "Xmit: %d+%d\n",
		idx, skb_shinfo(skb)->nr_frags + 2);

	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_QUEUE0S |
				TXCS_ENABLE);
	netdev->trans_start = jiffies;

	jme_stop_queue_if_full(jme);

	return NETDEV_TX_OK;
}

static int
jme_set_macaddr(struct net_device *netdev, void *p)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	struct sockaddr *addr = p;
	__u32 val;

	if (netif_running(netdev))
		return -EBUSY;

	spin_lock(&jme->macaddr_lock);
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	val = addr->sa_data[3] << 24 |
	      addr->sa_data[2] << 16 |
	      addr->sa_data[1] <<  8 |
	      addr->sa_data[0];
	jwrite32(jme, JME_RXUMA_LO, val);
	val = addr->sa_data[5] << 8 |
	      addr->sa_data[4];
	jwrite32(jme, JME_RXUMA_HI, val);
	spin_unlock(&jme->macaddr_lock);

	return 0;
}

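/*
 * Multicast filtering uses a 64-bit hash: bit (ether_crc(DA) & 0x3F)
 * is set in the RXMCHT_LO/HI register pair, and the NIC accepts any
 * multicast frame whose hash bit is set.
 */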
static void
jme_set_multi(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 mc_hash[2] = {};
	int i;
	unsigned long flags;

	spin_lock_irqsave(&jme->rxmcs_lock, flags);

	jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME;

	if (netdev->flags & IFF_PROMISC) {
		jme->reg_rxmcs |= RXMCS_ALLFRAME;
	}
	else if (netdev->flags & IFF_ALLMULTI) {
		jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
	}
	else if (netdev->flags & IFF_MULTICAST) {
		struct dev_mc_list *mclist;
		int bit_nr;

		jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
		for (i = 0, mclist = netdev->mc_list;
			mclist && i < netdev->mc_count;
			++i, mclist = mclist->next) {

			bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
			mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
		}

		jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
		jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
	}

	wmb();
	jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);

	spin_unlock_irqrestore(&jme->rxmcs_lock, flags);
}

static int
jme_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	if (new_mtu == jme->old_mtu)
		return 0;

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
		((new_mtu) < IPV6_MIN_MTU))
		return -EINVAL;

	if (new_mtu > 4000) {
		jme->reg_rxcs &= ~RXCS_FIFOTHNP;
		jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
		jme_restart_rx_engine(jme);
	}
	else {
		jme->reg_rxcs &= ~RXCS_FIFOTHNP;
		jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
		jme_restart_rx_engine(jme);
	}

	if (new_mtu > 1900) {
		netdev->features &= ~(NETIF_F_HW_CSUM |
				NETIF_F_TSO |
				NETIF_F_TSO6);
	}
	else {
		if (jme->flags & JME_FLAG_TXCSUM)
			netdev->features |= NETIF_F_HW_CSUM;
		if (jme->flags & JME_FLAG_TSO)
			netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
	}

	netdev->mtu = new_mtu;
	jme_reset_link(jme);

	return 0;
}

static void
jme_tx_timeout(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	/*
	 * Reset the link, and the link change
	 * will reinitialize all RX/TX resources.
	 */
	jme_reset_link(jme);
}

static void
jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	jme->vlgrp = grp;
}

static void
jme_get_drvinfo(struct net_device *netdev,
		struct ethtool_drvinfo *info)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(jme->pdev));
}

static int
jme_get_regs_len(struct net_device *netdev)
{
	return 0x400;
}

static void
mmapio_memcpy(struct jme_adapter *jme, __u32 *p, __u32 reg, int len)
{
	int i;

	for (i = 0 ; i < len ; i += 4)
		p[i >> 2] = jread32(jme, reg + i);
}

static void
jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	__u32 *p32 = (__u32 *)p;

	memset(p, 0, 0x400);

	mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);

	p32 += 0x100 >> 2;
	mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN);

	p32 += 0x100 >> 2;
	mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN);

	p32 += 0x100 >> 2;
	mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);
}

static int
jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	if (jme->flags & JME_FLAG_POLL)
		ecmd->use_adaptive_rx_coalesce = false;
	else
		ecmd->use_adaptive_rx_coalesce = true;

	ecmd->tx_coalesce_usecs = PCC_TX_TO;
	ecmd->tx_max_coalesced_frames = PCC_TX_CNT;

	switch (jme->dpi.cur) {
	case PCC_P1:
		ecmd->rx_coalesce_usecs = PCC_P1_TO;
		ecmd->rx_max_coalesced_frames = PCC_P1_CNT;
		break;
	case PCC_P2:
		ecmd->rx_coalesce_usecs = PCC_P2_TO;
		ecmd->rx_max_coalesced_frames = PCC_P2_CNT;
		break;
	case PCC_P3:
		ecmd->rx_coalesce_usecs = PCC_P3_TO;
		ecmd->rx_max_coalesced_frames = PCC_P3_CNT;
		break;
	default:
		break;
	}

	return 0;
}

static int
jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	struct dynpcc_info *dpi = &(jme->dpi);

	if (ecmd->use_adaptive_rx_coalesce
	&& (jme->flags & JME_FLAG_POLL)) {
		jme->flags &= ~JME_FLAG_POLL;
		napi_disable(&jme->napi);
		dpi->cur	= PCC_P1;
		dpi->attempt	= PCC_P1;
		dpi->cnt	= 0;
		jme_set_rx_pcc(jme, PCC_P1);
		jme_interrupt_mode(jme);
	}
	else if (!(ecmd->use_adaptive_rx_coalesce)
	&& !(jme->flags & JME_FLAG_POLL)) {
		jme->flags |= JME_FLAG_POLL;
		napi_enable(&jme->napi);
		jme_interrupt_mode(jme);
	}

	return 0;
}

static void
jme_get_pauseparam(struct net_device *netdev,
			struct ethtool_pauseparam *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	unsigned long flags;
	__u32 val;

	ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
	ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;

	spin_lock_irqsave(&jme->phy_lock, flags);
	val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
	spin_unlock_irqrestore(&jme->phy_lock, flags);

	ecmd->autoneg =
		(val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
}

static int
jme_set_pauseparam(struct net_device *netdev,
			struct ethtool_pauseparam *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	unsigned long flags;
	__u32 val;

	if (((jme->reg_txpfc & TXPFC_PF_EN) != 0) !=
		(ecmd->tx_pause != 0)) {

		if (ecmd->tx_pause)
			jme->reg_txpfc |= TXPFC_PF_EN;
		else
			jme->reg_txpfc &= ~TXPFC_PF_EN;

		jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
	}

	spin_lock_irqsave(&jme->rxmcs_lock, flags);
	if (((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) !=
		(ecmd->rx_pause != 0)) {

		if (ecmd->rx_pause)
			jme->reg_rxmcs |= RXMCS_FLOWCTRL;
		else
			jme->reg_rxmcs &= ~RXMCS_FLOWCTRL;

		jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
	}
	spin_unlock_irqrestore(&jme->rxmcs_lock, flags);

	spin_lock_irqsave(&jme->phy_lock, flags);
	val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
	if (((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) !=
		(ecmd->autoneg != 0)) {

		if (ecmd->autoneg)
			val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		else
			val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		jme_mdio_write(jme->dev, jme->mii_if.phy_id,
				MII_ADVERTISE, val);
	}
	spin_unlock_irqrestore(&jme->phy_lock, flags);

	return 0;
}

static void
jme_get_wol(struct net_device *netdev,
		struct ethtool_wolinfo *wol)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	wol->supported = WAKE_MAGIC | WAKE_PHY;

	wol->wolopts = 0;

	if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
		wol->wolopts |= WAKE_PHY;

	if (jme->reg_pmcs & PMCS_MFEN)
		wol->wolopts |= WAKE_MAGIC;
}

static int
jme_set_wol(struct net_device *netdev,
		struct ethtool_wolinfo *wol)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_MAGICSECURE |
				WAKE_UCAST |
				WAKE_MCAST |
				WAKE_BCAST |
				WAKE_ARP))
		return -EOPNOTSUPP;

	jme->reg_pmcs = 0;

	if (wol->wolopts & WAKE_PHY)
		jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN;

	if (wol->wolopts & WAKE_MAGIC)
		jme->reg_pmcs |= PMCS_MFEN;

	return 0;
}

static int
jme_get_settings(struct net_device *netdev,
			struct ethtool_cmd *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&jme->phy_lock, flags);
	rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
	spin_unlock_irqrestore(&jme->phy_lock, flags);
	return rc;
}

static int
jme_set_settings(struct net_device *netdev,
			struct ethtool_cmd *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc;
	unsigned long flags;

	if (ecmd->speed == SPEED_1000 && ecmd->autoneg != AUTONEG_ENABLE)
		return -EINVAL;

	if (jme->mii_if.force_media &&
	ecmd->autoneg != AUTONEG_ENABLE &&
	(jme->mii_if.full_duplex != ecmd->duplex))
		return -EINVAL;

	spin_lock_irqsave(&jme->phy_lock, flags);
	rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
	spin_unlock_irqrestore(&jme->phy_lock, flags);

	if (!rc && (ecmd->autoneg == AUTONEG_ENABLE))
		jme_reset_link(jme);

	if (!rc) {
		jme->flags |= JME_FLAG_SSET;
		jme->old_ecmd = *ecmd;
	}

	return rc;
}

static u32
jme_get_link(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
}

static u32
jme_get_rx_csum(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	return jme->reg_rxmcs & RXMCS_CHECKSUM;
}

static int
jme_set_rx_csum(struct net_device *netdev, u32 on)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&jme->rxmcs_lock, flags);
	if (on)
		jme->reg_rxmcs |= RXMCS_CHECKSUM;
	else
		jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
	jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
	spin_unlock_irqrestore(&jme->rxmcs_lock, flags);

	return 0;
}

static int
jme_set_tx_csum(struct net_device *netdev, u32 on)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	if (on) {
		jme->flags |= JME_FLAG_TXCSUM;
		if (netdev->mtu <= 1900)
			netdev->features |= NETIF_F_HW_CSUM;
	}
	else {
		jme->flags &= ~JME_FLAG_TXCSUM;
		netdev->features &= ~NETIF_F_HW_CSUM;
	}

	return 0;
}

static int
jme_set_tso(struct net_device *netdev, u32 on)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	if (on) {
		jme->flags |= JME_FLAG_TSO;
		if (netdev->mtu <= 1900)
			netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
	}
	else {
		jme->flags &= ~JME_FLAG_TSO;
		netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
	}

	return 0;
}

static int
jme_nway_reset(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	jme_restart_an(jme);
	return 0;
}

static const struct ethtool_ops jme_ethtool_ops = {
	.get_drvinfo		= jme_get_drvinfo,
	.get_regs_len		= jme_get_regs_len,
	.get_regs		= jme_get_regs,
	.get_coalesce		= jme_get_coalesce,
	.set_coalesce		= jme_set_coalesce,
	.get_pauseparam		= jme_get_pauseparam,
	.set_pauseparam		= jme_set_pauseparam,
	.get_wol		= jme_get_wol,
	.set_wol		= jme_set_wol,
	.get_settings		= jme_get_settings,
	.set_settings		= jme_set_settings,
	.get_link		= jme_get_link,
	.get_rx_csum		= jme_get_rx_csum,
	.set_rx_csum		= jme_set_rx_csum,
	.set_tx_csum		= jme_set_tx_csum,
	.set_tso		= jme_set_tso,
	.set_sg			= ethtool_op_set_sg,
	.nway_reset		= jme_nway_reset,
};

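/*
 * Probe the widest usable DMA mask, falling back 64 -> 40 -> 32 bit.
 * Returns 1 when a >32-bit mask was accepted (the caller then
 * advertises NETIF_F_HIGHDMA), 0 for 32-bit, and a negative value on
 * failure.
 */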
static int
jme_pci_dma64(struct pci_dev *pdev)
{
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
		if (!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
			dprintk("jme", "64Bit DMA Selected.\n");
			return 1;
		}

	if (!pci_set_dma_mask(pdev, DMA_40BIT_MASK))
		if (!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK)) {
			dprintk("jme", "40Bit DMA Selected.\n");
			return 1;
		}

	if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
		if (!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
			dprintk("jme", "32Bit DMA Selected.\n");
			return 0;
		}

	return -1;
}

__always_inline static void
jme_set_phy_ps(struct jme_adapter *jme)
{
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, 0x00001000);
}

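/*
 * PCI probe: enable the device, pick a DMA mask, map BAR 0, allocate
 * the shadow-register DMA buffer, initialize locks/tasklets/NAPI and
 * default register values, then load the MAC address from EEPROM and
 * register the netdev (carrier stays off until open()).
 */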
static int __devinit
jme_init_one(struct pci_dev *pdev,
	     const struct pci_device_id *ent)
{
	int rc = 0, using_dac;
	struct net_device *netdev;
	struct jme_adapter *jme;

	/*
	 * set up PCI device basics
	 */
	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device.\n");
		goto err_out;
	}

	using_dac = jme_pci_dma64(pdev);
	if (using_dac < 0) {
		printk(KERN_ERR PFX "Cannot set PCI DMA Mask.\n");
		rc = -EIO;
		goto err_out_disable_pdev;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "No PCI resource region found.\n");
		rc = -ENOMEM;
		goto err_out_disable_pdev;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		printk(KERN_ERR PFX "Cannot obtain PCI resource region.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/*
	 * alloc and init net device
	 */
	netdev = alloc_etherdev(sizeof(*jme));
	if (!netdev) {
		printk(KERN_ERR PFX "Cannot allocate netdev structure.\n");
		rc = -ENOMEM;
		goto err_out_release_regions;
	}
	netdev->open			= jme_open;
	netdev->stop			= jme_close;
	netdev->hard_start_xmit		= jme_start_xmit;
	netdev->set_mac_address		= jme_set_macaddr;
	netdev->set_multicast_list	= jme_set_multi;
	netdev->change_mtu		= jme_change_mtu;
	netdev->ethtool_ops		= &jme_ethtool_ops;
	netdev->tx_timeout		= jme_tx_timeout;
	netdev->watchdog_timeo		= TX_TIMEOUT;
	netdev->vlan_rx_register	= jme_vlan_rx_register;
	NETDEV_GET_STATS(netdev, &jme_get_stats);
	netdev->features		= NETIF_F_HW_CSUM |
					  NETIF_F_SG |
					  NETIF_F_TSO |
					  NETIF_F_TSO6 |
					  NETIF_F_HW_VLAN_TX |
					  NETIF_F_HW_VLAN_RX;
	if (using_dac)
		netdev->features	|= NETIF_F_HIGHDMA;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	/*
	 * init adapter info
	 */
	jme = netdev_priv(netdev);
	jme->pdev = pdev;
	jme->dev = netdev;
	jme->old_mtu = netdev->mtu = 1500;
	jme->phylink = 0;
	jme->tx_ring_size = 1 << 10;
	jme->tx_ring_mask = jme->tx_ring_size - 1;
	jme->tx_wake_threshold = 1 << 9;
	jme->rx_ring_size = 1 << 9;
	jme->rx_ring_mask = jme->rx_ring_size - 1;
	jme->regs = ioremap(pci_resource_start(pdev, 0),
			    pci_resource_len(pdev, 0));
	if (!(jme->regs)) {
		printk(KERN_ERR PFX "Mapping PCI resource region error.\n");
		rc = -ENOMEM;
		goto err_out_free_netdev;
	}
	jme->shadow_regs = pci_alloc_consistent(pdev,
						sizeof(__u32) * SHADOW_REG_NR,
						&(jme->shadow_dma));
	if (!(jme->shadow_regs)) {
		printk(KERN_ERR PFX "Allocating shadow register mapping error.\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	netif_napi_add(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2);

	spin_lock_init(&jme->phy_lock);
	spin_lock_init(&jme->macaddr_lock);
	spin_lock_init(&jme->rxmcs_lock);

	atomic_set(&jme->link_changing, 1);
	atomic_set(&jme->rx_cleaning, 1);
	atomic_set(&jme->tx_cleaning, 1);
	atomic_set(&jme->rx_empty, 1);

	tasklet_init(&jme->pcc_task,
		     &jme_pcc_tasklet,
		     (unsigned long) jme);
	tasklet_init(&jme->linkch_task,
		     &jme_link_change_tasklet,
		     (unsigned long) jme);
	tasklet_init(&jme->txclean_task,
		     &jme_tx_clean_tasklet,
		     (unsigned long) jme);
	tasklet_init(&jme->rxclean_task,
		     &jme_rx_clean_tasklet,
		     (unsigned long) jme);
	tasklet_init(&jme->rxempty_task,
		     &jme_rx_empty_tasklet,
		     (unsigned long) jme);
	jme->mii_if.dev = netdev;
	jme->mii_if.phy_id = 1;
	jme->mii_if.supports_gmii = 1;
	jme->mii_if.mdio_read = jme_mdio_read;
	jme->mii_if.mdio_write = jme_mdio_write;

	jme->dpi.cur = PCC_P1;

	jme->reg_ghc = GHC_DPX | GHC_SPEED_1000M;
	jme->reg_rxcs = RXCS_DEFAULT;
	jme->reg_rxmcs = RXMCS_DEFAULT;
	jme->reg_txpfc = 0;
	jme->reg_pmcs = PMCS_LFEN | PMCS_LREN | PMCS_MFEN;
	jme->flags = JME_FLAG_TXCSUM | JME_FLAG_TSO | JME_FLAG_POLL;

	/*
	 * Get Max Read Req Size from PCI Config Space
	 */
	pci_read_config_byte(pdev, PCI_CONF_DCSR_MRRS, &jme->mrrs);
	jme->mrrs &= PCI_DCSR_MRRS_MASK;
	switch (jme->mrrs) {
	case MRRS_128B:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
		break;
	case MRRS_256B:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
		break;
	default:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
		break;
	}

	/*
	 * Reset MAC processor and reload EEPROM for MAC Address
	 */
	jme_clear_pm(jme);
	jme_set_phy_ps(jme);
	jme_phy_off(jme);
	jme_reset_mac_processor(jme);
	rc = jme_reload_eeprom(jme);
	if (rc) {
		printk(KERN_ERR PFX
			"Reload eeprom for reading MAC Address error.\n");
		goto err_out_free_shadow;
	}
	jme_load_macaddr(netdev);

	/*
	 * Tell stack that we are not ready to work until open()
	 */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	/*
	 * Register netdev
	 */
	rc = register_netdev(netdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot register net device.\n");
		goto err_out_free_shadow;
	}

	jprintk(netdev->name,
		"JMC250 gigabit eth %02x:%02x:%02x:%02x:%02x:%02x\n",
		netdev->dev_addr[0],
		netdev->dev_addr[1],
		netdev->dev_addr[2],
		netdev->dev_addr[3],
		netdev->dev_addr[4],
		netdev->dev_addr[5]);

	return 0;

err_out_free_shadow:
	pci_free_consistent(pdev,
			    sizeof(__u32) * SHADOW_REG_NR,
			    jme->shadow_regs,
			    jme->shadow_dma);
err_out_unmap:
	iounmap(jme->regs);
err_out_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out:
	return rc;
}

static void __devexit
jme_remove_one(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	unregister_netdev(netdev);
	pci_free_consistent(pdev,
			    sizeof(__u32) * SHADOW_REG_NR,
			    jme->shadow_regs,
			    jme->shadow_dma);
	iounmap(jme->regs);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

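/*
 * Power management: on suspend the rings are torn down and, if any
 * wake event is configured in reg_pmcs, the link is forced to 100M
 * half-duplex and PCI wake is armed; resume rebuilds the state and
 * kicks a link re-check.
 */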
static int
jme_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);
	int timeout = 100;

	atomic_dec(&jme->link_changing);

	netif_device_detach(netdev);
	netif_stop_queue(netdev);
	jme_stop_irq(jme);
	jme_free_irq(jme);

	while (--timeout > 0 &&
		(
		atomic_read(&jme->rx_cleaning) != 1 ||
		atomic_read(&jme->tx_cleaning) != 1
		)) {
		mdelay(1);
	}
	if (!timeout) {
		jeprintk(netdev->name, "Waiting for tasklets timed out.\n");
		return -EBUSY;
	}
	jme_disable_shadow(jme);

	if (netif_carrier_ok(netdev)) {
		jme_stop_pcc_timer(jme);
		jme_reset_mac_processor(jme);
		jme_free_rx_resources(jme);
		jme_free_tx_resources(jme);
		netif_carrier_off(netdev);
		jme->phylink = 0;

		if (jme->flags & JME_FLAG_POLL) {
			jme_polling_mode(jme);
			napi_disable(&jme->napi);
		}
	}

	pci_save_state(pdev);
	if (jme->reg_pmcs) {
		jme_set_100m_half(jme);
		jwrite32(jme, JME_PMCS, jme->reg_pmcs);
		pci_enable_wake(pdev, PCI_D3hot, true);
		pci_enable_wake(pdev, PCI_D3cold, true);
	}
	else {
		jme_phy_off(jme);
		pci_enable_wake(pdev, PCI_D3hot, false);
		pci_enable_wake(pdev, PCI_D3cold, false);
	}
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int
jme_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	jme_clear_pm(jme);
	pci_restore_state(pdev);

	if (jme->flags & JME_FLAG_SSET)
		jme_set_settings(netdev, &jme->old_ecmd);
	else
		jme_reset_phy_processor(jme);

	jme_reset_mac_processor(jme);
	jme_enable_shadow(jme);
	jme_request_irq(jme);
	jme_start_irq(jme);

	netif_device_attach(netdev);
	atomic_inc(&jme->link_changing);

	jme_reset_link(jme);

	return 0;
}

static struct pci_device_id jme_pci_tbl[] = {
	{ PCI_VDEVICE(JMICRON, 0x250) },
	{ }
};

static struct pci_driver jme_driver = {
	.name		= DRV_NAME,
	.id_table	= jme_pci_tbl,
	.probe		= jme_init_one,
	.remove		= __devexit_p(jme_remove_one),
#ifdef CONFIG_PM
	.suspend	= jme_suspend,
	.resume		= jme_resume,
#endif /* CONFIG_PM */
};

static int __init
jme_init_module(void)
{
	printk(KERN_INFO PFX "JMicron JMC250 gigabit ethernet "
	       "driver version %s\n", DRV_VERSION);
	return pci_register_driver(&jme_driver);
}

static void __exit
jme_cleanup_module(void)
{
	pci_unregister_driver(&jme_driver);
}

module_init(jme_init_module);
module_exit(jme_cleanup_module);

MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, jme_pci_tbl);