/*
 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
 *
 * Copyright 2008 JMicron Technology Corporation
 * http://www.jmicron.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/*
 * Timeline before release:
 *	Stage 1: Basic Performance / Capability fine-tuning.
 *	-  Implement PCC -- Dynamic adjustment.
 *	-  Use NAPI instead of rx_tasklet?
 *	   The PCC supports both packet-counter and timeout interrupts
 *	   for receive and transmit completion; is NAPI really needed?
 *	   I'll add NAPI support anyway, for systems with busy CPUs
 *	   and heavy network load.
 *	-  Try setting 64-bit DMA with pci_set[_consistent]_dma_mask,
 *	   and set the netdev feature flag.
 *	   (Need to modify the transmit-descriptor filling policy as well.)
 *	-  Use pci_map_page instead of pci_map_single for HIGHMEM support.
 *
 *	Stage 2: Error handling.
 *
 *	Stage 3: Basic offloading support.
 *	-  Implement scatter-gather offloading.
 *	   A system page per RX (buffer|descriptor)?
 *	   Handle fragmented sk_buffs in TX descriptors.
 *	-  Implement TX/RX IPv6/IP/TCP/UDP checksum offloading.
 *
 *	Stage 4: Basic feature support.
 *	-  Implement Power Management related functions.
 *	-  Implement Jumbo frame support.
 *
 *	Stage 5: Advanced offloading support.
 *	-  Implement VLAN offloading.
 *	-  Implement TCP Segmentation Offloading.
 *
 *	Stage 6: CPU Load balancing.
 *	-  Implement multiple RX queues for CPU load balancing.
 *	-  Use multiple TX queues so that multiple CPUs can transmit
 *	   simultaneously without locking.
 *
 *	Final stage:
 *	-  Clean up / reorganize code, performance tuning (alignment etc.).
 *	-  Test and release 1.0.
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/delay.h>
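/*
 * The register offsets (JME_*), bit definitions, descriptor layouts and
 * struct jme_adapter used below are assumed to come from the driver's
 * local header; "jme.h" is the presumed name of that (elided) header.
 */
#include "jme.h"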
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
static struct net_device_stats *jme_get_stats(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	return &(NET_STAT);
}
#endif
static int jme_mdio_read(struct net_device *netdev, int phy, int reg)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i, val;

	jwrite32(jme, JME_SMI, SMI_OP_REQ |
				smi_phy_addr(phy) |
				smi_reg_addr(reg));

	wmb();
	for (i = JME_PHY_TIMEOUT; i > 0; --i) {
		udelay(1);
		if (((val = jread32(jme, JME_SMI)) & SMI_OP_REQ) == 0)
			break;
	}

	if (i == 0) {
		jeprintk(netdev->name, "phy read timeout : %d\n", reg);
		return 0;
	}

	return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT;
}
static void jme_mdio_write(struct net_device *netdev, int phy, int reg, int val)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i;

	jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
		((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
		smi_phy_addr(phy) | smi_reg_addr(reg));

	wmb();
	for (i = JME_PHY_TIMEOUT; i > 0; --i) {
		udelay(1);
		if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0)
			break;
	}

	if (i == 0)
		jeprintk(netdev->name, "phy write timeout : %d\n", reg);
}
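/*
 * Usage sketch (not part of the driver): the generic mii library drives
 * the two accessors above through jme->mii_if, so an ethtool request
 * ends up as something like
 *
 *	bmsr = jme_mdio_read(netdev, jme->mii_if.phy_id, MII_BMSR);
 *
 * MII_BMSR comes from <linux/mii.h>; the call shown is illustrative.
 */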
static void jme_reset_mac_processor(struct jme_adapter *jme)
{
	__u32 val;

	/*
	 * Toggle the software-reset bit, then restore the register.
	 * (GHC_SWRST is the assumed name of the reset bit in jme.h.)
	 */
	val = jread32(jme, JME_GHC);
	val |= GHC_SWRST;
	jwrite32(jme, JME_GHC, val);
	udelay(2);
	val &= ~GHC_SWRST;
	jwrite32(jme, JME_GHC, val);
	jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
	jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
	jwrite32(jme, JME_WFODP, 0);
	jwrite32(jme, JME_WFOI, 0);
	jwrite32(jme, JME_GPREG0, GPREG0_DEFAULT);
	jwrite32(jme, JME_GPREG1, 0);
}
__always_inline static void jme_clear_pm(struct jme_adapter *jme)
{
	jwrite32(jme, JME_PMCS, 0xFFFF0000);
	pci_set_power_state(jme->pdev, PCI_D0);
}
static int jme_reload_eeprom(struct jme_adapter *jme)
{
	__u32 val;
	int i;

	val = jread32(jme, JME_SMBCSR);

	if (val & SMBCSR_EEPROMD) {
		jwrite32(jme, JME_SMBCSR, val);
		val |= SMBCSR_RELOAD;
		jwrite32(jme, JME_SMBCSR, val);

		for (i = JME_SMB_TIMEOUT; i > 0; --i) {
			mdelay(1);
			if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
				break;
		}

		if (i == 0) {
			jeprintk(jme->dev->name, "eeprom reload timeout\n");
			return -EIO;
		}
	}

	return 0;
}
__always_inline static void jme_load_macaddr(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	unsigned char macaddr[6];
	__u32 val;

	val = jread32(jme, JME_RXUMA_LO);
	macaddr[0] = (val >>  0) & 0xFF;
	macaddr[1] = (val >>  8) & 0xFF;
	macaddr[2] = (val >> 16) & 0xFF;
	macaddr[3] = (val >> 24) & 0xFF;
	val = jread32(jme, JME_RXUMA_HI);
	macaddr[4] = (val >>  0) & 0xFF;
	macaddr[5] = (val >>  8) & 0xFF;
	memcpy(netdev->dev_addr, macaddr, 6);
}
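/*
 * Worked example of the unicast-address packing used above and in
 * jme_set_macaddr() below: for the MAC address 00:11:22:33:44:55 the
 * hardware holds
 *
 *	JME_RXUMA_LO = 0x33221100	(bytes 0..3, little-endian)
 *	JME_RXUMA_HI = 0x00005544	(bytes 4..5 in the low half)
 */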
__always_inline static void jme_start_irq(struct jme_adapter *jme)
{
	/*
	 * Enable Interrupts
	 */
	jwrite32(jme, JME_IENS, INTR_ENABLE);
}

__always_inline static void jme_stop_irq(struct jme_adapter *jme)
{
	/*
	 * Disable Interrupts
	 */
	jwrite32(jme, JME_IENC, INTR_ENABLE);
}
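/*
 * Note on the pattern above: JME_IENS / JME_IENC appear to be set/clear
 * companions of one interrupt-enable register, so writing the same
 * INTR_ENABLE mask to one side enables and to the other disables,
 * without a read-modify-write. This reading is inferred from usage here,
 * not from a datasheet.
 */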
__always_inline static void jme_enable_shadow(struct jme_adapter *jme)
{
	jwrite32(jme, JME_SHBA_LO,
		 ((__u32)jme->shadow_dma & ~((__u32)0x1F)) | SHBA_POSTEN);
}

__always_inline static void jme_disable_shadow(struct jme_adapter *jme)
{
	jwrite32(jme, JME_SHBA_LO, 0x0);
}
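/*
 * Shadow registers: with SHBA_POSTEN set, the NIC DMA-posts a copy of
 * selected registers (notably the interrupt event register, see
 * jme_intr() below) into the 32-byte-aligned host buffer at shadow_dma,
 * letting the ISR read status from cacheable memory instead of doing an
 * MMIO read.
 */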
static void jme_check_link(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	__u32 phylink, ghc, cnt = JME_AUTONEG_TIMEOUT;
	char linkmsg[32];

	phylink = jread32(jme, JME_PHY_LINK);

	if (phylink & PHY_LINK_UP) {
		/*
		 * Keep polling for autoneg complete
		 */
		while (!(phylink & PHY_LINK_AUTONEG_COMPLETE) && --cnt > 0) {
			mdelay(1);
			phylink = jread32(jme, JME_PHY_LINK);
		}

		if (!cnt)
			jeprintk(netdev->name, "Waiting autoneg timeout.\n");

		switch (phylink & PHY_LINK_SPEED_MASK) {
		case PHY_LINK_SPEED_10M:
			ghc = GHC_SPEED_10M;
			strcpy(linkmsg, "10 Mbps, ");
			break;
		case PHY_LINK_SPEED_100M:
			ghc = GHC_SPEED_100M;
			strcpy(linkmsg, "100 Mbps, ");
			break;
		case PHY_LINK_SPEED_1000M:
			ghc = GHC_SPEED_1000M;
			strcpy(linkmsg, "1000 Mbps, ");
			break;
		default:
			ghc = 0;
			break;
		}
		ghc |= (phylink & PHY_LINK_DUPLEX) ? GHC_DPX : 0;
		jwrite32(jme, JME_GHC, ghc);
		strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
					"Full-Duplex" : "Half-Duplex");

		/*
		 * The half-duplex TXMCS flags below are reconstructed:
		 * they enable backoff / carrier-sense / collision handling,
		 * which full duplex does not need.
		 */
		if (phylink & PHY_LINK_DUPLEX)
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
		else
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
						 TXMCS_BACKOFF |
						 TXMCS_CARRIERSENSE |
						 TXMCS_COLLISION);

		jprintk(netdev->name, "Link is up at %s.\n", linkmsg);
		netif_carrier_on(netdev);
	} else {
		jprintk(netdev->name, "Link is down.\n");
		netif_carrier_off(netdev);
	}
}
static void jme_link_change_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	jme_check_link(jme->dev);
}
static void jme_set_new_txdesc(struct jme_adapter *jme,
			       int i, struct sk_buff *skb)
{
	struct jme_ring *txring = jme->txring;
	register struct TxDesc *txdesc = txring->desc;
	struct jme_buffer_info *txbi = txring->bufinf;
	dma_addr_t dmaaddr;

	txdesc += i;
	txbi += i;

	dmaaddr = pci_map_single(jme->pdev,
				 skb->data,
				 skb->len,
				 PCI_DMA_TODEVICE);

	pci_dma_sync_single_for_device(jme->pdev,
				       dmaaddr,
				       skb->len,
				       PCI_DMA_TODEVICE);

	/*
	 * Fill in the descriptor
	 */
	txdesc->desc1.bufaddr = cpu_to_le32(dmaaddr);
	txdesc->desc1.datalen = cpu_to_le16(skb->len);
	txdesc->desc1.pktsize = cpu_to_le16(skb->len);
	/*
	 * Set the OWN bit last.
	 * If the kernel queues packets faster than the NIC sends them,
	 * the NIC could otherwise start on this descriptor before we
	 * have finished filling it and told it to start the TX queue.
	 * All other fields are already filled in correctly.
	 */
	wmb();
	txdesc->desc1.flags = TXFLAG_OWN | TXFLAG_INT;

	txbi->skb = skb;
	txbi->mapping = dmaaddr;
	txbi->len = skb->len;

#ifdef TX_QUEUE_DEBUG
	dprintk(jme->dev->name, "TX Ring Buf Address(%08x,%08x,%d).\n",
		(__u32)dmaaddr,
		(txdesc->all[12] <<  0) |
		(txdesc->all[13] <<  8) |
		(txdesc->all[14] << 16) |
		(txdesc->all[15] << 24),
		(txdesc->all[4] <<  0) |
		(txdesc->all[5] <<  8));
#endif
}
static int jme_setup_tx_resources(struct jme_adapter *jme)
{
	struct jme_ring *txring = &(jme->txring[0]);

	txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
					   TX_RING_ALLOC_SIZE,
					   &(txring->dmaalloc),
					   GFP_KERNEL);
	if (!txring->alloc) {
		txring->desc = NULL;
		txring->dmaalloc = 0;
		txring->dma = 0;
		return -ENOMEM;
	}

	/*
	 * 16 Bytes align
	 */
	txring->desc = (void *)ALIGN((unsigned long)(txring->alloc),
				     RING_DESC_ALIGN);
	txring->dma = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
	txring->next_to_use = 0;
	txring->next_to_clean = 0;

#ifdef TX_QUEUE_DEBUG
	dprintk(jme->dev->name, "TX Ring Base Address(%08x,%08x).\n",
		(__u32)(unsigned long)txring->desc,
		(__u32)txring->dma);
#endif

	/*
	 * Initialize Transmit Descriptors
	 */
	memset(txring->alloc, 0, TX_RING_ALLOC_SIZE);
	memset(txring->bufinf, 0,
	       sizeof(struct jme_buffer_info) * RING_DESC_NR);

	return 0;
}
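/*
 * Alignment example for the ALIGN() pair above: the CPU pointer and the
 * DMA handle are rounded up together, so descriptor k lives at the same
 * offset in both views. If dmaalloc were 0x10008 and RING_DESC_ALIGN is
 * 16, the ring base becomes 0x10010; TX_RING_ALLOC_SIZE is assumed to be
 * padded by RING_DESC_ALIGN to leave room for this rounding.
 */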
static void jme_free_tx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *txring = &(jme->txring[0]);
	struct jme_buffer_info *txbi = txring->bufinf;

	if (txring->alloc) {
		for (i = 0; i < RING_DESC_NR; ++i) {
			txbi = txring->bufinf + i;
			if (txbi->skb) {
				dev_kfree_skb(txbi->skb);
				txbi->skb = NULL;
			}
			txbi->mapping = 0;
			txbi->len = 0;
		}

		dma_free_coherent(&(jme->pdev->dev),
				  TX_RING_ALLOC_SIZE,
				  txring->alloc,
				  txring->dmaalloc);
		txring->alloc = NULL;
		txring->desc = NULL;
		txring->dmaalloc = 0;
		txring->dma = 0;
	}
	txring->next_to_use = 0;
	txring->next_to_clean = 0;
}
__always_inline static void jme_enable_tx_engine(struct jme_adapter *jme)
{
	__u8 mrrs;

	/*
	 * Select Queue 0
	 */
	jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);

	/*
	 * Setup TX Queue 0 DMA Base Address
	 */
	jwrite32(jme, JME_TXDBA, jme->txring[0].dma);
	jwrite32(jme, JME_TXNDA, jme->txring[0].dma);

	/*
	 * Setup TX Descriptor Count
	 */
	jwrite32(jme, JME_TXQDC, RING_DESC_NR);

	/*
	 * Get Max Read Req Size from PCI Config Space,
	 * and set the DMA burst size accordingly.
	 * (The MRRS_* case labels are reconstructed from the
	 *  TXCS_DMASIZE_* values they select.)
	 */
	pci_read_config_byte(jme->pdev, PCI_CONF_DCSR_MRRS, &mrrs);
	switch (mrrs) {
	case MRRS_128B:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
		break;
	case MRRS_256B:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
		break;
	default:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
		break;
	}

	/*
	 * Enable TX Engine
	 */
	wmb();
	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_ENABLE);
}
__always_inline static void jme_disable_tx_engine(struct jme_adapter *jme)
{
	int i;
	__u32 val;

	/*
	 * Disable TX Engine
	 */
	jwrite32(jme, JME_TXCS, jme->reg_txcs);

	val = jread32(jme, JME_TXCS);
	for (i = JME_TX_DISABLE_TIMEOUT; (val & TXCS_ENABLE) && i > 0; --i) {
		mdelay(1);
		val = jread32(jme, JME_TXCS);
	}

	if (!i)
		jeprintk(jme->dev->name, "Disable TX engine timeout.\n");
}
static void jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = jme->rxring;
	register struct RxDesc *rxdesc = rxring->desc;
	struct jme_buffer_info *rxbi = rxring->bufinf;

	rxdesc += i;
	rxbi += i;

	rxdesc->desc1.bufaddrh = cpu_to_le32(((__u64)rxbi->mapping) >> 32);
	rxdesc->desc1.bufaddrl = cpu_to_le32(rxbi->mapping);
	rxdesc->desc1.datalen = cpu_to_le16(RX_BUF_SIZE);
	wmb();
	rxdesc->desc1.flags = RXFLAG_OWN | RXFLAG_INT;

#ifdef RX_QUEUE_DEBUG
	dprintk(jme->dev->name, "RX Ring Buf Address(%08x,%08x,%d).\n",
		(__u32)rxbi->mapping,
		(rxdesc->all[12] <<  0) |
		(rxdesc->all[13] <<  8) |
		(rxdesc->all[14] << 16) |
		(rxdesc->all[15] << 24),
		(rxdesc->all[4] <<  0) |
		(rxdesc->all[5] <<  8));
#endif
}
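/*
 * Descriptor ownership handshake, as used above: the driver fills a
 * descriptor and hands it to the NIC by setting RXFLAG_OWN (TXFLAG_OWN
 * on the TX side); the NIC clears OWN in its write-back once it has
 * consumed the descriptor. jme_process_receive() below therefore treats
 * "OWN still set" as "nothing new to reap".
 */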
static int jme_make_new_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf;
	unsigned long offset;
	struct sk_buff *skb;

	skb = netdev_alloc_skb(jme->dev, RX_BUF_ALLOC_SIZE);
	if (unlikely(!skb))
		return -ENOMEM;

	if (unlikely(skb_shinfo(skb)->nr_frags)) {
		dprintk(jme->dev->name, "Allocated skb fragged(%d).\n",
			skb_shinfo(skb)->nr_frags);
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	if (unlikely((offset =
			(unsigned long)(skb->data)
			& (unsigned long)(RX_BUF_DMA_ALIGN - 1))))
		skb_reserve(skb, RX_BUF_DMA_ALIGN - offset);

	rxbi += i;
	rxbi->skb = skb;
	rxbi->mapping = pci_map_single(jme->pdev,
				       skb->data,
				       RX_BUF_SIZE,
				       PCI_DMA_FROMDEVICE);

	return 0;
}
static void jme_free_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf;

	rxbi += i;
	if (rxbi->skb) {
		pci_unmap_single(jme->pdev,
				 rxbi->mapping,
				 RX_BUF_SIZE,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb(rxbi->skb);
		rxbi->skb = NULL;
		rxbi->mapping = 0;
	}
}
static int jme_setup_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
					   RX_RING_ALLOC_SIZE,
					   &(rxring->dmaalloc),
					   GFP_KERNEL);
	if (!rxring->alloc) {
		rxring->desc = NULL;
		rxring->dmaalloc = 0;
		rxring->dma = 0;
		return -ENOMEM;
	}

	/*
	 * 16 Bytes align
	 */
	rxring->desc = (void *)ALIGN((unsigned long)(rxring->alloc),
				     RING_DESC_ALIGN);
	rxring->dma = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
	rxring->next_to_use = 0;
	rxring->next_to_clean = 0;

#ifdef RX_QUEUE_DEBUG
	dprintk(jme->dev->name, "RX Ring Base Address(%08x,%08x).\n",
		(__u32)(unsigned long)rxring->desc,
		(__u32)rxring->dma);
#endif

	/*
	 * Initialize Receive Descriptors
	 */
	for (i = 0; i < RING_DESC_NR; ++i) {
		if (unlikely(jme_make_new_rx_buf(jme, i)))
			break;

		jme_set_clean_rxdesc(jme, i);
	}

	/*
	 * Cleanup allocated memories when error
	 */
	if (i != RING_DESC_NR) {
		for (--i; i >= 0; --i)
			jme_free_rx_buf(jme, i);

		dma_free_coherent(&(jme->pdev->dev),
				  RX_RING_ALLOC_SIZE,
				  rxring->alloc,
				  rxring->dmaalloc);
		rxring->alloc = NULL;
		rxring->desc = NULL;
		rxring->dmaalloc = 0;
		rxring->dma = 0;
		return -ENOMEM;
	}

	return 0;
}
static void jme_free_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	if (rxring->alloc) {
		for (i = 0; i < RING_DESC_NR; ++i)
			jme_free_rx_buf(jme, i);

		dma_free_coherent(&(jme->pdev->dev),
				  RX_RING_ALLOC_SIZE,
				  rxring->alloc,
				  rxring->dmaalloc);
		rxring->alloc = NULL;
		rxring->desc = NULL;
		rxring->dmaalloc = 0;
		rxring->dma = 0;
	}
	rxring->next_to_use = 0;
	rxring->next_to_clean = 0;
}
__always_inline static void jme_enable_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Setup RX DMA Base Address
	 */
	jwrite32(jme, JME_RXDBA, jme->rxring[0].dma);
	jwrite32(jme, JME_RXNDA, jme->rxring[0].dma);

	/*
	 * Setup RX Descriptor Count
	 */
	jwrite32(jme, JME_RXQDC, RING_DESC_NR);

	/*
	 * Setup Unicast Filter
	 */
	jme_set_multi(jme->dev);

	/*
	 * Enable RX Engine
	 * (queue-select and start bits reconstructed to match
	 *  jme_restart_rx_engine below)
	 */
	wmb();
	jwrite32(jme, JME_RXCS, RXCS_DEFAULT |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
}
__always_inline static void jme_restart_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Start RX Engine
	 */
	jwrite32(jme, JME_RXCS, RXCS_DEFAULT |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
}
__always_inline static void jme_disable_rx_engine(struct jme_adapter *jme)
{
	int i;
	__u32 val;

	/*
	 * Disable RX Engine
	 */
	val = jread32(jme, JME_RXCS);
	val &= ~RXCS_ENABLE;
	jwrite32(jme, JME_RXCS, val);

	val = jread32(jme, JME_RXCS);
	for (i = JME_RX_DISABLE_TIMEOUT; (val & RXCS_ENABLE) && i > 0; --i) {
		mdelay(1);
		val = jread32(jme, JME_RXCS);
	}

	if (!i)
		jeprintk(jme->dev->name, "Disable RX engine timeout.\n");
}
static void jme_tx_clean_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct jme_ring *txring = &(jme->txring[0]);
	struct TxDesc *txdesc = txring->desc;
	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
	struct sk_buff *skb;
	int i, end;

#ifdef TX_TASKLET_DEBUG
	dprintk(jme->dev->name, "into tasklet\n");
#endif

	end = txring->next_to_use;
	for (i = txring->next_to_clean; i != end; ) {
		ctxbi = txbi + i;
		skb = ctxbi->skb;
		if (skb && !(txdesc[i].desc1.flags & TXFLAG_OWN)) {
#ifdef TX_TASKLET_DEBUG
			dprintk(jme->dev->name, "cleaning %d\n", i);
#endif
			pci_unmap_single(jme->pdev,
					 ctxbi->mapping,
					 ctxbi->len,
					 PCI_DMA_TODEVICE);

			prefetch(txbi + i + 1);
			prefetch(txdesc + i + 1);

			ctxbi->len = skb->len;
			dev_kfree_skb(skb);
			ctxbi->skb = NULL;
		} else {
			break;
		}

		if (unlikely(++i == RING_DESC_NR))
			i = 0;
	}
	txring->next_to_clean = i;
}
static void jme_process_receive(struct jme_adapter *jme)
{
	struct net_device *netdev = jme->dev;
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct RxDesc *rxdesc = rxring->desc;
	struct jme_buffer_info *rxbi;
	struct sk_buff *skb;
	dma_addr_t buf_dma;
	int i, j, start, cnt, ccnt;
	unsigned int framesize, desccnt;
#ifdef RX_PKT_DEBUG
	__u8 *rxbuf;
#endif

	/*
	 * Assume one descriptor per frame for now.
	 * This should be fixed in the future
	 * (or not, if the buffer is already large enough
	 *  to store an entire packet).
	 */

	spin_lock(&jme->recv_lock);
	i = start = rxring->next_to_clean;
	/*
	 * Decide how many descriptors need to be processed.
	 * In the worst case we'll have to process the entire queue.
	 */
	for (cnt = 0; cnt < RING_DESC_NR; ) {
		rxdesc = (struct RxDesc *)(rxring->desc) + i;
		if ((rxdesc->descwb.flags & RXWBFLAG_OWN) ||
		    !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL)) {
			rxring->next_to_clean = i;
			break;
		}

		desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;

		if (unlikely((cnt += desccnt) >= RING_DESC_NR)) {
			/*
			 * Counting past the whole ring: leave this frame
			 * for the next pass (handling reconstructed).
			 */
			cnt -= desccnt;
			rxring->next_to_clean = i;
			break;
		}

		if (unlikely((i += desccnt) >= RING_DESC_NR))
			i -= RING_DESC_NR;
	}
	spin_unlock(&jme->recv_lock);

	/*
	 * Process descriptors independently across CPUs
	 * --- saved for future multiple-CPU handling
	 */
	for (i = start; cnt--; ) {
		rxdesc = (struct RxDesc *)(rxring->desc) + i;
		desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;
		rxbi = rxring->bufinf + i;

		if (unlikely(
			/*
			 * Drop and record error packet
			 */
			rxdesc->descwb.errstat & RXWBERR_ALLERR ||
			desccnt > 1)) {
			if (rxdesc->descwb.errstat & RXWBERR_OVERUN)
				++(NET_STAT.rx_fifo_errors);
			else if (rxdesc->descwb.errstat & RXWBERR_CRCERR)
				++(NET_STAT.rx_frame_errors);
			else
				++(NET_STAT.rx_errors);
#ifdef RX_ERR_DEBUG
			dprintk(netdev->name, "err: %02x\n",
				rxdesc->descwb.errstat);
#endif

			/*
			 * Recycle the descriptor(s)
			 */
			for (j = i, ccnt = desccnt; ccnt--; ) {
				jme_set_clean_rxdesc(jme, j);

				if (unlikely(++j == RING_DESC_NR))
					j = 0;
			}
		} else {
			/*
			 * Pass received packet to kernel
			 */
			skb = rxbi->skb;
			buf_dma = rxbi->mapping;
#ifdef RX_PKT_DEBUG
			rxbuf = skb->data;
#endif
			pci_dma_sync_single_for_cpu(jme->pdev,
						    buf_dma,
						    RX_BUF_SIZE,
						    PCI_DMA_FROMDEVICE);

			if (unlikely(jme_make_new_rx_buf(jme, i))) {
				pci_dma_sync_single_for_device(jme->pdev,
							       buf_dma,
							       RX_BUF_SIZE,
							       PCI_DMA_FROMDEVICE);

				++(NET_STAT.rx_dropped);
			} else {
				framesize = le16_to_cpu(rxdesc->descwb.framesize);

				skb_put(skb, framesize);
				skb->protocol = eth_type_trans(skb, netdev);
				netif_rx(skb);

				if (le16_to_cpu(rxdesc->descwb.flags) &
				    RXWBFLAG_DEST_MUL)
					++(NET_STAT.multicast);

				netdev->last_rx = jiffies;
				NET_STAT.rx_bytes += framesize;
				++(NET_STAT.rx_packets);
			}

			jme_set_clean_rxdesc(jme, i);

#ifdef RX_PKT_DEBUG
			dprintk(netdev->name,
				"DESCCNT: %u, FSIZE: %u, ADDRH: %08x, "
				"ADDRL: %08x, FLAGS: %04x, STAT: %02x, "
				"DST:%02x:%02x:%02x:%02x:%02x:%02x\n",
				desccnt,
				framesize,
				le32_to_cpu(rxdesc->dw[2]),
				le32_to_cpu(rxdesc->dw[3]),
				le16_to_cpu(rxdesc->descwb.flags),
				rxdesc->descwb.errstat,
				rxbuf[0], rxbuf[1], rxbuf[2],
				rxbuf[3], rxbuf[4], rxbuf[5]);
#endif
		}

		if (unlikely((i += desccnt) >= RING_DESC_NR))
			i -= RING_DESC_NR;
	}
}
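/*
 * Design note on jme_process_receive(): reaping is split into two passes.
 * The first pass runs under recv_lock and only counts how many completed
 * descriptors are ready (advancing next_to_clean), so the lock is held
 * briefly; the second pass then builds skbs and refills buffers without
 * the lock. Per the comment in the function, this is groundwork for
 * spreading the second pass across CPUs.
 */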
static void jme_rx_clean_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;

	jme_process_receive(jme);
	if (jme->flags & JME_FLAG_RXQ0_EMPTY) {
		jme_restart_rx_engine(jme);
		jme->flags &= ~JME_FLAG_RXQ0_EMPTY;
	}
}
static irqreturn_t jme_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct jme_adapter *jme = netdev_priv(netdev);
	irqreturn_t rc = IRQ_HANDLED;
	__u32 intrstat;

#ifdef USE_IEVE_SHADOW
	/*
	 * USE_IEVE_SHADOW is a reconstructed name for the original
	 * build-time switch between the shadow-register and MMIO paths.
	 */
	pci_dma_sync_single_for_cpu(jme->pdev,
				    jme->shadow_dma,
				    sizeof(__u32) * SHADOW_REG_NR,
				    PCI_DMA_FROMDEVICE);
	intrstat = jme->shadow_regs[SHADOW_IEVE];
	jme->shadow_regs[SHADOW_IEVE] = 0;
#else
	intrstat = jread32(jme, JME_IEVE);
#endif

#ifdef INTERRUPT_DEBUG
	dprintk(netdev->name, "Interrupt received(%08x) @ %lu.\n",
		intrstat, jiffies);
#endif

	/*
	 * Check if it's really an interrupt for us
	 * and if the device still exists
	 */
	if ((intrstat & INTR_ENABLE) == 0) {
		rc = IRQ_NONE;
		goto out;
	}
	if (unlikely(intrstat == ~((typeof(intrstat))0))) {
		rc = IRQ_NONE;
		goto out;
	}

	if (intrstat & INTR_LINKCH) {
		/*
		 * Process Link status change event
		 */
		tasklet_schedule(&jme->linkch_task);
	}

	if (intrstat & INTR_RX0EMP) {
		/*
		 * Mark queue 0 empty; the RX tasklet restarts the
		 * engine after refilling buffers.
		 */
		jme->flags |= JME_FLAG_RXQ0_EMPTY;
		jeprintk(netdev->name, "Ran out of Receive Queue 0.\n");
	}

	if (intrstat & INTR_RX0) {
		/*
		 * Process RX complete event
		 */
		tasklet_schedule(&jme->rxclean_task);
#ifdef INTERRUPT_DEBUG
		dprintk(netdev->name, "Received From Queue 0.\n");
#endif
	}

	if (intrstat & INTR_TX0) {
		/*
		 * Process TX complete event
		 */
		tasklet_schedule(&jme->txclean_task);
#ifdef INTERRUPT_DEBUG
		dprintk(netdev->name, "Queue 0 transmit complete.\n");
#endif
	}

	if ((intrstat & ~INTR_ENABLE) != 0) {
#ifdef INTERRUPT_DEBUG
		dprintk(netdev->name,
			"Some interrupt event not handled: %08x\n",
			intrstat & ~INTR_ENABLE);
#endif
	}

	/*
	 * Deassert interrupts
	 */
	jwrite32(jme, JME_IEVE, intrstat & INTR_ENABLE);

out:
	return rc;
}
static int jme_open(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc;

	rc = request_irq(jme->pdev->irq, jme_intr,
			 IRQF_SHARED, netdev->name, netdev);
	if (rc) {
		printk(KERN_ERR PFX "Requesting IRQ error.\n");
		goto err_out;
	}

	rc = jme_setup_rx_resources(jme);
	if (rc) {
		printk(KERN_ERR PFX "Allocating resources for RX error.\n");
		goto err_out_free_irq;
	}

	rc = jme_setup_tx_resources(jme);
	if (rc) {
		printk(KERN_ERR PFX "Allocating resources for TX error.\n");
		goto err_out_free_rx_resources;
	}

	jme_reset_mac_processor(jme);
	jme_check_link(netdev);
	jme_enable_shadow(jme);
	jme_start_irq(jme);
	jme_enable_rx_engine(jme);
	jme_enable_tx_engine(jme);
	netif_start_queue(netdev);

	return 0;

err_out_free_rx_resources:
	jme_free_rx_resources(jme);
err_out_free_irq:
	free_irq(jme->pdev->irq, jme->dev);
err_out:
	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	return rc;
}
static int jme_close(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);

	jme_stop_irq(jme);
	jme_disable_shadow(jme);
	free_irq(jme->pdev->irq, jme->dev);

	tasklet_kill(&jme->linkch_task);
	tasklet_kill(&jme->txclean_task);
	tasklet_kill(&jme->rxclean_task);
	jme_disable_rx_engine(jme);
	jme_disable_tx_engine(jme);
	jme_free_rx_resources(jme);
	jme_free_tx_resources(jme);

	return 0;
}
static int jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	struct jme_ring *txring = &(jme->txring[0]);
	struct TxDesc *txdesc = txring->desc;
	int idx;

	/*
	 * Check if transmit queue is already full
	 * and take one descriptor to use
	 */
	spin_lock(&jme->xmit_lock);
	idx = txring->next_to_use;
	if (unlikely(txdesc[idx].desc1.flags & TXFLAG_OWN)) {
		spin_unlock(&jme->xmit_lock);
#ifdef TX_BUSY_DEBUG
		dprintk(netdev->name, "TX Device busy.\n");
#endif
		return NETDEV_TX_BUSY;
	}
	if (unlikely(++(txring->next_to_use) == RING_DESC_NR))
		txring->next_to_use = 0;
	spin_unlock(&jme->xmit_lock);

	/*
	 * Fill up TX descriptors
	 */
	jme_set_new_txdesc(jme, idx, skb);

	/*
	 * Tell MAC HW to send
	 * (TXCS_QUEUE0S, the queue-0 start bit, is a reconstructed name)
	 */
	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_QUEUE0S |
				TXCS_ENABLE);

#ifdef TX_QUEUE_DEBUG
	dprintk(netdev->name, "Asked to transmit.\n");
#endif

	NET_STAT.tx_bytes += skb->len;
	++(NET_STAT.tx_packets);
	netdev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
static int jme_set_macaddr(struct net_device *netdev, void *p)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	struct sockaddr *addr = p;
	__u32 val;

	if (netif_running(netdev))
		return -EBUSY;

	spin_lock(&jme->macaddr_lock);
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	val = addr->sa_data[3] << 24 |
	      addr->sa_data[2] << 16 |
	      addr->sa_data[1] <<  8 |
	      addr->sa_data[0];
	jwrite32(jme, JME_RXUMA_LO, val);
	val = addr->sa_data[5] << 8 |
	      addr->sa_data[4];
	jwrite32(jme, JME_RXUMA_HI, val);
	spin_unlock(&jme->macaddr_lock);

	return 0;
}
static void jme_set_multi(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 mc_hash[2] = {};
	__u32 val;
	int i;

	spin_lock(&jme->macaddr_lock);
	val = RXMCS_BRDFRAME | RXMCS_UNIFRAME;

	if (netdev->flags & IFF_PROMISC)
		val |= RXMCS_ALLFRAME;
	else if (netdev->flags & IFF_ALLMULTI)
		val |= RXMCS_ALLMULFRAME;
	else if (netdev->flags & IFF_MULTICAST) {
		struct dev_mc_list *mclist;
		int bit_nr;

		val |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
		for (i = 0, mclist = netdev->mc_list;
		     mclist && i < netdev->mc_count;
		     ++i, mclist = mclist->next) {
			bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
			mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
#ifdef SET_MULTI_DEBUG
			dprintk(netdev->name, "Adding MCAddr: "
				"%02x:%02x:%02x:%02x:%02x:%02x (%d)\n",
				mclist->dmi_addr[0],
				mclist->dmi_addr[1],
				mclist->dmi_addr[2],
				mclist->dmi_addr[3],
				mclist->dmi_addr[4],
				mclist->dmi_addr[5],
				bit_nr);
#endif
		}

		jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
		jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
	}

	wmb();
	jwrite32(jme, JME_RXMCS, val);
	spin_unlock(&jme->macaddr_lock);

#ifdef SET_MULTI_DEBUG
	dprintk(netdev->name, "RX Mode changed: %08x\n", val);
#endif
}
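/*
 * Hash example for the filter above: ether_crc(ETH_ALEN, addr) & 0x3F
 * picks one of 64 hash bits. A bit_nr of, say, 43 (0x2B) gives
 * mc_hash[43 >> 5] == mc_hash[1] and bit 43 & 0x1F == 11, so the bit
 * lands in JME_RXMCHT_HI. With RXMCS_MULFILTERED set, the NIC accepts a
 * multicast frame only if its hash bit is set.
 */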
static int jme_change_mtu(struct net_device *dev, int new_mtu)
{
	/*
	 * Do not support MTU change for now.
	 */
	return -EINVAL;
}
static void jme_get_drvinfo(struct net_device *netdev,
			    struct ethtool_drvinfo *info)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(jme->pdev));
}
static int jme_get_settings(struct net_device *netdev,
			    struct ethtool_cmd *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc;

	spin_lock(&jme->phy_lock);
	rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
	spin_unlock(&jme->phy_lock);
	return rc;
}

static int jme_set_settings(struct net_device *netdev,
			    struct ethtool_cmd *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc;

	spin_lock(&jme->phy_lock);
	rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
	spin_unlock(&jme->phy_lock);
	return rc;
}
static u32 jme_get_link(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
}
static const struct ethtool_ops jme_ethtool_ops = {
	.get_drvinfo = jme_get_drvinfo,
	.get_settings = jme_get_settings,
	.set_settings = jme_set_settings,
	.get_link = jme_get_link,
};
static int __devinit jme_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	int rc;
	struct net_device *netdev;
	struct jme_adapter *jme;

	/*
	 * set up PCI device basics
	 */
	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device.\n");
		return rc;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "No PCI resource region found.\n");
		rc = -ENOMEM;
		goto err_out_disable_pdev;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		printk(KERN_ERR PFX "Cannot obtain PCI resource region.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/*
	 * alloc and init net device
	 */
	netdev = alloc_etherdev(sizeof(struct jme_adapter));
	if (!netdev) {
		rc = -ENOMEM;
		goto err_out_release_regions;
	}
	netdev->open = jme_open;
	netdev->stop = jme_close;
	netdev->hard_start_xmit = jme_start_xmit;
	netdev->irq = pdev->irq;
	netdev->set_mac_address = jme_set_macaddr;
	netdev->set_multicast_list = jme_set_multi;
	netdev->change_mtu = jme_change_mtu;
	netdev->ethtool_ops = &jme_ethtool_ops;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	/*
	 * init adapter info
	 */
	jme = netdev_priv(netdev);
	jme->pdev = pdev;
	jme->dev = netdev;
	jme->regs = ioremap(pci_resource_start(pdev, 0),
			    pci_resource_len(pdev, 0));
	if (!(jme->regs)) {
		rc = -ENOMEM;
		goto err_out_free_netdev;
	}
	jme->shadow_regs = pci_alloc_consistent(pdev,
						sizeof(__u32) * SHADOW_REG_NR,
						&(jme->shadow_dma));
	if (!(jme->shadow_regs)) {
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	spin_lock_init(&jme->xmit_lock);
	spin_lock_init(&jme->recv_lock);
	spin_lock_init(&jme->macaddr_lock);
	spin_lock_init(&jme->phy_lock);
	tasklet_init(&jme->linkch_task,
		     &jme_link_change_tasklet,
		     (unsigned long) jme);
	tasklet_init(&jme->txclean_task,
		     &jme_tx_clean_tasklet,
		     (unsigned long) jme);
	tasklet_init(&jme->rxclean_task,
		     &jme_rx_clean_tasklet,
		     (unsigned long) jme);
	jme->mii_if.dev = netdev;
	jme->mii_if.phy_id = 1;
	jme->mii_if.supports_gmii = 1;
	jme->mii_if.mdio_read = jme_mdio_read;
	jme->mii_if.mdio_write = jme_mdio_write;
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
	netdev->get_stats = &(jme_get_stats);
#endif

	/*
	 * Reset MAC processor and reload EEPROM for MAC Address
	 */
	jme_clear_pm(jme);
	jme_reset_mac_processor(jme);
	rc = jme_reload_eeprom(jme);
	if (rc) {
		printk(KERN_ERR PFX
		       "Reload eeprom for reading MAC Address error.\n");
		goto err_out_free_shadow;
	}
	jme_load_macaddr(netdev);

	/*
	 * Tell stack that we are not ready to work until open()
	 */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	/*
	 * Register netdev
	 */
	rc = register_netdev(netdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot register net device.\n");
		goto err_out_free_shadow;
	}

	jprintk(netdev->name,
		"JMC250 gigabit eth at %llx, "
		"%02x:%02x:%02x:%02x:%02x:%02x, IRQ %d\n",
		(unsigned long long) pci_resource_start(pdev, 0),
		netdev->dev_addr[0],
		netdev->dev_addr[1],
		netdev->dev_addr[2],
		netdev->dev_addr[3],
		netdev->dev_addr[4],
		netdev->dev_addr[5],
		pdev->irq);

	return 0;

err_out_free_shadow:
	pci_free_consistent(pdev,
			    sizeof(__u32) * SHADOW_REG_NR,
			    jme->shadow_regs,
			    jme->shadow_dma);
err_out_unmap:
	iounmap(jme->regs);
err_out_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
	return rc;
}
static void __devexit jme_remove_one(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	unregister_netdev(netdev);
	pci_free_consistent(pdev,
			    sizeof(__u32) * SHADOW_REG_NR,
			    jme->shadow_regs,
			    jme->shadow_dma);
	iounmap(jme->regs);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
static struct pci_device_id jme_pci_tbl[] = {
	{ PCI_VDEVICE(JMICRON, 0x250) },
	{ }
};
static struct pci_driver jme_driver = {
	.name = DRV_NAME,
	.id_table = jme_pci_tbl,
	.probe = jme_init_one,
	.remove = __devexit_p(jme_remove_one),
#if 0 /* Power management is not implemented yet (Stage 4 above) */
#ifdef CONFIG_PM
	.suspend = jme_suspend,
	.resume = jme_resume,
#endif /* CONFIG_PM */
#endif
};
static int __init jme_init_module(void)
{
	printk(KERN_INFO PFX "JMicron JMC250 gigabit ethernet "
	       "driver version %s\n", DRV_VERSION);
	return pci_register_driver(&jme_driver);
}

static void __exit jme_cleanup_module(void)
{
	pci_unregister_driver(&jme_driver);
}

module_init(jme_init_module);
module_exit(jme_cleanup_module);
MODULE_AUTHOR("David Tseng <cooldavid@cooldavid.org>");
MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, jme_pci_tbl);