/*
 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
 *
 * Copyright 2008 JMicron Technology Corporation
 * http://www.jmicron.com/
 *
 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *	check whether the RX queue has stopped,
 *	and restart it after the RX ring has been cleaned.
 *
 * Timeline before release:
 *	Stage 2: Error handling.
 *
 *	Stage 3: Basic offloading support.
 *	-	Use pci_map_page on scattered sk_buffs for HIGHMEM support.
 *	-	Implement scatter-gather offloading.
 *		A system page per RX (buffer|descriptor)?
 *		Map fragmented sk_buffs onto TX descriptors.
 *	-	Implement TX/RX IPv6/IP/TCP/UDP checksum offloading.
 *
 *	Stage 4: Basic feature support.
 *	-	Implement Power Management related functions.
 *	-	Implement Jumbo frame support.
 *
 *	Stage 5: Advanced offloading support.
 *	-	Implement VLAN offloading.
 *	-	Implement TCP Segmentation Offload.
 *
 *	Stage 6: CPU load balancing.
 *		Along with multiple RX queues, for CPU load balancing.
 *
 *	-	Use NAPI instead of rx_tasklet?
 *		PCC supports both packet-counter and timeout interrupts for
 *		receive and transmit completion; is NAPI really needed?
 *	-	Clean up / re-organize code, performance tuning (alignment etc.).
 *	-	Test and release 1.0.
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/delay.h>

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
static struct net_device_stats *
jme_get_stats(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);

jme_mdio_read(struct net_device *netdev, int phy, int reg)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i, val;

	jwrite32(jme, JME_SMI, SMI_OP_REQ |
		smi_phy_addr(phy) | smi_reg_addr(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; --i) {

		if (((val = jread32(jme, JME_SMI)) & SMI_OP_REQ) == 0)

	jeprintk(netdev->name, "phy read timeout : %d\n", reg);

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
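
/*
 * Illustrative usage sketch (not part of the original driver): the SMI
 * helpers above expose the standard MII register set from <linux/mii.h>,
 * so PHY state can be queried directly.  The helper name below is
 * hypothetical.
 */
static inline int
jme_phy_link_is_up(struct jme_adapter *jme)
{
	int bmsr;

	/*
	 * BMSR latches link-down events, so read it twice to get the
	 * current link state instead of a stale, latched one.
	 */
	bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR);
	bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR);

	return !!(bmsr & BMSR_LSTATUS);
}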
jme_mdio_write(struct net_device *netdev,
		int phy, int reg, int val)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i;

	jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
		((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
		smi_phy_addr(phy) | smi_reg_addr(reg));

	for (i = JME_PHY_TIMEOUT ; i > 0 ; --i) {

		if (((val = jread32(jme, JME_SMI)) & SMI_OP_REQ) == 0)

	jeprintk(netdev->name, "phy write timeout : %d\n", reg);

__always_inline static void
jme_reset_phy_processor(struct jme_adapter *jme)
{
	__u32 val;
	int i;

	val = jme_mdio_read(jme->dev,
				jme->mii_if.phy_id,
				MII_BMCR);

	jme_mdio_write(jme->dev,
			jme->mii_if.phy_id,
			MII_BMCR, val | BMCR_RESET);

	for (i = JME_PHY_RST_TIMEOUT ; i > 0 ; --i) {

		val = jme_mdio_read(jme->dev,
					jme->mii_if.phy_id,
					MII_BMCR);
		if (!(val & BMCR_RESET))

	jeprintk(jme->dev->name, "phy reset timeout\n");

	jme_mdio_write(jme->dev,
			jme->mii_if.phy_id,
			MII_ADVERTISE, ADVERTISE_ALL);

	jme_mdio_write(jme->dev,
			jme->mii_if.phy_id,
			MII_CTRL1000,
			ADVERTISE_1000FULL | ADVERTISE_1000HALF);

__always_inline static void
jme_reset_mac_processor(struct jme_adapter *jme)
{
	jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);

	jwrite32(jme, JME_GHC, jme->reg_ghc);
	jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
	jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
	jwrite32(jme, JME_WFODP, 0);
	jwrite32(jme, JME_WFOI, 0);
	jwrite32(jme, JME_GPREG0, GPREG0_DEFAULT);
	jwrite32(jme, JME_GPREG1, 0);

__always_inline static void
jme_clear_pm(struct jme_adapter *jme)
{
	jwrite32(jme, JME_PMCS, 0xFFFF0000);
	pci_set_power_state(jme->pdev, PCI_D0);

static int
jme_reload_eeprom(struct jme_adapter *jme)
{
	__u32 val;
	int i;

	val = jread32(jme, JME_SMBCSR);

	if (val & SMBCSR_EEPROMD)

		jwrite32(jme, JME_SMBCSR, val);
		val |= SMBCSR_RELOAD;
		jwrite32(jme, JME_SMBCSR, val);

		for (i = JME_SMB_TIMEOUT; i > 0; --i)

			if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)

		jeprintk(jme->dev->name, "eeprom reload timeout\n");
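
/*
 * A minimal sketch (not in the original source) of how the recurring
 * poll-until-clear pattern above (SMI, SMBCSR, and later TXCS/RXCS)
 * could be factored out; the helper name and the 1us poll granularity
 * are assumptions.
 */
static int
jme_wait_reg_clear(struct jme_adapter *jme, __u32 reg, __u32 bits, int timeout)
{
	int i;

	for (i = timeout; i > 0; --i) {
		if ((jread32(jme, reg) & bits) == 0)
			return 0;	/* bits cleared in time */
		udelay(1);		/* assumed poll granularity */
	}

	return -EBUSY;			/* device did not respond */
}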
jme_load_macaddr(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	unsigned char macaddr[6];
	__u32 val;

	spin_lock(&jme->phy_lock);
	val = jread32(jme, JME_RXUMA_LO);
	macaddr[0] = (val >> 0) & 0xFF;
	macaddr[1] = (val >> 8) & 0xFF;
	macaddr[2] = (val >> 16) & 0xFF;
	macaddr[3] = (val >> 24) & 0xFF;
	val = jread32(jme, JME_RXUMA_HI);
	macaddr[4] = (val >> 0) & 0xFF;
	macaddr[5] = (val >> 8) & 0xFF;
	memcpy(netdev->dev_addr, macaddr, 6);
	spin_unlock(&jme->phy_lock);

static void
jme_set_rx_pcc(struct jme_adapter *jme, int p)
{
	switch (p) {
	case PCC_P1:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	case PCC_P2:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	case PCC_P3:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	}

	dprintk(jme->dev->name, "Switched to PCC_P%d\n", p);

__always_inline static void
jme_start_irq(struct jme_adapter *jme)
{
	register struct dynpcc_info *dpi = &(jme->dpi);

	jme_set_rx_pcc(jme, PCC_P1);

	dpi->check_point = jiffies + PCC_INTERVAL;
	dpi->last_bytes = NET_STAT(jme).rx_bytes;
	dpi->last_pkts = NET_STAT(jme).rx_packets;

	dpi->attempt = PCC_P1;

	jwrite32(jme, JME_PCCTX,
		((60000 << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
		((8 << PCCTX_SHIFT) & PCCTX_MASK) |

	atomic_set(&jme->intr_sem, 1);
	jwrite32(jme, JME_IENS, INTR_ENABLE);

__always_inline static void
jme_stop_irq(struct jme_adapter *jme)
{
	jwrite32(jme, JME_IENC, INTR_ENABLE);

__always_inline static void
jme_enable_shadow(struct jme_adapter *jme)
{
	jwrite32(jme, JME_SHBA_LO,
		((__u32)jme->shadow_dma & ~((__u32)0x1F)) | SHBA_POSTEN);
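
/*
 * Note (illustrative, inferred from the code above): the shadow
 * mechanism has the device DMA-post its interrupt status into host
 * memory, letting the ISR read it without an MMIO access.  Masking
 * the low five bits of shadow_dma reflects the hardware requirement
 * that the shadow base address be 32-byte aligned.
 */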
__always_inline static void
jme_disable_shadow(struct jme_adapter *jme)
{
	jwrite32(jme, JME_SHBA_LO, 0x0);

jme_check_link(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	__u32 phylink, ghc, cnt = JME_AUTONEG_TIMEOUT;

	spin_lock(&jme->phy_lock);
	phylink = jread32(jme, JME_PHY_LINK);

	if (phylink & PHY_LINK_UP) {
		/*
		 * Keep polling until autoneg completes.
		 */
		while (!(phylink & PHY_LINK_AUTONEG_COMPLETE) && --cnt > 0) {

			phylink = jread32(jme, JME_PHY_LINK);
		}

		jeprintk(netdev->name, "Waiting for autoneg timed out.\n");

		switch (phylink & PHY_LINK_SPEED_MASK) {
		case PHY_LINK_SPEED_10M:
			ghc = GHC_SPEED_10M;
			strcpy(linkmsg, "10 Mbps, ");
			break;
		case PHY_LINK_SPEED_100M:
			ghc = GHC_SPEED_100M;
			strcpy(linkmsg, "100 Mbps, ");
			break;
		case PHY_LINK_SPEED_1000M:
			ghc = GHC_SPEED_1000M;
			strcpy(linkmsg, "1000 Mbps, ");
			break;
		}

		ghc |= (phylink & PHY_LINK_DUPLEX) ? GHC_DPX : 0;

		jwrite32(jme, JME_GHC, ghc);
		strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
					"Full-Duplex" : "Half-Duplex");

		if (phylink & PHY_LINK_DUPLEX)
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
		else
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |

		jprintk(netdev->name, "Link is up at %s.\n", linkmsg);
		netif_carrier_on(netdev);
	}
	else {
		jprintk(netdev->name, "Link is down.\n");
		netif_carrier_off(netdev);
	}

	spin_unlock(&jme->phy_lock);
jme_alloc_txdesc(struct jme_adapter *jme,
			int nr_alloc)
{
	struct jme_ring *txring = jme->txring;
	int idx;

	idx = txring->next_to_use;

	if (unlikely(txring->nr_free < nr_alloc))
		return -1;

	spin_lock(&jme->tx_lock);
	txring->nr_free -= nr_alloc;

	if ((txring->next_to_use += nr_alloc) >= RING_DESC_NR)
		txring->next_to_use -= RING_DESC_NR;

	spin_unlock(&jme->tx_lock);
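
/*
 * Note (an observation, not from the original comments): nr_free is
 * tested above before tx_lock is taken.  This is safe only because
 * callers run under netif_tx_lock() (see jme_start_xmit below), which
 * serializes allocation against itself, while tx_lock orders the
 * update against the TX-cleaning tasklet, which only increases nr_free.
 */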
jme_set_new_txdesc(struct jme_adapter *jme,
			struct sk_buff *skb)
{
	struct jme_ring *txring = jme->txring;
	volatile struct txdesc *txdesc = txring->desc, *ctxdesc;
	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
	int i, idx, nr_desc;
	dma_addr_t dmaaddr;

	idx = jme_alloc_txdesc(jme, nr_desc);

	if (unlikely(idx < 0))
		return NETDEV_TX_BUSY;

	for (i = 1 ; i < nr_desc ; ++i) {
		ctxdesc = txdesc + ((idx + i) & (RING_DESC_NR - 1));
		ctxbi = txbi + ((idx + i) & (RING_DESC_NR - 1));

		dmaaddr = pci_map_single(jme->pdev,

		pci_dma_sync_single_for_device(jme->pdev,

		ctxdesc->desc2.flags = TXFLAG_OWN;
		if (jme->dev->features & NETIF_F_HIGHDMA)
			ctxdesc->desc2.flags |= TXFLAG_64BIT;
		ctxdesc->desc2.datalen = cpu_to_le16(skb->len);
		ctxdesc->desc2.bufaddrh = cpu_to_le32((__u64)dmaaddr >> 32);
		ctxdesc->desc2.bufaddrl = cpu_to_le32(dmaaddr & 0xFFFFFFFF);

		ctxbi->mapping = dmaaddr;
		ctxbi->len = skb->len;
	}

	ctxdesc = txdesc + idx;
	ctxbi = txbi + idx;

	ctxdesc->desc1.pktsize = cpu_to_le16(skb->len);

	/*
	 * Set the OWN bit last: the kernel may fill descriptors faster
	 * than the NIC can send them, and the NIC must not pick up this
	 * descriptor before we tell it to start sending this TX queue.
	 * All other fields are already filled in correctly.
	 */
	ctxdesc->desc1.flags = TXFLAG_OWN | TXFLAG_INT;

	/*
	 * Set the TX buffer info after telling the NIC to send,
	 * for better tx_clean timing.
	 */
	ctxbi->nr_desc = nr_desc;

	tx_dbg(jme->dev->name, "Xmit: %d+%d\n", idx, nr_desc);
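
/*
 * Note (illustrative, not in the original source): because the NIC may
 * fetch a descriptor as soon as it owns it, weakly ordered CPUs usually
 * also need a write barrier so the descriptor body is globally visible
 * before the OWN bit is set, e.g.:
 *
 *	ctxdesc->desc1.pktsize = cpu_to_le16(skb->len);
 *	wmb();
 *	ctxdesc->desc1.flags = TXFLAG_OWN | TXFLAG_INT;
 */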
jme_setup_tx_resources(struct jme_adapter *jme)
{
	struct jme_ring *txring = &(jme->txring[0]);

	txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),

		txring->dmaalloc = 0;

	txring->desc = (void *)ALIGN((unsigned long)(txring->alloc),
					RING_DESC_ALIGN);
	txring->dma = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
	txring->next_to_use = 0;
	txring->next_to_clean = 0;
	txring->nr_free = RING_DESC_NR;

	/*
	 * Initialize Transmit Descriptors
	 */
	memset(txring->alloc, 0, TX_RING_ALLOC_SIZE);
	memset(txring->bufinf, 0,
		sizeof(struct jme_buffer_info) * RING_DESC_NR);

jme_free_tx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *txring = &(jme->txring[0]);
	struct jme_buffer_info *txbi = txring->bufinf;

	for (i = 0 ; i < RING_DESC_NR ; ++i) {
		txbi = txring->bufinf + i;
		if (txbi->skb) {
			dev_kfree_skb(txbi->skb);

	dma_free_coherent(&(jme->pdev->dev),

	txring->alloc = NULL;
	txring->desc = NULL;
	txring->dmaalloc = 0;

	txring->next_to_use = 0;
	txring->next_to_clean = 0;

__always_inline static void
jme_enable_tx_engine(struct jme_adapter *jme)
{
	__u8 mrrs;

	jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);

	/*
	 * Set up TX Queue 0 DMA Base Address
	 */
	jwrite32(jme, JME_TXDBA_LO, jme->txring[0].dma);
	jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
	jwrite32(jme, JME_TXNDA, jme->txring[0].dma);

	/*
	 * Set up TX Descriptor Count
	 */
	jwrite32(jme, JME_TXQDC, RING_DESC_NR);

	/*
	 * Get Max Read Request Size from PCI Config Space
	 */
	pci_read_config_byte(jme->pdev, PCI_CONF_DCSR_MRRS, &mrrs);

		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;

		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;

		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;

	jwrite32(jme, JME_TXCS, jme->reg_txcs |

__always_inline static void
jme_disable_tx_engine(struct jme_adapter *jme)
{
	__u32 val;
	int i;

	jwrite32(jme, JME_TXCS, jme->reg_txcs);

	val = jread32(jme, JME_TXCS);
	for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) {

		val = jread32(jme, JME_TXCS);
	}

	jeprintk(jme->dev->name, "Disabling TX engine timed out.\n");

jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = jme->rxring;
	register volatile struct rxdesc *rxdesc = rxring->desc;
	struct jme_buffer_info *rxbi = rxring->bufinf;

	rxdesc->desc1.bufaddrh = cpu_to_le32((__u64)rxbi->mapping >> 32);
	rxdesc->desc1.bufaddrl = cpu_to_le32(rxbi->mapping & 0xFFFFFFFF);
	rxdesc->desc1.datalen = cpu_to_le16(rxbi->len);
	if (jme->dev->features & NETIF_F_HIGHDMA)
		rxdesc->desc1.flags = RXFLAG_64BIT;

	rxdesc->desc1.flags |= RXFLAG_OWN | RXFLAG_INT;

jme_make_new_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf;
	unsigned long offset;
	struct sk_buff *skb;

	skb = netdev_alloc_skb(jme->dev, RX_BUF_ALLOC_SIZE);

	if (unlikely(skb_is_nonlinear(skb))) {
		dprintk(jme->dev->name,
			"Allocated skb is fragmented (%d).\n",
			skb_shinfo(skb)->nr_frags);

	if (unlikely(offset =
			(unsigned long)(skb->data)
			& (unsigned long)(RX_BUF_DMA_ALIGN - 1)))
		skb_reserve(skb, RX_BUF_DMA_ALIGN - offset);

	rxbi->len = skb_tailroom(skb);
	rxbi->mapping = pci_map_single(jme->pdev,
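
/*
 * Worked example (illustrative) of the skb_reserve() fix-up above:
 * assuming RX_BUF_DMA_ALIGN is 8 and skb->data ends in ...0x6, offset
 * becomes 6 and skb_reserve(skb, 8 - 6) advances skb->data to the next
 * 8-byte boundary before the buffer is DMA-mapped.
 */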
jme_free_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf;

		pci_unmap_single(jme->pdev,

		dev_kfree_skb(rxbi->skb);

jme_free_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	for (i = 0 ; i < RING_DESC_NR ; ++i)
		jme_free_rx_buf(jme, i);

	dma_free_coherent(&(jme->pdev->dev),

	rxring->alloc = NULL;
	rxring->desc = NULL;
	rxring->dmaalloc = 0;

	rxring->next_to_use = 0;
	rxring->next_to_clean = 0;

jme_setup_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),

		rxring->dmaalloc = 0;

	rxring->desc = (void *)ALIGN((unsigned long)(rxring->alloc),
					RING_DESC_ALIGN);
	rxring->dma = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
	rxring->next_to_use = 0;
	rxring->next_to_clean = 0;

	/*
	 * Initialize Receive Descriptors
	 */
	for (i = 0 ; i < RING_DESC_NR ; ++i) {
		if (unlikely(jme_make_new_rx_buf(jme, i))) {
			jme_free_rx_resources(jme);

		jme_set_clean_rxdesc(jme, i);
	}

__always_inline static void
jme_enable_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Set up RX DMA Base Address
	 */
	jwrite32(jme, JME_RXDBA_LO, jme->rxring[0].dma);
	jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
	jwrite32(jme, JME_RXNDA, jme->rxring[0].dma);

	/*
	 * Set up RX Descriptor Count
	 */
	jwrite32(jme, JME_RXQDC, RING_DESC_NR);

	/*
	 * Set up Unicast Filter
	 */
	jme->reg_rxmcs = RXMCS_VTAGRM | RXMCS_PREPAD;
	jme_set_multi(jme->dev);

	jwrite32(jme, JME_RXCS, RXCS_DEFAULT |

__always_inline static void
jme_restart_rx_engine(struct jme_adapter *jme)
{
	jwrite32(jme, JME_RXCS, RXCS_DEFAULT |

__always_inline static void
jme_disable_rx_engine(struct jme_adapter *jme)
{
	__u32 val;
	int i;

	val = jread32(jme, JME_RXCS);
	val &= ~RXCS_ENABLE;
	jwrite32(jme, JME_RXCS, val);

	val = jread32(jme, JME_RXCS);
	for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) {

		val = jread32(jme, JME_RXCS);
	}

	jeprintk(jme->dev->name, "Disabling RX engine timed out.\n");

jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
{
	if (dpi->attempt == atmp) {

jme_dynamic_pcc(struct jme_adapter *jme)
{
	register struct dynpcc_info *dpi = &(jme->dpi);

	if (time_after_eq(jiffies, dpi->check_point)) {
		if (time_after(jiffies, dpi->check_point + PCC_INTERVAL)) {
			jme_attempt_pcc(dpi, PCC_P1);
		}
		else if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) >
			jme_attempt_pcc(dpi, PCC_P3);
		else if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) >
			jme_attempt_pcc(dpi, PCC_P2);
		else
			jme_attempt_pcc(dpi, PCC_P1);

		if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
			jme_set_rx_pcc(jme, dpi->attempt);
			dpi->cur = dpi->attempt;
		}

		dpi->last_bytes = NET_STAT(jme).rx_bytes;
		dpi->last_pkts = NET_STAT(jme).rx_packets;
		dpi->check_point = jiffies + PCC_INTERVAL;
	}
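
/*
 * Sketch of the heuristic above (the byte-rate thresholds are elided
 * constants in this excerpt): every PCC_INTERVAL jiffies the RX byte
 * delta picks a coalescing level, PCC_P3 for heavy traffic down to
 * PCC_P1 for light traffic, and a new level is only committed after it
 * wins several consecutive intervals (dpi->cnt > 5) to avoid flapping.
 */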
jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	volatile struct rxdesc *rxdesc = rxring->desc;
	struct jme_buffer_info *rxbi = rxring->bufinf;
	struct sk_buff *skb;
	int framesize;

	pci_dma_sync_single_for_cpu(jme->pdev,

	if (unlikely(jme_make_new_rx_buf(jme, idx))) {
		pci_dma_sync_single_for_device(jme->pdev,

		++(NET_STAT(jme).rx_dropped);
	}
	else {
		framesize = le16_to_cpu(rxdesc->descwb.framesize)
				- RX_PREPAD_SIZE;

		skb_reserve(skb, RX_PREPAD_SIZE);
		skb_put(skb, framesize);
		skb->protocol = eth_type_trans(skb, jme->dev);

		if (le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST_MUL)
			++(NET_STAT(jme).multicast);

		jme->dev->last_rx = jiffies;
		NET_STAT(jme).rx_bytes += framesize;
		++(NET_STAT(jme).rx_packets);
	}

	jme_set_clean_rxdesc(jme, idx);

jme_process_receive(struct jme_adapter *jme, int limit)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	volatile struct rxdesc *rxdesc = rxring->desc;
	int i, j, ccnt, desccnt;

	i = rxring->next_to_clean;

	rxdesc = rxring->desc;

	if ((rxdesc->descwb.flags & RXWBFLAG_OWN) ||
	    !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))

	desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;

	rx_dbg(jme->dev->name, "RX: Cleaning %d\n", i);

		rxdesc->descwb.errstat & RXWBERR_ALLERR) {

		if (rxdesc->descwb.errstat & RXWBERR_CRCERR)
			++(NET_STAT(jme).rx_crc_errors);
		else if (rxdesc->descwb.errstat & RXWBERR_OVERUN)
			++(NET_STAT(jme).rx_fifo_errors);
		else
			++(NET_STAT(jme).rx_errors);

		limit -= desccnt - 1;

		for (j = i, ccnt = desccnt ; ccnt-- ; ) {
			jme_set_clean_rxdesc(jme, j);

			if (unlikely(++j == RING_DESC_NR))
				j = 0;
		}

		jme_alloc_and_feed_skb(jme, i);

	if ((i += desccnt) >= RING_DESC_NR)
		i -= RING_DESC_NR;

	rx_dbg(jme->dev->name, "RX: Stop at %d\n", i);
	rx_dbg(jme->dev->name, "RX: RXNDA offset %d\n",
		(jread32(jme, JME_RXNDA) - jread32(jme, JME_RXDBA_LO))

	jme_dynamic_pcc(jme);
	rxring->next_to_clean = i;

	return limit > 0 ? limit : 0;

jme_link_change_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	jme_check_link(jme->dev);

jme_rx_clean_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;

	spin_lock(&jme->rx_lock);
	jme_process_receive(jme, RING_DESC_NR);
	spin_unlock(&jme->rx_lock);

	if (jme->flags & JME_FLAG_RXQ0_EMPTY) {
		jme->flags &= ~JME_FLAG_RXQ0_EMPTY;
		jme_restart_rx_engine(jme);
	}

jme_tx_clean_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct jme_ring *txring = &(jme->txring[0]);
	volatile struct txdesc *txdesc = txring->desc;
	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
	int i, j, cnt = 0, max;

	spin_lock(&jme->tx_lock);
	max = RING_DESC_NR - txring->nr_free;
	spin_unlock(&jme->tx_lock);

	tx_dbg(jme->dev->name, "Tx Tasklet: In\n");

	for (i = txring->next_to_clean ; cnt < max ; ) {

		ctxbi = txbi + i;

		if (ctxbi->skb && !(txdesc[i].desc1.flags & TXFLAG_OWN)) {

			tx_dbg(jme->dev->name,
				"Tx Tasklet: Clean %d+%d\n",
				i, ctxbi->nr_desc);

			for (j = 1 ; j < ctxbi->nr_desc ; ++j) {
				ttxbi = txbi + ((i + j) & (RING_DESC_NR - 1));
				txdesc[(i + j) & (RING_DESC_NR - 1)].dw[0] = 0;

				pci_unmap_single(jme->pdev,

				NET_STAT(jme).tx_bytes += ttxbi->len;

			dev_kfree_skb(ctxbi->skb);
			ctxbi->skb = NULL;

			cnt += ctxbi->nr_desc;

			++(NET_STAT(jme).tx_packets);

			tx_dbg(jme->dev->name,
				" Stopped due to no skb.\n");

			tx_dbg(jme->dev->name,
				"Stopped due to not done.\n");

		if (unlikely((i += ctxbi->nr_desc) >= RING_DESC_NR))
			i -= RING_DESC_NR;
	}

	tx_dbg(jme->dev->name,
		"Tx Tasklet: Stop %d Jiffies %lu\n",
		i, jiffies);
	txring->next_to_clean = i;

	spin_lock(&jme->tx_lock);
	txring->nr_free += cnt;
	spin_unlock(&jme->tx_lock);

jme_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct jme_adapter *jme = netdev_priv(netdev);
	irqreturn_t rc = IRQ_HANDLED;
	__u32 intrstat;

	pci_dma_sync_single_for_cpu(jme->pdev,
					jme->shadow_dma,
					sizeof(__u32) * SHADOW_REG_NR,
					PCI_DMA_FROMDEVICE);
	intrstat = jme->shadow_regs[SHADOW_IEVE];
	jme->shadow_regs[SHADOW_IEVE] = 0;

	intrstat = jread32(jme, JME_IEVE);

	/*
	 * Check if it's really an interrupt for us
	 */

	/*
	 * Check if the device still exists
	 */
	if (unlikely(intrstat == ~((typeof(intrstat))0))) {

	/*
	 * Allow only one interrupt handling at a time
	 */
	if (unlikely(!atomic_dec_and_test(&jme->intr_sem)))

	jwrite32f(jme, JME_IENC, INTR_ENABLE);

	if (intrstat & INTR_LINKCH)
		tasklet_schedule(&jme->linkch_task);

	if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP)) {
		if (intrstat & INTR_RX0EMP) {
			jme->flags |= JME_FLAG_RXQ0_EMPTY;
			jeprintk(netdev->name, "Ran out of Receive Queue 0 buffers.\n");
		}

		tasklet_schedule(&jme->rxclean_task);
	}

	if (intrstat & (INTR_PCCTXTO | INTR_PCCTX))
		tasklet_schedule(&jme->txclean_task);

	if ((intrstat & ~INTR_ENABLE) != 0) {
		/*
		 * Some interrupts were not handled
		 * but are also not enabled (for debugging)
		 */
	}

	/*
	 * Deassert interrupts
	 */
	jwrite32f(jme, JME_IEVE, intrstat);

	/*
	 * Enable next interrupt handling
	 */
	atomic_set(&jme->intr_sem, 1);

	/*
	 * Re-enable interrupts
	 */
	jwrite32f(jme, JME_IENS, INTR_ENABLE);
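
/*
 * Sketch (reconstructed from the comment above, an assumption rather
 * than the original code) of the elided not-our-interrupt guard: on a
 * shared IRQ line, a zero status means the interrupt belongs to some
 * other device sharing the line:
 *
 *	if (intrstat == 0)
 *		return IRQ_NONE;
 */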
jme_open(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc;

	rc = request_irq(jme->pdev->irq, jme_intr,
			IRQF_SHARED, netdev->name, netdev);
	if (rc) {
		printk(KERN_ERR PFX "Error requesting IRQ.\n");

	rc = jme_setup_rx_resources(jme);
	if (rc) {
		printk(KERN_ERR PFX "Error allocating resources for RX.\n");
		goto err_out_free_irq;
	}

	rc = jme_setup_tx_resources(jme);
	if (rc) {
		printk(KERN_ERR PFX "Error allocating resources for TX.\n");
		goto err_out_free_rx_resources;
	}

	jme_reset_mac_processor(jme);
	jme_check_link(netdev);
	jme_enable_shadow(jme);

	jme_enable_rx_engine(jme);
	jme_enable_tx_engine(jme);
	netif_start_queue(netdev);

	return 0;

err_out_free_rx_resources:
	jme_free_rx_resources(jme);
err_out_free_irq:
	free_irq(jme->pdev->irq, jme->dev);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);

jme_close(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);

	jme_disable_shadow(jme);
	free_irq(jme->pdev->irq, jme->dev);

	tasklet_kill(&jme->linkch_task);
	tasklet_kill(&jme->txclean_task);
	tasklet_kill(&jme->rxclean_task);
	jme_disable_rx_engine(jme);
	jme_disable_tx_engine(jme);
	jme_free_rx_resources(jme);
	jme_free_tx_resources(jme);

	return 0;

/*
 * This function is already protected by netif_tx_lock()
 */
static int
jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc;

	rc = jme_set_new_txdesc(jme, skb);

	if (unlikely(rc != NETDEV_TX_OK))
		return rc;

	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |

	netdev->trans_start = jiffies;

	return NETDEV_TX_OK;

jme_set_macaddr(struct net_device *netdev, void *p)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	struct sockaddr *addr = p;
	__u32 val;

	if (netif_running(netdev))
		return -EBUSY;

	spin_lock(&jme->phy_lock);
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	val = addr->sa_data[3] << 24 |
	      addr->sa_data[2] << 16 |
	      addr->sa_data[1] << 8 |
	      addr->sa_data[0];
	jwrite32(jme, JME_RXUMA_LO, val);
	val = addr->sa_data[5] << 8 |
	      addr->sa_data[4];
	jwrite32(jme, JME_RXUMA_HI, val);
	spin_unlock(&jme->phy_lock);

	return 0;

jme_set_multi(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 mc_hash[2] = {};
	u32 val;
	int i;

	spin_lock(&jme->phy_lock);
	val = jme->reg_rxmcs | RXMCS_BRDFRAME | RXMCS_UNIFRAME;

	if (netdev->flags & IFF_PROMISC) {
		val |= RXMCS_ALLFRAME;
	}
	else if (netdev->flags & IFF_ALLMULTI) {
		val |= RXMCS_ALLMULFRAME;
	}
	else if (netdev->flags & IFF_MULTICAST) {
		struct dev_mc_list *mclist;
		int bit_nr;

		val |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
		for (i = 0, mclist = netdev->mc_list;
		     mclist && i < netdev->mc_count;
		     ++i, mclist = mclist->next) {

			bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
			mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
		}

		jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
		jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
	}

	jwrite32(jme, JME_RXMCS, val);
	spin_unlock(&jme->phy_lock);
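
/*
 * Worked example (illustrative) of the hash above: ether_crc() of the
 * multicast address masked with 0x3F yields a bit number 0..63; e.g.
 * bit_nr == 42 sets bit 10 (42 & 0x1F) of mc_hash[1] (42 >> 5), which
 * is written to the RXMCHT_HI filter word.
 */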
jme_change_mtu(struct net_device *dev, int new_mtu)
{
	/*
	 * Not supporting MTU change for now.
	 */

static void
jme_get_drvinfo(struct net_device *netdev,
		struct ethtool_drvinfo *info)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(jme->pdev));

static int
jme_get_settings(struct net_device *netdev,
		struct ethtool_cmd *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc;

	spin_lock(&jme->phy_lock);
	rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
	spin_unlock(&jme->phy_lock);
	return rc;
}

static int
jme_set_settings(struct net_device *netdev,
		struct ethtool_cmd *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc;

	spin_lock(&jme->phy_lock);
	rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
	spin_unlock(&jme->phy_lock);
	return rc;
}

static u32
jme_get_link(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
}

static const struct ethtool_ops jme_ethtool_ops = {
	.get_drvinfo	= jme_get_drvinfo,
	.get_settings	= jme_get_settings,
	.set_settings	= jme_set_settings,
	.get_link	= jme_get_link,
};

static int
jme_pci_dma64(struct pci_dev *pdev)
{
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
		if (!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))

	if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
		if (!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
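
/*
 * Note (an assumption; the return statements are elided in this
 * excerpt): jme_pci_dma64() appears to return a positive value when
 * the 64-bit DMA mask is accepted, zero for the 32-bit fallback, and a
 * negative value when neither mask can be set, which is how
 * jme_init_one() consumes it below via using_dac.
 */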
static int __devinit
jme_init_one(struct pci_dev *pdev,
		const struct pci_device_id *ent)
{
	int rc = 0, using_dac;
	struct net_device *netdev;
	struct jme_adapter *jme;

	/*
	 * set up PCI device basics
	 */
	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device.\n");
		return rc;
	}

	using_dac = jme_pci_dma64(pdev);
	if (using_dac < 0) {
		printk(KERN_ERR PFX "Cannot set PCI DMA Mask.\n");

		goto err_out_disable_pdev;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "No PCI resource region found.\n");

		goto err_out_disable_pdev;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		printk(KERN_ERR PFX "Cannot obtain PCI resource region.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/*
	 * alloc and init net device
	 */
	netdev = alloc_etherdev(sizeof(*jme));
	if (!netdev) {
		rc = -ENOMEM;
		goto err_out_release_regions;
	}

	netdev->open = jme_open;
	netdev->stop = jme_close;
	netdev->hard_start_xmit = jme_start_xmit;
	netdev->irq = pdev->irq;
	netdev->set_mac_address = jme_set_macaddr;
	netdev->set_multicast_list = jme_set_multi;
	netdev->change_mtu = jme_change_mtu;
	netdev->ethtool_ops = &jme_ethtool_ops;
	NETDEV_GET_STATS(netdev, &jme_get_stats);

	if (using_dac)
		netdev->features = NETIF_F_HIGHDMA;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	jme = netdev_priv(netdev);
	jme->pdev = pdev;
	jme->dev = netdev;
	jme->reg_ghc = GHC_DPX | GHC_SPEED_1000M;
	jme->regs = ioremap(pci_resource_start(pdev, 0),
				pci_resource_len(pdev, 0));
	if (!(jme->regs)) {

		goto err_out_free_netdev;
	}

	jme->shadow_regs = pci_alloc_consistent(pdev,
						sizeof(__u32) * SHADOW_REG_NR,
						&(jme->shadow_dma));
	if (!(jme->shadow_regs)) {

	spin_lock_init(&jme->rx_lock);
	spin_lock_init(&jme->tx_lock);
	spin_lock_init(&jme->phy_lock);
	tasklet_init(&jme->linkch_task,
			&jme_link_change_tasklet,
			(unsigned long) jme);
	tasklet_init(&jme->txclean_task,
			&jme_tx_clean_tasklet,
			(unsigned long) jme);
	tasklet_init(&jme->rxclean_task,
			&jme_rx_clean_tasklet,
			(unsigned long) jme);
	jme->mii_if.dev = netdev;
	jme->mii_if.phy_id = 1;
	jme->mii_if.supports_gmii = 1;
	jme->mii_if.mdio_read = jme_mdio_read;
	jme->mii_if.mdio_write = jme_mdio_write;

	/*
	 * Reset MAC processor and reload EEPROM for MAC Address
	 */
	jme_reset_phy_processor(jme);
	jme_reset_mac_processor(jme);
	rc = jme_reload_eeprom(jme);
	if (rc) {
		printk(KERN_ERR PFX
			"Reloading EEPROM for MAC address failed.\n");
		goto err_out_free_shadow;
	}
	jme_load_macaddr(netdev);

	/*
	 * Tell the stack that we are not ready to work until open()
	 */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	rc = register_netdev(netdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot register net device.\n");
		goto err_out_free_shadow;
	}

	jprintk(netdev->name,
		"JMC250 gigabit eth at %llx, "
		"%02x:%02x:%02x:%02x:%02x:%02x, IRQ %d\n",
		(unsigned long long) pci_resource_start(pdev, 0),
		netdev->dev_addr[0],
		netdev->dev_addr[1],
		netdev->dev_addr[2],
		netdev->dev_addr[3],
		netdev->dev_addr[4],
		netdev->dev_addr[5],
		pdev->irq);

	return 0;

err_out_free_shadow:
	pci_free_consistent(pdev,
				sizeof(__u32) * SHADOW_REG_NR,
				jme->shadow_regs,
				jme->shadow_dma);

err_out_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);

	return rc;

static void __devexit
jme_remove_one(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	unregister_netdev(netdev);
	pci_free_consistent(pdev,
				sizeof(__u32) * SHADOW_REG_NR,
				jme->shadow_regs,
				jme->shadow_dma);
	iounmap(jme->regs);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

static struct pci_device_id jme_pci_tbl[] = {
	{ PCI_VDEVICE(JMICRON, 0x250) },
	{ }
};

static struct pci_driver jme_driver = {
	.name		= DRV_NAME,
	.id_table	= jme_pci_tbl,
	.probe		= jme_init_one,
	.remove		= __devexit_p(jme_remove_one),
#ifdef CONFIG_PM
	.suspend	= jme_suspend,
	.resume		= jme_resume,
#endif /* CONFIG_PM */
};

static int __init
jme_init_module(void)
{
	printk(KERN_INFO PFX "JMicron JMC250 gigabit ethernet "
		"driver version %s\n", DRV_VERSION);
	return pci_register_driver(&jme_driver);
}

static void __exit
jme_cleanup_module(void)
{
	pci_unregister_driver(&jme_driver);
}

module_init(jme_init_module);
module_exit(jme_cleanup_module);

MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, jme_pci_tbl);