/*
 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
 *
 * Copyright 2008 JMicron Technology Corporation
 * http://www.jmicron.com/
 *
 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Note: check if the rx queue has stopped,
 * and restart it after the rx ring is cleaned.
 *
 * Timeline before release:
 *	Stage 2: Error handling.
 *	Stage 3: Basic offloading support.
 *		- Use pci_map_page on scattered sk_buff for HIGHMEM support
 *		- Implement scatter-gather offloading.
 *		  A system page per RX (buffer|descriptor)?
 *		  Map fragmented sk_buffs to TX descriptors.
 *		- Implement tx/rx ipv6/ip/tcp/udp checksum offloading
 *	Stage 4: Basic feature support.
 *		- Implement Power Management related functions.
 *		- Implement Jumbo frame support.
 *	Stage 5: Advanced offloading support.
 *		- Implement VLAN offloading.
 *		- Implement TCP Segmentation offloading.
 *	Stage 6: CPU Load balancing.
 *		  Along with multiple RX queues, for CPU load balancing.
 *		- Use NAPI instead of rx_tasklet?
 *		  PCC supports both packet-count and timeout interrupts for
 *		  receive and transmit completion; is NAPI really needed?
 *		- Clean up / re-organize code, performance tuning (alignment etc...).
 *		- Test and Release 1.0
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include "jme.h"
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
static struct net_device_stats *
jme_get_stats(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	return &(jme->stats);
}
#endif
static int
jme_mdio_read(struct net_device *netdev, int phy, int reg)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i, val;

	jwrite32(jme, JME_SMI, SMI_OP_REQ |
		smi_phy_addr(phy) |
		smi_reg_addr(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; --i) {
		udelay(1);	/* poll interval assumed */
		if (((val = jread32(jme, JME_SMI)) & SMI_OP_REQ) == 0)
			break;
	}

	if (i == 0) {
		jeprintk(netdev->name, "phy read timeout : %d\n", reg);
		return 0;
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}
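
/*
 * Note: the SMI/MDIO helpers here follow the usual pattern: post a
 * request to JME_SMI, then poll until the hardware clears SMI_OP_REQ.
 * A sketch of a typical caller (assuming the PHY at mii_if.phy_id):
 *
 *	int bmsr = jme_mdio_read(netdev, jme->mii_if.phy_id, MII_BMSR);
 *	jme_mdio_write(netdev, jme->mii_if.phy_id, MII_BMCR, BMCR_RESET);
 */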
static void
jme_mdio_write(struct net_device *netdev,
		int phy, int reg, int val)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i;

	jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
		((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
		smi_phy_addr(phy) | smi_reg_addr(reg));

	for (i = JME_PHY_TIMEOUT ; i > 0 ; --i) {
		udelay(1);	/* poll interval assumed */
		if (((val = jread32(jme, JME_SMI)) & SMI_OP_REQ) == 0)
			break;
	}

	if (i == 0)
		jeprintk(netdev->name, "phy write timeout : %d\n", reg);
}
__always_inline static void
jme_reset_phy_processor(struct jme_adapter *jme)
{
	__u32 val;

	jme_mdio_write(jme->dev, jme->mii_if.phy_id,
		MII_ADVERTISE, ADVERTISE_ALL);

	jme_mdio_write(jme->dev, jme->mii_if.phy_id,
		MII_CTRL1000,
		ADVERTISE_1000FULL | ADVERTISE_1000HALF);

	val = jme_mdio_read(jme->dev, jme->mii_if.phy_id,
		MII_BMCR);

	jme_mdio_write(jme->dev, jme->mii_if.phy_id,
		MII_BMCR, val | BMCR_RESET);
}
__always_inline static void
jme_reset_mac_processor(struct jme_adapter *jme)
{
	jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);

	jwrite32(jme, JME_GHC, jme->reg_ghc);
	jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
	jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
	jwrite32(jme, JME_WFODP, 0);
	jwrite32(jme, JME_WFOI, 0);
	jwrite32(jme, JME_GPREG0, GPREG0_DEFAULT);
	jwrite32(jme, JME_GPREG1, 0);
}
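
/*
 * jme_clear_pm() below clears latched wake-up/PM event status and
 * forces the device back to full power.  The 0xFFFF0000 write reads
 * as a write-1-to-clear of the PMCS status half (an assumption from
 * usage; the JMC2x0 datasheet defines the exact bit semantics).
 */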
__always_inline static void
jme_clear_pm(struct jme_adapter *jme)
{
	jwrite32(jme, JME_PMCS, 0xFFFF0000);
	pci_set_power_state(jme->pdev, PCI_D0);
}
static int
jme_reload_eeprom(struct jme_adapter *jme)
{
	__u32 val;
	int i;

	val = jread32(jme, JME_SMBCSR);

	if (val & SMBCSR_EEPROMD) {
		jwrite32(jme, JME_SMBCSR, val);
		val |= SMBCSR_RELOAD;
		jwrite32(jme, JME_SMBCSR, val);

		for (i = JME_SMB_TIMEOUT; i > 0; --i)
			if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
				break;

		if (i == 0) {
			jeprintk(jme->dev->name, "eeprom reload timeout\n");
			return -EIO;
		}
	}

	return 0;
}
static void
jme_load_macaddr(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	unsigned char macaddr[6];
	__u32 val;

	spin_lock(&jme->macaddr_lock);
	val = jread32(jme, JME_RXUMA_LO);
	macaddr[0] = (val >>  0) & 0xFF;
	macaddr[1] = (val >>  8) & 0xFF;
	macaddr[2] = (val >> 16) & 0xFF;
	macaddr[3] = (val >> 24) & 0xFF;
	val = jread32(jme, JME_RXUMA_HI);
	macaddr[4] = (val >>  0) & 0xFF;
	macaddr[5] = (val >>  8) & 0xFF;
	memcpy(netdev->dev_addr, macaddr, 6);
	spin_unlock(&jme->macaddr_lock);
}
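
/*
 * The unicast MAC address is stored byte 0 first across RXUMA_LO/HI.
 * Worked example: for 00:1B:2C:3D:4E:5F the registers read back as
 *	RXUMA_LO = 0x3D2C1B00, RXUMA_HI = 0x00005F4E.
 * jme_set_macaddr() further down packs them the same way.
 */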
__always_inline static void
jme_set_rx_pcc(struct jme_adapter *jme, int p)
{
	switch (p) {
	case PCC_P1:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	case PCC_P2:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	case PCC_P3:
		jwrite32(jme, JME_PCCRX0,
			((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
			((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));
		break;
	default:
		break;
	}

	dprintk(jme->dev->name, "Switched to PCC_P%d\n", p);
}
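
/*
 * PCC (packet completion coalescing) trades interrupt rate against
 * latency: the NIC raises an RX interrupt after PCC_Px_CNT packets or
 * PCC_Px_TO timeout units, whichever comes first.  P1 is the lightest
 * preset; jme_dynamic_pcc() below picks a preset from the traffic rate.
 */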
static void
jme_start_irq(struct jme_adapter *jme)
{
	register struct dynpcc_info *dpi = &(jme->dpi);

	jme_set_rx_pcc(jme, PCC_P1);

	dpi->check_point = jiffies + PCC_INTERVAL;
	dpi->last_bytes = NET_STAT(jme).rx_bytes;
	dpi->last_pkts = NET_STAT(jme).rx_packets;
	dpi->cur = PCC_P1;
	dpi->attempt = PCC_P1;
	dpi->cnt = 0;

	jwrite32(jme, JME_PCCTX,
		((60000 << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
		((8 << PCCTX_SHIFT) & PCCTX_MASK) |
		PCCTXQ0_EN);	/* enable TX PCC on queue 0; flag name assumed */

	/*
	 * Enable Interrupts
	 */
	jwrite32(jme, JME_IENS, INTR_ENABLE);
}
__always_inline static void
jme_stop_irq(struct jme_adapter *jme)
{
	jwrite32(jme, JME_IENC, INTR_ENABLE);
}
__always_inline static void
jme_enable_shadow(struct jme_adapter *jme)
{
	jwrite32(jme, JME_SHBA_LO,
		((__u32)jme->shadow_dma & ~((__u32)0x1F)) | SHBA_POSTEN);
}

__always_inline static void
jme_disable_shadow(struct jme_adapter *jme)
{
	jwrite32(jme, JME_SHBA_LO, 0x0);
}
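
/*
 * With SHBA_POSTEN set, the NIC DMA-posts its interrupt status into
 * the 32-byte-aligned shadow_regs buffer, so jme_intr() can read
 * IEVE from memory instead of doing a slow MMIO read.
 */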
static int
jme_check_link(struct net_device *netdev, int testonly)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	__u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT;
	char linkmsg[64];
	int rc = 0;

	phylink = jread32(jme, JME_PHY_LINK);

	if (phylink & PHY_LINK_UP) {
		/*
		 * Keep polling for autoneg complete
		 */
		while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) && --cnt > 0) {
			mdelay(1);	/* poll interval assumed */
			phylink = jread32(jme, JME_PHY_LINK);
		}

		if (jme->phylink == phylink) {
			rc = 1;
			goto out;
		}

		if (testonly)
			goto out;

		jme->phylink = phylink;

		if (!cnt)
			jeprintk(netdev->name,
				"Waiting speed resolve timeout.\n");

		if (!(phylink & PHY_LINK_AUTONEG_COMPLETE))
			jprintk(netdev->name,
				"Link partner does not support AN.\n");

		switch (phylink & PHY_LINK_SPEED_MASK) {
		case PHY_LINK_SPEED_10M:
			ghc = GHC_SPEED_10M;
			strcpy(linkmsg, "10 Mbps, ");
			break;
		case PHY_LINK_SPEED_100M:
			ghc = GHC_SPEED_100M;
			strcpy(linkmsg, "100 Mbps, ");
			break;
		case PHY_LINK_SPEED_1000M:
			ghc = GHC_SPEED_1000M;
			strcpy(linkmsg, "1000 Mbps, ");
			break;
		default:
			ghc = 0;
			break;
		}
		ghc |= (phylink & PHY_LINK_DUPLEX) ? GHC_DPX : 0;

		strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
					"Full-Duplex, " :
					"Half-Duplex, ");

		if (phylink & PHY_LINK_MDI_STAT)
			strcat(linkmsg, "MDI");
		else
			strcat(linkmsg, "MDI-X");

		if (phylink & PHY_LINK_DUPLEX)
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
		else
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
						TXMCS_BACKOFF |
						TXMCS_CARRIERSENSE |
						TXMCS_COLLISION);
						/* half-duplex flag names assumed */

		jme->reg_ghc = ghc;
		jwrite32(jme, JME_GHC, ghc);

		jprintk(netdev->name, "Link is up at %s.\n", linkmsg);
		netif_carrier_on(netdev);
	} else {
		if (testonly)
			goto out;

		jprintk(netdev->name, "Link is down.\n");
		jme->phylink = 0;
		netif_carrier_off(netdev);
	}

out:
	return rc;
}
static int
jme_alloc_txdesc(struct jme_adapter *jme,
		int nr_alloc)
{
	struct jme_ring *txring = jme->txring;
	int idx;

	idx = txring->next_to_use;

	if (unlikely(txring->nr_free < nr_alloc))
		return -1;

	spin_lock(&jme->tx_lock);
	txring->nr_free -= nr_alloc;

	if ((txring->next_to_use += nr_alloc) >= RING_DESC_NR)
		txring->next_to_use -= RING_DESC_NR;
	spin_unlock(&jme->tx_lock);

	return idx;
}
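
/*
 * Descriptor indices wrap with "& (RING_DESC_NR - 1)" in the fast
 * paths below, so RING_DESC_NR must be a power of two; under that
 * assumption the masking is equivalent to "% RING_DESC_NR", and the
 * linear wrap arithmetic above matches it.
 */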
static int
jme_set_new_txdesc(struct jme_adapter *jme,
		struct sk_buff *skb)
{
	struct jme_ring *txring = jme->txring;
	volatile struct txdesc *txdesc = txring->desc, *ctxdesc;
	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
	dma_addr_t dmaaddr;
	int i, idx, nr_desc = 2;	/* head + one buffer descriptor (value assumed) */

	idx = jme_alloc_txdesc(jme, nr_desc);

	if (unlikely(idx < 0))
		return NETDEV_TX_BUSY;

	for (i = 1 ; i < nr_desc ; ++i) {
		ctxdesc = txdesc + ((idx + i) & (RING_DESC_NR - 1));
		ctxbi = txbi + ((idx + i) & (RING_DESC_NR - 1));

		dmaaddr = pci_map_single(jme->pdev,
					skb->data,
					skb->len,
					PCI_DMA_TODEVICE);

		pci_dma_sync_single_for_device(jme->pdev,
					dmaaddr,
					skb->len,
					PCI_DMA_TODEVICE);

		ctxdesc->desc2.flags = TXFLAG_OWN;
		if (jme->dev->features & NETIF_F_HIGHDMA)
			ctxdesc->desc2.flags |= TXFLAG_64BIT;
		ctxdesc->desc2.datalen = cpu_to_le16(skb->len);
		ctxdesc->desc2.bufaddrh = cpu_to_le32((__u64)dmaaddr >> 32);
		ctxdesc->desc2.bufaddrl = cpu_to_le32(
					(__u64)dmaaddr & 0xFFFFFFFFUL);

		ctxbi->mapping = dmaaddr;
		ctxbi->len = skb->len;
	}

	ctxdesc = txdesc + idx;
	ctxbi = txbi + idx;

	ctxdesc->desc1.pktsize = cpu_to_le16(skb->len);
	/*
	 * Set the OWN bit last.  The kernel can produce descriptors
	 * faster than the NIC sends them, and the NIC may try to send
	 * this descriptor before we tell it to start this TX queue;
	 * all other fields must already be filled correctly by then.
	 */
	ctxdesc->desc1.flags = TXFLAG_OWN | TXFLAG_INT;
	/*
	 * Set tx buffer info after telling the NIC to send,
	 * for better tx_clean timing.
	 */
	ctxbi->nr_desc = nr_desc;

	tx_dbg(jme->dev->name, "Xmit: %d+%d\n", idx, nr_desc);

	return NETDEV_TX_OK;
}
static int
jme_setup_tx_resources(struct jme_adapter *jme)
{
	struct jme_ring *txring = &(jme->txring[0]);

	txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
					TX_RING_ALLOC_SIZE,
					&(txring->dmaalloc),
					GFP_ATOMIC);
	if (!txring->alloc) {
		txring->desc = NULL;
		txring->dmaalloc = 0;
		return -ENOMEM;
	}

	txring->desc = (void *)ALIGN((unsigned long)(txring->alloc),
				RING_DESC_ALIGN);
	txring->dma = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
	txring->next_to_use = 0;
	txring->next_to_clean = 0;
	txring->nr_free = RING_DESC_NR;

	/*
	 * Initialize Transmit Descriptors
	 */
	memset(txring->alloc, 0, TX_RING_ALLOC_SIZE);
	memset(txring->bufinf, 0,
		sizeof(struct jme_buffer_info) * RING_DESC_NR);

	return 0;
}
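
/*
 * TX_RING_ALLOC_SIZE is assumed to include RING_DESC_ALIGN bytes of
 * slack: the ring is over-allocated, then both the CPU pointer and
 * the DMA handle are rounded up with ALIGN() so the hardware sees a
 * properly aligned descriptor base.
 */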
static void
jme_free_tx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *txring = &(jme->txring[0]);
	struct jme_buffer_info *txbi = txring->bufinf;

	if (txring->alloc) {
		for (i = 0 ; i < RING_DESC_NR ; ++i) {
			txbi = txring->bufinf + i;
			if (txbi->skb) {
				dev_kfree_skb(txbi->skb);
				txbi->skb = NULL;
			}
		}

		dma_free_coherent(&(jme->pdev->dev),
				TX_RING_ALLOC_SIZE,
				txring->alloc,
				txring->dmaalloc);
		txring->alloc = NULL;
		txring->desc = NULL;
		txring->dmaalloc = 0;
		txring->dma = 0;
	}
	txring->next_to_use = 0;
	txring->next_to_clean = 0;
}
__always_inline static void
jme_enable_tx_engine(struct jme_adapter *jme)
{
	jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);

	/*
	 * Setup TX Queue 0 DMA Base Address
	 */
	jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
	jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
	jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);

	/*
	 * Setup TX Descriptor Count
	 */
	jwrite32(jme, JME_TXQDC, RING_DESC_NR);

	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_ENABLE);
}
__always_inline static void
jme_disable_tx_engine(struct jme_adapter *jme)
{
	int i;
	__u32 val;

	jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);

	val = jread32(jme, JME_TXCS);
	for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) {
		mdelay(1);	/* poll interval assumed */
		val = jread32(jme, JME_TXCS);
	}

	if (!i)
		jeprintk(jme->dev->name, "Disable TX engine timeout.\n");
}
static void
jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = jme->rxring;
	register volatile struct rxdesc *rxdesc = rxring->desc;
	struct jme_buffer_info *rxbi = rxring->bufinf;

	rxdesc += i;
	rxbi += i;

	rxdesc->dw[0] = 0;
	rxdesc->dw[1] = 0;
	rxdesc->desc1.bufaddrh = cpu_to_le32((__u64)rxbi->mapping >> 32);
	rxdesc->desc1.bufaddrl = cpu_to_le32(
				(__u64)rxbi->mapping & 0xFFFFFFFFUL);
	rxdesc->desc1.datalen = cpu_to_le16(rxbi->len);
	if (jme->dev->features & NETIF_F_HIGHDMA)
		rxdesc->desc1.flags = RXFLAG_64BIT;

	rxdesc->desc1.flags |= RXFLAG_OWN | RXFLAG_INT;
}
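
/*
 * RX buffers are one skb per descriptor.  netdev_alloc_skb() does not
 * guarantee RX_BUF_DMA_ALIGN alignment, so the helper below nudges
 * skb->data up to the next boundary with skb_reserve() before mapping
 * the buffer for DMA.
 */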
static int
jme_make_new_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf;
	unsigned long offset;
	struct sk_buff *skb;

	skb = netdev_alloc_skb(jme->dev, RX_BUF_ALLOC_SIZE);
	if (unlikely(!skb))
		return -ENOMEM;

	if (unlikely(skb_is_nonlinear(skb))) {
		dprintk(jme->dev->name,
			"Allocated skb fragged(%d).\n",
			skb_shinfo(skb)->nr_frags);
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	if ((offset =
		(unsigned long)(skb->data)
		& (unsigned long)(RX_BUF_DMA_ALIGN - 1)))
		skb_reserve(skb, RX_BUF_DMA_ALIGN - offset);

	rxbi += i;
	rxbi->skb = skb;
	rxbi->len = skb_tailroom(skb);
	rxbi->mapping = pci_map_single(jme->pdev,
				skb->data,
				rxbi->len,
				PCI_DMA_FROMDEVICE);

	return 0;
}
static void
jme_free_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf;
	rxbi += i;

	if (rxbi->skb) {
		pci_unmap_single(jme->pdev,
				rxbi->mapping, rxbi->len,
				PCI_DMA_FROMDEVICE);
		dev_kfree_skb(rxbi->skb);
		rxbi->skb = NULL;
	}
}
static void
jme_free_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	if (rxring->alloc) {
		for (i = 0 ; i < RING_DESC_NR ; ++i)
			jme_free_rx_buf(jme, i);

		dma_free_coherent(&(jme->pdev->dev),
				RX_RING_ALLOC_SIZE,
				rxring->alloc,
				rxring->dmaalloc);
		rxring->alloc = NULL;
		rxring->desc = NULL;
		rxring->dmaalloc = 0;
		rxring->dma = 0;
	}
	rxring->next_to_use = 0;
	rxring->next_to_clean = 0;
}
static int
jme_setup_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
					RX_RING_ALLOC_SIZE,
					&(rxring->dmaalloc),
					GFP_ATOMIC);
	if (!rxring->alloc) {
		rxring->desc = NULL;
		rxring->dmaalloc = 0;
		return -ENOMEM;
	}

	rxring->desc = (void *)ALIGN((unsigned long)(rxring->alloc),
				RING_DESC_ALIGN);
	rxring->dma = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
	rxring->next_to_use = 0;
	rxring->next_to_clean = 0;

	/*
	 * Initialize Receive Descriptors
	 */
	for (i = 0 ; i < RING_DESC_NR ; ++i) {
		if (unlikely(jme_make_new_rx_buf(jme, i))) {
			jme_free_rx_resources(jme);
			return -ENOMEM;
		}

		jme_set_clean_rxdesc(jme, i);
	}

	return 0;
}
__always_inline static void
jme_enable_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Setup RX DMA Base Address
	 */
	jwrite32(jme, JME_RXDBA_LO, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);
	jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
	jwrite32(jme, JME_RXNDA, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);

	/*
	 * Setup RX Descriptor Count
	 */
	jwrite32(jme, JME_RXQDC, RING_DESC_NR);

	/*
	 * Setup Unicast Filter
	 */
	jme->reg_rxmcs = RXMCS_VTAGRM | RXMCS_PREPAD;
	jme_set_multi(jme->dev);

	jwrite32(jme, JME_RXCS, RXCS_DEFAULT |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);	/* flag names assumed */
}
__always_inline static void
jme_restart_rx_engine(struct jme_adapter *jme)
{
	jwrite32(jme, JME_RXCS, RXCS_DEFAULT |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);	/* flag names assumed */
}
__always_inline static void
jme_disable_rx_engine(struct jme_adapter *jme)
{
	int i;
	__u32 val;

	val = jread32(jme, JME_RXCS);
	val &= ~RXCS_ENABLE;
	jwrite32(jme, JME_RXCS, val);

	val = jread32(jme, JME_RXCS);
	for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) {
		mdelay(1);	/* poll interval assumed */
		val = jread32(jme, JME_RXCS);
	}

	if (!i)
		jeprintk(jme->dev->name, "Disable RX engine timeout.\n");
}
static void
jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
{
	if (dpi->attempt == atmp) {
		++(dpi->cnt);
	} else {
		dpi->attempt = atmp;
		dpi->cnt = 0;
	}
}
static void
jme_dynamic_pcc(struct jme_adapter *jme)
{
	register struct dynpcc_info *dpi = &(jme->dpi);

	if (time_after_eq(jiffies, dpi->check_point)) {
		if (time_after(jiffies, dpi->check_point + PCC_INTERVAL))
			jme_attempt_pcc(dpi, PCC_P1);
		else if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) >
				PCC_P3_THRESHOLD)	/* threshold macro names assumed */
			jme_attempt_pcc(dpi, PCC_P3);
		else if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) >
				PCC_P2_THRESHOLD)
			jme_attempt_pcc(dpi, PCC_P2);
		else
			jme_attempt_pcc(dpi, PCC_P1);

		if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
			jme_set_rx_pcc(jme, dpi->attempt);
			dpi->cur = dpi->attempt;
			dpi->cnt = 0;
		}

		dpi->last_bytes = NET_STAT(jme).rx_bytes;
		dpi->last_pkts = NET_STAT(jme).rx_packets;
		dpi->check_point = jiffies + PCC_INTERVAL;
	}
}
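
/*
 * Heuristic: once per PCC_INTERVAL, look at the RX byte count since
 * the last check and vote for a coalescing preset (more traffic means
 * heavier coalescing).  Only after the same preset wins more than
 * five consecutive votes is the hardware reprogrammed, which keeps
 * the PCC setting from flapping on bursty traffic.
 */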
static void
jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	volatile struct rxdesc *rxdesc = rxring->desc;
	struct jme_buffer_info *rxbi = rxring->bufinf;
	struct sk_buff *skb;
	int framesize;

	rxdesc += idx;
	rxbi += idx;

	skb = rxbi->skb;
	pci_dma_sync_single_for_cpu(jme->pdev,
				rxbi->mapping,
				rxbi->len,
				PCI_DMA_FROMDEVICE);

	if (unlikely(jme_make_new_rx_buf(jme, idx))) {
		pci_dma_sync_single_for_device(jme->pdev,
				rxbi->mapping,
				rxbi->len,
				PCI_DMA_FROMDEVICE);

		++(NET_STAT(jme).rx_dropped);
	} else {
		framesize = le16_to_cpu(rxdesc->descwb.framesize)
				- RX_PREPAD_SIZE;

		skb_reserve(skb, RX_PREPAD_SIZE);
		skb_put(skb, framesize);
		skb->protocol = eth_type_trans(skb, jme->dev);

		netif_rx(skb);

		if (le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST_MUL)
			++(NET_STAT(jme).multicast);

		jme->dev->last_rx = jiffies;
		NET_STAT(jme).rx_bytes += framesize;
		++(NET_STAT(jme).rx_packets);
	}

	jme_set_clean_rxdesc(jme, idx);

	jme_dynamic_pcc(jme);
}
static int
jme_process_receive(struct jme_adapter *jme, int limit)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	volatile struct rxdesc *rxdesc = rxring->desc;
	int i, j, ccnt, desccnt;

	i = rxring->next_to_clean;
	while (limit-- > 0) {
		rxdesc = rxring->desc;
		rxdesc += i;

		if ((rxdesc->descwb.flags & RXWBFLAG_OWN) ||
		    !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
			break;

		desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;

		rx_dbg(jme->dev->name, "RX: Cleaning %d\n", i);

		if (unlikely(desccnt > 1 ||
		rxdesc->descwb.errstat & RXWBERR_ALLERR)) {

			if (rxdesc->descwb.errstat & RXWBERR_CRCERR)
				++(NET_STAT(jme).rx_crc_errors);
			else if (rxdesc->descwb.errstat & RXWBERR_OVERUN)
				++(NET_STAT(jme).rx_fifo_errors);
			else
				++(NET_STAT(jme).rx_errors);

			limit -= desccnt - 1;

			for (j = i, ccnt = desccnt ; ccnt-- ; ) {
				jme_set_clean_rxdesc(jme, j);

				if (unlikely(++j == RING_DESC_NR))
					j = 0;
			}
		} else {
			jme_alloc_and_feed_skb(jme, i);
		}

		if ((i += desccnt) >= RING_DESC_NR)
			i -= RING_DESC_NR;
	}

	rx_dbg(jme->dev->name, "RX: Stop at %d\n", i);
	rx_dbg(jme->dev->name, "RX: RXNDA offset %d\n",
		(jread32(jme, JME_RXNDA) - jread32(jme, JME_RXDBA_LO))
		>> 4);	/* 16-byte descriptors assumed */

	rxring->next_to_clean = i;

	return limit > 0 ? limit : 0;
}
static void
jme_link_change_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct net_device *netdev = jme->dev;
	int timeout = WAIT_TASKLET_TIMEOUT;
	int rc;

	if (!atomic_dec_and_test(&jme->link_changing))
		goto out;

	if (jme_check_link(netdev, 1))
		goto out;

	netif_stop_queue(netdev);

	while (--timeout > 0 &&
		(
		atomic_read(&jme->rx_cleaning) != 1 ||
		atomic_read(&jme->tx_cleaning) != 1
		)) {

		mdelay(1);	/* poll interval assumed */
	}

	if (netif_carrier_ok(netdev)) {
		jme_reset_mac_processor(jme);
		jme_free_rx_resources(jme);
		jme_free_tx_resources(jme);
	}

	jme_check_link(netdev, 0);
	if (netif_carrier_ok(netdev)) {
		rc = jme_setup_rx_resources(jme);
		if (rc) {
			jeprintk(netdev->name,
				"Allocating resources for RX error"
				", Device STOPPED!\n");
			goto out;
		}

		rc = jme_setup_tx_resources(jme);
		if (rc) {
			jeprintk(netdev->name,
				"Allocating resources for TX error"
				", Device STOPPED!\n");
			goto err_out_free_rx_resources;
		}

		jme_enable_rx_engine(jme);
		jme_enable_tx_engine(jme);

		netif_start_queue(netdev);
	}

	goto out;

err_out_free_rx_resources:
	jme_free_rx_resources(jme);
out:
	atomic_inc(&jme->link_changing);
}
static void
jme_rx_clean_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;

	if (!atomic_dec_and_test(&jme->rx_cleaning))
		goto out;

	if (atomic_read(&jme->link_changing) != 1)
		goto out;

	if (unlikely(netif_queue_stopped(jme->dev)))
		goto out;

	jme_process_receive(jme, RING_DESC_NR);

out:
	atomic_inc(&jme->rx_cleaning);
}
static void
jme_rx_empty_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;

	if (atomic_read(&jme->link_changing) != 1)
		return;

	if (unlikely(netif_queue_stopped(jme->dev)))
		return;

	jme_rx_clean_tasklet(arg);
	jme_restart_rx_engine(jme);
}
static void
jme_tx_clean_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct jme_ring *txring = &(jme->txring[0]);
	volatile struct txdesc *txdesc = txring->desc;
	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
	int i, j, cnt = 0, max;

	if (!atomic_dec_and_test(&jme->tx_cleaning))
		goto out;

	if (atomic_read(&jme->link_changing) != 1)
		goto out;

	if (unlikely(netif_queue_stopped(jme->dev)))
		goto out;

	spin_lock(&jme->tx_lock);
	max = RING_DESC_NR - txring->nr_free;
	spin_unlock(&jme->tx_lock);

	tx_dbg(jme->dev->name, "Tx Tasklet: In\n");

	for (i = txring->next_to_clean ; cnt < max ; ) {
		ctxbi = txbi + i;

		if (ctxbi->skb && !(txdesc[i].desc1.flags & TXFLAG_OWN)) {
			tx_dbg(jme->dev->name,
				"Tx Tasklet: Clean %d+%d\n",
				i, ctxbi->nr_desc);

			for (j = 1 ; j < ctxbi->nr_desc ; ++j) {
				ttxbi = txbi + ((i + j) & (RING_DESC_NR - 1));
				txdesc[(i + j) & (RING_DESC_NR - 1)].dw[0] = 0;

				pci_unmap_single(jme->pdev,
						ttxbi->mapping,
						ttxbi->len,
						PCI_DMA_TODEVICE);

				NET_STAT(jme).tx_bytes += ttxbi->len;
				ttxbi->mapping = 0;
				ttxbi->len = 0;
			}

			dev_kfree_skb(ctxbi->skb);
			ctxbi->skb = NULL;

			cnt += ctxbi->nr_desc;

			++(NET_STAT(jme).tx_packets);
		} else {
			if (!ctxbi->skb)
				tx_dbg(jme->dev->name,
					"Tx Tasklet: Stopped due to no skb.\n");
			else
				tx_dbg(jme->dev->name,
					"Tx Tasklet: Stopped due to not done.\n");
			break;
		}

		if (unlikely((i += ctxbi->nr_desc) >= RING_DESC_NR))
			i -= RING_DESC_NR;
	}

	tx_dbg(jme->dev->name,
		"Tx Tasklet: Stop %d Jiffies %lu\n",
		i, jiffies);
	txring->next_to_clean = i;

	spin_lock(&jme->tx_lock);
	txring->nr_free += cnt;
	spin_unlock(&jme->tx_lock);

out:
	atomic_inc(&jme->tx_cleaning);
}
static irqreturn_t
jme_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct jme_adapter *jme = netdev_priv(netdev);
	irqreturn_t rc = IRQ_HANDLED;
	__u32 intrstat;

#ifdef USE_SHADOW_IEVE	/* guard name assumed: shadow read vs. MMIO read */
	pci_dma_sync_single_for_cpu(jme->pdev,
				jme->shadow_dma,
				sizeof(__u32) * SHADOW_REG_NR,
				PCI_DMA_FROMDEVICE);
	intrstat = jme->shadow_regs[SHADOW_IEVE];
	jme->shadow_regs[SHADOW_IEVE] = 0;
#else
	intrstat = jread32(jme, JME_IEVE);
#endif

	/*
	 * Check if it's really an interrupt for us
	 */
	if (intrstat == 0) {
		rc = IRQ_NONE;
		goto out;
	}

	/*
	 * Check if the device still exists
	 */
	if (unlikely(intrstat == ~((typeof(intrstat))0))) {
		rc = IRQ_NONE;
		goto out;
	}

	/*
	 * Allow one interrupt handling at a time
	 */
	if (unlikely(!atomic_dec_and_test(&jme->intr_sem)))
		goto out_inc;

	/*
	 * Disable interrupt
	 */
	jwrite32f(jme, JME_IENC, INTR_ENABLE);

	if (intrstat & INTR_LINKCH) {
		tasklet_schedule(&jme->linkch_task);
		goto out_deassert;
	}

	if (intrstat & INTR_RX0EMP)
		tasklet_schedule(&jme->rxempty_task);

	if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0))
		tasklet_schedule(&jme->rxclean_task);

	if (intrstat & (INTR_PCCTXTO | INTR_PCCTX))
		tasklet_schedule(&jme->txclean_task);

	if ((intrstat & ~INTR_ENABLE) != 0) {
		/*
		 * Some interrupt not handled,
		 * but also not enabled (for debug).
		 */
	}

out_deassert:
	/*
	 * Deassert interrupts
	 */
	jwrite32f(jme, JME_IEVE, intrstat);

	/*
	 * Re-enable interrupt
	 */
	jwrite32f(jme, JME_IENS, INTR_ENABLE);

out_inc:
	/*
	 * Enable next interrupt handling
	 */
	atomic_inc(&jme->intr_sem);

out:
	return rc;
}
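
/*
 * jme->intr_sem acts as a non-blocking mutex for the handler above:
 * atomic_dec_and_test() admits exactly one context, anything else
 * restores the count and backs off.  The same dec/test/inc pattern
 * guards the tasklets via link_changing, rx_cleaning and tx_cleaning.
 */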
static void
jme_restart_an(struct jme_adapter *jme)
{
	__u32 bmcr;

	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
}
static int
jme_open(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc, timeout = 100;

	while (--timeout > 0 &&
		(
		atomic_read(&jme->link_changing) != 1 ||
		atomic_read(&jme->rx_cleaning) != 1 ||
		atomic_read(&jme->tx_cleaning) != 1
		)) {

		msleep(10);	/* wait interval assumed */
	}

	jme_reset_mac_processor(jme);

	rc = request_irq(jme->pdev->irq, jme_intr,
			IRQF_SHARED, netdev->name, netdev);
	if (rc) {
		printk(KERN_ERR PFX "Requesting IRQ error.\n");
		goto err_out;
	}
	jme_enable_shadow(jme);
	jme_start_irq(jme);
	jme_restart_an(jme);

	return 0;

err_out:
	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	return rc;
}
static int
jme_close(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);

	jme_stop_irq(jme);
	jme_disable_shadow(jme);
	free_irq(jme->pdev->irq, jme->dev);

	tasklet_kill(&jme->linkch_task);
	tasklet_kill(&jme->txclean_task);
	tasklet_kill(&jme->rxclean_task);
	tasklet_kill(&jme->rxempty_task);
	jme_disable_rx_engine(jme);
	jme_disable_tx_engine(jme);
	jme_free_rx_resources(jme);
	jme_free_tx_resources(jme);

	return 0;
}
/*
 * This function is already protected by netif_tx_lock()
 */
static int
jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc;

	if (unlikely(netif_queue_stopped(jme->dev)))
		return NETDEV_TX_BUSY;

	rc = jme_set_new_txdesc(jme, skb);

	if (unlikely(rc != NETDEV_TX_OK))
		return rc;

	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_QUEUE0S |	/* queue-0 start flag name assumed */
				TXCS_ENABLE);

	netdev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
static int
jme_set_macaddr(struct net_device *netdev, void *p)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	struct sockaddr *addr = p;
	__u32 val;

	if (netif_running(netdev))
		return -EBUSY;

	spin_lock(&jme->macaddr_lock);
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	val = addr->sa_data[3] << 24 |
	      addr->sa_data[2] << 16 |
	      addr->sa_data[1] <<  8 |
	      addr->sa_data[0];
	jwrite32(jme, JME_RXUMA_LO, val);
	val = addr->sa_data[5] << 8 |
	      addr->sa_data[4];
	jwrite32(jme, JME_RXUMA_HI, val);
	spin_unlock(&jme->macaddr_lock);

	return 0;
}
static void
jme_set_multi(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 mc_hash[2] = {};
	__u32 val;
	int i;

	val = jme->reg_rxmcs | RXMCS_BRDFRAME | RXMCS_UNIFRAME;

	if (netdev->flags & IFF_PROMISC) {
		val |= RXMCS_ALLFRAME;
	}
	else if (netdev->flags & IFF_ALLMULTI) {
		val |= RXMCS_ALLMULFRAME;
	}
	else if (netdev->flags & IFF_MULTICAST) {
		struct dev_mc_list *mclist;
		int bit_nr;

		val |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
		for (i = 0, mclist = netdev->mc_list;
		     mclist && i < netdev->mc_count;
		     ++i, mclist = mclist->next) {

			bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
			mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
		}

		jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
		jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
	}

	jwrite32(jme, JME_RXMCS, val);
}
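
/*
 * The multicast filter is a 64-bit hash split across two 32-bit
 * registers: the low six bits of ether_crc() of each address select
 * one of 64 bit positions; "bit_nr >> 5" picks the register and
 * "bit_nr & 0x1F" the bit within it.
 */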
static int
jme_change_mtu(struct net_device *dev, int new_mtu)
{
	/*
	 * Not supporting MTU change for now.
	 */
	return -EINVAL;
}
static void
jme_get_drvinfo(struct net_device *netdev,
		struct ethtool_drvinfo *info)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(jme->pdev));
}
static int
jme_get_settings(struct net_device *netdev,
		 struct ethtool_cmd *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc;

	spin_lock(&jme->phy_lock);
	rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
	spin_unlock(&jme->phy_lock);
	return rc;
}
static int
jme_set_settings(struct net_device *netdev,
		 struct ethtool_cmd *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&jme->phy_lock, flags);
	rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
	spin_unlock_irqrestore(&jme->phy_lock, flags);
	return rc;
}
static u32
jme_get_link(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
}
static const struct ethtool_ops jme_ethtool_ops = {
	.get_drvinfo = jme_get_drvinfo,
	.get_settings = jme_get_settings,
	.set_settings = jme_set_settings,
	.get_link = jme_get_link,
};
static int
jme_pci_dma64(struct pci_dev *pdev)
{
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
		if (!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
			return 1;

	if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
		if (!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
			return 0;

	return -1;
}
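
/*
 * Try a 64-bit DMA mask first, then fall back to 32-bit.  The
 * positive return value ("using_dac") is what later gates
 * NETIF_F_HIGHDMA and the TXFLAG_64BIT/RXFLAG_64BIT descriptor bits.
 */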
static int __devinit
jme_init_one(struct pci_dev *pdev,
		const struct pci_device_id *ent)
{
	int rc = 0, using_dac;
	struct net_device *netdev;
	struct jme_adapter *jme;

	/*
	 * set up PCI device basics
	 */
	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device.\n");
		return rc;
	}

	using_dac = jme_pci_dma64(pdev);
	if (using_dac < 0) {
		printk(KERN_ERR PFX "Cannot set PCI DMA Mask.\n");
		rc = -EIO;
		goto err_out_disable_pdev;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "No PCI resource region found.\n");
		rc = -ENOMEM;
		goto err_out_disable_pdev;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		printk(KERN_ERR PFX "Cannot obtain PCI resource region.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/*
	 * alloc and init net device
	 */
	netdev = alloc_etherdev(sizeof(*jme));
	if (!netdev) {
		rc = -ENOMEM;
		goto err_out_release_regions;
	}
	netdev->open = jme_open;
	netdev->stop = jme_close;
	netdev->hard_start_xmit = jme_start_xmit;
	netdev->irq = pdev->irq;
	netdev->set_mac_address = jme_set_macaddr;
	netdev->set_multicast_list = jme_set_multi;
	netdev->change_mtu = jme_change_mtu;
	netdev->ethtool_ops = &jme_ethtool_ops;
	NETDEV_GET_STATS(netdev, &jme_get_stats);

	if (using_dac)
		netdev->features = NETIF_F_HIGHDMA;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	/*
	 * init adapter info
	 */
	jme = netdev_priv(netdev);
	jme->pdev = pdev;
	jme->dev = netdev;
	jme->reg_ghc = GHC_DPX | GHC_SPEED_1000M;
	jme->regs = ioremap(pci_resource_start(pdev, 0),
			pci_resource_len(pdev, 0));
	if (!(jme->regs)) {
		rc = -ENOMEM;
		goto err_out_free_netdev;
	}
	jme->shadow_regs = pci_alloc_consistent(pdev,
					sizeof(__u32) * SHADOW_REG_NR,
					&(jme->shadow_dma));
	if (!(jme->shadow_regs)) {
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	spin_lock_init(&jme->tx_lock);
	spin_lock_init(&jme->phy_lock);
	spin_lock_init(&jme->macaddr_lock);

	atomic_set(&jme->intr_sem, 1);
	atomic_set(&jme->link_changing, 1);
	atomic_set(&jme->rx_cleaning, 1);
	atomic_set(&jme->tx_cleaning, 1);

	tasklet_init(&jme->linkch_task,
		&jme_link_change_tasklet,
		(unsigned long) jme);
	tasklet_init(&jme->txclean_task,
		&jme_tx_clean_tasklet,
		(unsigned long) jme);
	tasklet_init(&jme->rxclean_task,
		&jme_rx_clean_tasklet,
		(unsigned long) jme);
	tasklet_init(&jme->rxempty_task,
		&jme_rx_empty_tasklet,
		(unsigned long) jme);
	jme->mii_if.dev = netdev;
	jme->mii_if.phy_id = 1;
	jme->mii_if.supports_gmii = 1;
	jme->mii_if.mdio_read = jme_mdio_read;
	jme->mii_if.mdio_write = jme_mdio_write;

	/*
	 * Get Max Read Req Size from PCI Config Space
	 */
	pci_read_config_byte(pdev, PCI_CONF_DCSR_MRRS, &jme->mrrs);
	switch (jme->mrrs) {	/* MRRS_* case-label names assumed */
	case MRRS_128B:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
		break;
	case MRRS_256B:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
		break;
	default:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
		break;
	}
	/*
	 * Reset MAC processor and reload EEPROM for MAC Address
	 */
	jme_reset_phy_processor(jme);
	jme_reset_mac_processor(jme);
	rc = jme_reload_eeprom(jme);
	if (rc) {
		printk(KERN_ERR PFX
			"Reload eeprom for reading MAC Address error.\n");
		goto err_out_free_shadow;
	}
	jme_load_macaddr(netdev);

	/*
	 * Tell stack that we are not ready to work until open()
	 */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	/*
	 * Register netdev
	 */
	rc = register_netdev(netdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot register net device.\n");
		goto err_out_free_shadow;
	}

	jprintk(netdev->name,
		"JMC250 gigabit eth at %llx, "
		"%02x:%02x:%02x:%02x:%02x:%02x, IRQ %d\n",
		(unsigned long long) pci_resource_start(pdev, 0),
		netdev->dev_addr[0],
		netdev->dev_addr[1],
		netdev->dev_addr[2],
		netdev->dev_addr[3],
		netdev->dev_addr[4],
		netdev->dev_addr[5],
		pdev->irq);

	return 0;

err_out_free_shadow:
	pci_free_consistent(pdev,
			sizeof(__u32) * SHADOW_REG_NR,
			jme->shadow_regs,
			jme->shadow_dma);
err_out_unmap:
	iounmap(jme->regs);
err_out_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);

	return rc;
}
static void __devexit
jme_remove_one(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	unregister_netdev(netdev);
	pci_free_consistent(pdev,
			sizeof(__u32) * SHADOW_REG_NR,
			jme->shadow_regs,
			jme->shadow_dma);
	iounmap(jme->regs);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
static struct pci_device_id jme_pci_tbl[] = {
	{ PCI_VDEVICE(JMICRON, 0x250) },
	{ }
};

static struct pci_driver jme_driver = {
	.name = DRV_NAME,
	.id_table = jme_pci_tbl,
	.probe = jme_init_one,
	.remove = __devexit_p(jme_remove_one),
#ifdef CONFIG_PM
	.suspend = jme_suspend,
	.resume = jme_resume,
#endif /* CONFIG_PM */
};
static int __init
jme_init_module(void)
{
	printk(KERN_INFO PFX "JMicron JMC250 gigabit ethernet "
		"driver version %s\n", DRV_VERSION);
	return pci_register_driver(&jme_driver);
}

static void __exit
jme_cleanup_module(void)
{
	pci_unregister_driver(&jme_driver);
}

module_init(jme_init_module);
module_exit(jme_cleanup_module);
MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, jme_pci_tbl);