* Copyright 2008 JMicron Technology Corporation
* http://www.jmicron.com/
*
+ * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License.
*/
/*
- * TODO before release:
- * 1. Use sk_buff for dma buffer with pci_map_single,
- * and handle scattered sk_buffs (Reduce memory copy)
- * 2. Try setting 64bit DMA with pci_set[_consistent]_dma_mask
- * and set netdev feature flag.
- * 3. Implement Power Managemt related functions.
- * 4. Implement checksum offloading, VLAN offloading,
- * TCP Segement offloading.
- * 5. Implement Jumboframe.
- * 6. Implement NAPI option for user.
- * 7. Implement MSI / MSI-X.
- * 8. Implement PCC.
- * 9. Implement QoS according to "priority" attribute in sk_buff
- * with 8 TX priority queue provided by hardware.
- * 10.Cleanup/re-orginize code, performence tuneing(alignment etc...).
+ * Note:
+ * Watchdog:
+ *		Check if the RX queue has stopped,
+ *		and restart it after the RX ring is cleaned.
*/
+/*
+ * Timeline before release:
+ * Stage 2: Error handling.
+ *	-  Watchdog
+ * - Transmit timeout
+ *
+ * Stage 3: Basic offloading support.
+ * - Use pci_map_page on scattered sk_buff for HIGHMEM support
+ * - Implement scatter-gather offloading.
+ * A system page per RX (buffer|descriptor)?
+ *	   Map fragmented sk_buffs onto TX descriptors.
+ * - Implement tx/rx ipv6/ip/tcp/udp checksum offloading
+ *
+ * Stage 4: Basic feature support.
+ *	-  Implement Power Management related functions.
+ * - Implement Jumboframe.
+ * - Implement MSI.
+ *
+ * Stage 5: Advanced offloading support.
+ * - Implement VLAN offloading.
+ *	-  Implement TCP Segmentation offloading.
+ *
+ * Stage 6: CPU Load balancing.
+ * - Implement MSI-X.
+ *	   Along with multiple RX queues, for CPU load balancing.
+ *
+ * Stage 7:
+ * - Use NAPI instead of rx_tasklet?
+ *	   PCC supports both packet-count and timeout interrupts for
+ *	   receive and transmit completion; is NAPI really needed?
+ *	-  Clean up / reorganize code, performance tuning (alignment etc.).
+ * - Test and Release 1.0
+ */
+
+#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
+#include <linux/delay.h>
#include "jme.h"
-static int jme_mdio_read(struct net_device *netdev, int phy, int reg)
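+/*
+ * Kernels up to and including 2.6.21 have no netdev->stats,
+ * so report statistics from the adapter's private copy.
+ */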
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
+static struct net_device_stats *
+jme_get_stats(struct net_device *netdev)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ return &jme->stats;
+}
+#endif
+
+static int
+jme_mdio_read(struct net_device *netdev, int phy, int reg)
{
struct jme_adapter *jme = netdev_priv(netdev);
int i, val;
jwrite32(jme, JME_SMI, SMI_OP_REQ |
- smi_phy_addr(phy) |
- smi_reg_addr(reg));
+ smi_phy_addr(phy) |
+ smi_reg_addr(reg));
wmb();
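+	/* Poll until the hardware clears SMI_OP_REQ (read complete) */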
for (i = JME_PHY_TIMEOUT; i > 0; --i) {
- udelay(1);
- if (((val = jread32(jme, JME_SMI)) & SMI_OP_REQ) == 0)
- break;
+ udelay(1);
+ if (((val = jread32(jme, JME_SMI)) & SMI_OP_REQ) == 0)
+ break;
}
if (i == 0) {
- dprintk("phy read timeout : %d\n", reg);
- return (0);
+ jeprintk(netdev->name, "phy read timeout : %d\n", reg);
+ return 0;
}
- return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
+ return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}
-static void jme_mdio_write(struct net_device *netdev, int phy, int reg, int val)
+static void
+jme_mdio_write(struct net_device *netdev,
+ int phy, int reg, int val)
{
struct jme_adapter *jme = netdev_priv(netdev);
int i;
- jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
- ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
- smi_phy_addr(phy) | smi_reg_addr(reg));
+ jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
+ ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
+ smi_phy_addr(phy) | smi_reg_addr(reg));
wmb();
- for (i = JME_PHY_TIMEOUT; i > 0; --i)
- {
- udelay(1);
- if (((val = jread32(jme, JME_SMI)) & SMI_OP_REQ) == 0)
- break;
- }
+ for (i = JME_PHY_TIMEOUT ; i > 0 ; --i) {
+ udelay(1);
+ if (((val = jread32(jme, JME_SMI)) & SMI_OP_REQ) == 0)
+ break;
+ }
- if (i == 0)
- dprintk("phy write timeout : %d\n", reg);
+ if (i == 0)
+ jeprintk(netdev->name, "phy write timeout : %d\n", reg);
- return;
+ return;
}
-static void jme_reset_mac_processor(struct jme_adapter *jme)
+__always_inline static void
+jme_reset_phy_processor(struct jme_adapter *jme)
{
__u32 val;
- val = jread32(jme, JME_GHC);
- val |= GHC_SWRST;
- jwrite32(jme, JME_GHC, val);
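+	/*
+	 * Advertise all 10/100 abilities plus 1000Base-T half/full,
+	 * then assert BMCR_RESET so the PHY restarts with the new
+	 * advertisement.
+	 */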
+ jme_mdio_write(jme->dev,
+ jme->mii_if.phy_id,
+ MII_ADVERTISE, ADVERTISE_ALL);
+
+ jme_mdio_write(jme->dev,
+ jme->mii_if.phy_id,
+ MII_CTRL1000,
+ ADVERTISE_1000FULL | ADVERTISE_1000HALF);
+
+ val = jme_mdio_read(jme->dev,
+ jme->mii_if.phy_id,
+ MII_BMCR);
+
+ jme_mdio_write(jme->dev,
+ jme->mii_if.phy_id,
+ MII_BMCR, val | BMCR_RESET);
+
+ return;
+}
+
+
+__always_inline static void
+jme_reset_mac_processor(struct jme_adapter *jme)
+{
+ jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
udelay(2);
- val &= ~GHC_SWRST;
- jwrite32(jme, JME_GHC, val);
- jwrite32(jme, JME_RXMCHT, 0x00000000);
- jwrite32(jme, JME_RXMCHT+4, 0x00000000);
+ jwrite32(jme, JME_GHC, jme->reg_ghc);
+ jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
+ jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
jwrite32(jme, JME_WFODP, 0);
jwrite32(jme, JME_WFOI, 0);
+ jwrite32(jme, JME_GPREG0, GPREG0_DEFAULT);
+ jwrite32(jme, JME_GPREG1, 0);
}
-__always_inline static void jme_clear_pm(struct jme_adapter *jme)
+__always_inline static void
+jme_clear_pm(struct jme_adapter *jme)
{
jwrite32(jme, JME_PMCS, 0xFFFF0000);
+ pci_set_power_state(jme->pdev, PCI_D0);
}
-static int jme_reload_eeprom(struct jme_adapter *jme)
+static int
+jme_reload_eeprom(struct jme_adapter *jme)
{
__u32 val;
int i;
}
if(i == 0) {
- dprintk("eeprom reload timeout\n");
+ jeprintk(jme->dev->name, "eeprom reload timeout\n");
return -EIO;
}
}
else
return -EIO;
-
+
return 0;
}
-__always_inline static void jme_load_macaddr(struct net_device *netdev)
+static void
+jme_load_macaddr(struct net_device *netdev)
{
struct jme_adapter *jme = netdev_priv(netdev);
unsigned char macaddr[6];
__u32 val;
- val = jread32(jme, JME_RXUMA);
+ spin_lock(&jme->macaddr_lock);
+ val = jread32(jme, JME_RXUMA_LO);
macaddr[0] = (val >> 0) & 0xFF;
macaddr[1] = (val >> 8) & 0xFF;
macaddr[2] = (val >> 16) & 0xFF;
macaddr[3] = (val >> 24) & 0xFF;
- val = jread32(jme, JME_RXUMA+4);
+ val = jread32(jme, JME_RXUMA_HI);
macaddr[4] = (val >> 0) & 0xFF;
macaddr[5] = (val >> 8) & 0xFF;
memcpy(netdev->dev_addr, macaddr, 6);
+ spin_unlock(&jme->macaddr_lock);
+}
+
+__always_inline static void
+jme_set_rx_pcc(struct jme_adapter *jme, int p)
+{
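+	/*
+	 * Program RX interrupt coalescing: each PCC profile pairs a
+	 * timeout with a packet count, and the NIC interrupts when
+	 * either threshold is reached.
+	 */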
+ switch(p) {
+ case PCC_P1:
+ jwrite32(jme, JME_PCCRX0,
+ ((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
+ ((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
+ break;
+ case PCC_P2:
+ jwrite32(jme, JME_PCCRX0,
+ ((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
+ ((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
+ break;
+ case PCC_P3:
+ jwrite32(jme, JME_PCCRX0,
+ ((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
+ ((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));
+ break;
+ default:
+ break;
+ }
+
+ dprintk(jme->dev->name, "Switched to PCC_P%d\n", p);
}
-__always_inline static void jme_start_irq(struct jme_adapter *jme)
+static void
+jme_start_irq(struct jme_adapter *jme)
{
+ register struct dynpcc_info *dpi = &(jme->dpi);
+
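+	/*
+	 * Start from profile P1 and reset the bookkeeping used by
+	 * jme_dynamic_pcc().
+	 */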
+ jme_set_rx_pcc(jme, PCC_P1);
+
+ dpi->check_point = jiffies + PCC_INTERVAL;
+ dpi->last_bytes = NET_STAT(jme).rx_bytes;
+ dpi->last_pkts = NET_STAT(jme).rx_packets;
+ dpi->cur = PCC_P1;
+ dpi->attempt = PCC_P1;
+ dpi->cnt = 0;
+
+ jwrite32(jme, JME_PCCTX,
+ ((60000 << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
+ ((8 << PCCTX_SHIFT) & PCCTX_MASK) |
+ PCCTXQ0_EN
+ );
+
/*
* Enable Interrupts
*/
jwrite32(jme, JME_IENS, INTR_ENABLE);
}
-__always_inline static void jme_stop_irq(struct jme_adapter *jme)
+__always_inline static void
+jme_stop_irq(struct jme_adapter *jme)
{
/*
* Disable Interrupts
jwrite32(jme, JME_IENC, INTR_ENABLE);
}
-__always_inline static void jme_check_link(struct net_device *netdev)
+
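+/*
+ * The NIC can post (DMA) copies of selected registers into a host
+ * memory block; the low five bits of the base address are masked
+ * out to make room for control flags such as SHBA_POSTEN.
+ */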
+__always_inline static void
+jme_enable_shadow(struct jme_adapter *jme)
+{
+ jwrite32(jme,
+ JME_SHBA_LO,
+ ((__u32)jme->shadow_dma & ~((__u32)0x1F)) | SHBA_POSTEN);
+}
+
+__always_inline static void
+jme_disable_shadow(struct jme_adapter *jme)
+{
+ jwrite32(jme, JME_SHBA_LO, 0x0);
+}
+
+static int
+jme_check_link(struct net_device *netdev, int testonly)
{
struct jme_adapter *jme = netdev_priv(netdev);
- __u32 phylink, ghc, cnt = JME_AUTONEG_TIMEOUT;
+ __u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT;
char linkmsg[32];
+ int rc = 0;
phylink = jread32(jme, JME_PHY_LINK);
/*
	 * Keep polling until speed/duplex is resolved
*/
- while(!(phylink & PHY_LINK_AUTONEG_COMPLETE) && --cnt > 0) {
- mdelay(1);
+ while(!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) && --cnt > 0) {
+ udelay(1);
phylink = jread32(jme, JME_PHY_LINK);
}
+ if(jme->phylink == phylink) {
+ rc = 1;
+ goto out;
+ }
+ if(testonly)
+ goto out;
+
+ jme->phylink = phylink;
+
if(!cnt)
- printk(KERN_ERR "Waiting autoneg timeout.\n");
+ jeprintk(netdev->name,
+ "Waiting speed resolve timeout.\n");
+
+ if(!(phylink & PHY_LINK_AUTONEG_COMPLETE))
+ jprintk(netdev->name,
+ "Link partener does not support AN.\n");
switch(phylink & PHY_LINK_SPEED_MASK) {
case PHY_LINK_SPEED_10M:
break;
}
ghc |= (phylink & PHY_LINK_DUPLEX) ? GHC_DPX : 0;
- jwrite32(jme, JME_GHC, ghc);
+
	strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
- "Full-Duplex" :
- "Half-Duplex");
+ "Full-Duplex, " :
+ "Half-Duplex, ");
+
+ if(phylink & PHY_LINK_MDI_STAT)
+ strcat(linkmsg, "MDI");
+ else
+ strcat(linkmsg, "MDI-X");
if(phylink & PHY_LINK_DUPLEX)
jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
else
jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
- TXMCS_BACKOFF |
- TXMCS_CARRIERSENSE |
- TXMCS_COLLISION);
+ TXMCS_BACKOFF |
+ TXMCS_CARRIERSENSE |
+ TXMCS_COLLISION);
+
+ jme->reg_ghc = ghc;
+ jwrite32(jme, JME_GHC, ghc);
- jprintk("Link is up at %s.\n", linkmsg);
+ jprintk(netdev->name, "Link is up at %s.\n", linkmsg);
netif_carrier_on(netdev);
}
else {
- jprintk("Link is down.\n");
+ if(testonly)
+ goto out;
+
+ jprintk(netdev->name, "Link is down.\n");
+ jme->phylink = 0;
netif_carrier_off(netdev);
}
+
+out:
+ return rc;
}
-__always_inline static void jme_set_new_txdesc(struct jme_adapter *jme,
- int i, int framesize)
+
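+/*
+ * Reserve nr_alloc consecutive TX descriptors (with wraparound)
+ * and return the index of the first one, or -1 if the ring does
+ * not have enough free entries.
+ */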
+static int
+jme_alloc_txdesc(struct jme_adapter *jme,
+ int nr_alloc)
{
struct jme_ring *txring = jme->txring;
- struct TxDesc* txdesc = txring->desc;
-
- memset(txdesc + i, 0, TX_DESC_SIZE);
- txdesc[i].desc1.bufaddr = cpu_to_le32(ALIGN(txring->buf_dma[i], 8));
- txdesc[i].desc1.datalen = cpu_to_le16(TX_BUF_SIZE);
- txdesc[i].desc1.pktsize = cpu_to_le16(framesize);
+ int idx;
+
+ idx = txring->next_to_use;
+
+ if(unlikely(txring->nr_free < nr_alloc))
+ return -1;
+
+ spin_lock(&jme->tx_lock);
+ txring->nr_free -= nr_alloc;
+
+ if((txring->next_to_use += nr_alloc) >= RING_DESC_NR)
+ txring->next_to_use -= RING_DESC_NR;
+ spin_unlock(&jme->tx_lock);
+
+ return idx;
+}
+
+static int
+jme_set_new_txdesc(struct jme_adapter *jme,
+ struct sk_buff *skb)
+{
+ struct jme_ring *txring = jme->txring;
+ volatile struct txdesc *txdesc = txring->desc, *ctxdesc;
+ struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
+ dma_addr_t dmaaddr;
+ int i, idx, nr_desc;
+
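+	/*
+	 * Use two descriptors per packet for now: the first carries
+	 * the packet header info (pktsize, OWN/INT flags), the second
+	 * points at the linear skb data. Fragmented skbs come in
+	 * stage 3 of the timeline above.
+	 */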
+ nr_desc = 2;
+ idx = jme_alloc_txdesc(jme, nr_desc);
+
+	if(unlikely(idx < 0))
+ return NETDEV_TX_BUSY;
+
+ for(i = 1 ; i < nr_desc ; ++i) {
+ ctxdesc = txdesc + ((idx + i) & (RING_DESC_NR-1));
+ ctxbi = txbi + ((idx + i) & (RING_DESC_NR-1));
+
+ dmaaddr = pci_map_single(jme->pdev,
+ skb->data,
+ skb->len,
+ PCI_DMA_TODEVICE);
+
+ pci_dma_sync_single_for_device(jme->pdev,
+ dmaaddr,
+ skb->len,
+ PCI_DMA_TODEVICE);
+
+ ctxdesc->dw[0] = 0;
+ ctxdesc->dw[1] = 0;
+ ctxdesc->desc2.flags = TXFLAG_OWN;
+ if(jme->dev->features & NETIF_F_HIGHDMA)
+ ctxdesc->desc2.flags |= TXFLAG_64BIT;
+ ctxdesc->desc2.datalen = cpu_to_le16(skb->len);
+ ctxdesc->desc2.bufaddrh = cpu_to_le32((__u64)dmaaddr >> 32);
+ ctxdesc->desc2.bufaddrl = cpu_to_le32(
+ (__u64)dmaaddr & 0xFFFFFFFFUL);
+
+ ctxbi->mapping = dmaaddr;
+ ctxbi->len = skb->len;
+ }
+
+ ctxdesc = txdesc + idx;
+ ctxbi = txbi + idx;
+
+ ctxdesc->dw[0] = 0;
+ ctxdesc->dw[1] = 0;
+ ctxdesc->dw[2] = 0;
+ ctxdesc->dw[3] = 0;
+ ctxdesc->desc1.pktsize = cpu_to_le16(skb->len);
/*
	 * Set the OWN bit last.
-	 * When kernel transmit faster than NIC last packet sent,
-	 * and NIC tring to send this descriptor before we tell
+	 * If the kernel transmits faster than the NIC, the NIC could
+	 * try to send this descriptor before we tell
* it to start sending this TX queue.
* Other fields are already filled correctly.
*/
wmb();
- txdesc[i].desc1.flags = TXFLAG_OWN | TXFLAG_INT;
+ ctxdesc->desc1.flags = TXFLAG_OWN | TXFLAG_INT;
+ /*
+	 * Set the TX buffer info after telling the NIC to send,
+	 * for better tx_clean timing.
+ */
+ wmb();
+ ctxbi->nr_desc = nr_desc;
+ ctxbi->skb = skb;
- dprintk("TX Ring Buf Address(%08x,%08x,%d).\n",
- txring->buf_dma[i],
- (txdesc[i].all[12] << 0) |
- (txdesc[i].all[13] << 8) |
- (txdesc[i].all[14] << 16) |
- (txdesc[i].all[15] << 24),
- (txdesc[i].all[4] << 0) |
- (txdesc[i].all[5] << 8));
+ tx_dbg(jme->dev->name, "Xmit: %d+%d\n", idx, nr_desc);
+ return 0;
}
-__always_inline static int jme_setup_tx_resources(struct jme_adapter *jme)
+static int
+jme_setup_tx_resources(struct jme_adapter *jme)
{
- int i;
struct jme_ring *txring = &(jme->txring[0]);
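+	/* GFP_ATOMIC: this may run from the link-change tasklet */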
txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
TX_RING_ALLOC_SIZE,
- &(txring->dmaalloc),
- GFP_KERNEL);
- if(!txring->alloc)
+ &(txring->dmaalloc),
+ GFP_ATOMIC);
+
+ if(!txring->alloc) {
+ txring->desc = NULL;
+ txring->dmaalloc = 0;
+ txring->dma = 0;
return -ENOMEM;
+ }
/*
* 16 Bytes align
*/
- txring->desc = (void*)ALIGN((unsigned long)(txring->alloc), 16);
- txring->dma = ALIGN(txring->dmaalloc, 16);
+ txring->desc = (void*)ALIGN((unsigned long)(txring->alloc),
+ RING_DESC_ALIGN);
+ txring->dma = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
txring->next_to_use = 0;
txring->next_to_clean = 0;
-
- dprintk("TX Ring Base Address(%08x,%08x).\n",
- (__u32)txring->desc,
- txring->dma);
+ txring->nr_free = RING_DESC_NR;
/*
	 * Initialize Transmit Descriptors
*/
memset(txring->alloc, 0, TX_RING_ALLOC_SIZE);
- for(i = 0 ; i < RING_DESC_NR ; ++i) {
- txring->buf_virt[i] = dma_alloc_coherent(&(jme->pdev->dev),
- TX_BUF_ALLOC_SIZE,
- &(txring->buf_dma[i]),
- GFP_KERNEL);
- if(!txring->buf_virt[i])
- break;
- }
-
- /*
- * Cleanup allocated memories when error
- */
- if(i != RING_DESC_NR) {
- for(--i ; i >= 0 ; --i) {
- dma_free_coherent(&(jme->pdev->dev),
- TX_BUF_ALLOC_SIZE,
- txring->buf_virt[i],
- txring->buf_dma[i]);
- }
- dma_free_coherent(&(jme->pdev->dev),
- TX_RING_ALLOC_SIZE,
- txring->alloc,
- txring->dmaalloc);
- txring->alloc = NULL;
- txring->desc = NULL;
- txring->dmaalloc = 0;
- txring->dma = 0;
- return -ENOMEM;
- }
-
+ memset(txring->bufinf, 0,
+ sizeof(struct jme_buffer_info) * RING_DESC_NR);
return 0;
}
-__always_inline static void jme_free_tx_resources(struct jme_adapter *jme)
+static void
+jme_free_tx_resources(struct jme_adapter *jme)
{
int i;
struct jme_ring *txring = &(jme->txring[0]);
+ struct jme_buffer_info *txbi = txring->bufinf;
if(txring->alloc) {
for(i = 0 ; i < RING_DESC_NR ; ++i) {
- if(txring->buf_virt[i]) {
- dma_free_coherent(&(jme->pdev->dev),
- TX_BUF_ALLOC_SIZE,
- txring->buf_virt[i],
- txring->buf_dma[i]);
+ txbi = txring->bufinf + i;
+ if(txbi->skb) {
+ dev_kfree_skb(txbi->skb);
+ txbi->skb = NULL;
}
+ txbi->mapping = 0;
+ txbi->len = 0;
+ txbi->nr_desc = 0;
}
dma_free_coherent(&(jme->pdev->dev),
TX_RING_ALLOC_SIZE,
txring->alloc,
txring->dmaalloc);
- txring->alloc = NULL;
- txring->desc = NULL;
- txring->dmaalloc = 0;
- txring->dma = 0;
+
+ txring->alloc = NULL;
+ txring->desc = NULL;
+ txring->dmaalloc = 0;
+ txring->dma = 0;
}
- txring->next_to_use = 0;
- txring->next_to_clean = 0;
+ txring->next_to_use = 0;
+ txring->next_to_clean = 0;
+ txring->nr_free = 0;
}
-__always_inline static void jme_enable_tx_engine(struct jme_adapter *jme)
+__always_inline static void
+jme_enable_tx_engine(struct jme_adapter *jme)
{
/*
* Select Queue 0
/*
	 * Setup TX Queue 0 DMA Base Address
*/
- jwrite32(jme, JME_TXDBA, jme->txring[0].dma);
- jwrite32(jme, JME_TXNDA, jme->txring[0].dma);
+ jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
+ jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
+ jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
/*
	 * Setup TX Descriptor Count
* Enable TX Engine
*/
wmb();
- jwrite32(jme, JME_TXCS, TXCS_DEFAULT |
- TXCS_SELECT_QUEUE0 |
- TXCS_ENABLE);
+ jwrite32(jme, JME_TXCS, jme->reg_txcs |
+ TXCS_SELECT_QUEUE0 |
+ TXCS_ENABLE);
}
-__always_inline static void jme_disable_tx_engine(struct jme_adapter *jme)
+__always_inline static void
+jme_disable_tx_engine(struct jme_adapter *jme)
{
int i;
__u32 val;
/*
* Disable TX Engine
*/
- jwrite32(jme, JME_TXCS, TXCS_DEFAULT);
+ jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);
val = jread32(jme, JME_TXCS);
for(i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i)
{
- udelay(1);
+ mdelay(1);
val = jread32(jme, JME_TXCS);
}
if(!i)
- printk(KERN_ERR "Disable TX engine timeout.\n");
+ jeprintk(jme->dev->name, "Disable TX engine timeout.\n");
}
-__always_inline static void jme_set_clean_rxdesc(struct jme_adapter *jme,
- int i)
+static void
+jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
struct jme_ring *rxring = jme->rxring;
- struct RxDesc* rxdesc = rxring->desc;
-
- memset(rxdesc + i, 0, RX_DESC_SIZE);
- rxdesc[i].desc1.bufaddrl = cpu_to_le32(ALIGN(rxring->buf_dma[i], 8));
- rxdesc[i].desc1.datalen = cpu_to_le16(RX_BUF_SIZE);
+ register volatile struct rxdesc* rxdesc = rxring->desc;
+ struct jme_buffer_info *rxbi = rxring->bufinf;
+ rxdesc += i;
+ rxbi += i;
+
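+	/*
+	 * Refill descriptor i from its buffer info and hand it back to
+	 * the NIC; RXFLAG_OWN is set after the wmb() so the hardware
+	 * never sees a half-written descriptor.
+	 */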
+ rxdesc->dw[0] = 0;
+ rxdesc->dw[1] = 0;
+ rxdesc->desc1.bufaddrh = cpu_to_le32((__u64)rxbi->mapping >> 32);
+ rxdesc->desc1.bufaddrl = cpu_to_le32(
+ (__u64)rxbi->mapping & 0xFFFFFFFFUL);
+ rxdesc->desc1.datalen = cpu_to_le16(rxbi->len);
+ if(jme->dev->features & NETIF_F_HIGHDMA)
+ rxdesc->desc1.flags = RXFLAG_64BIT;
wmb();
- rxdesc[i].desc1.flags = RXFLAG_OWN | RXFLAG_INT;
-
-#ifdef RX_QUEUE_DEBUG
- dprintk("RX Ring Buf Address(%08x,%08x,%d).\n",
- rxring->buf_dma[i],
- (rxdesc[i].all[12] << 0) |
- (rxdesc[i].all[13] << 8) |
- (rxdesc[i].all[14] << 16) |
- (rxdesc[i].all[15] << 24),
- (rxdesc[i].all[4] << 0) |
- (rxdesc[i].all[5] << 8));
-#endif
-
+ rxdesc->desc1.flags |= RXFLAG_OWN | RXFLAG_INT;
}
-__always_inline static int jme_setup_rx_resources(struct jme_adapter *jme)
+static int
+jme_make_new_rx_buf(struct jme_adapter *jme, int i)
{
- int i;
struct jme_ring *rxring = &(jme->rxring[0]);
+ struct jme_buffer_info *rxbi = rxring->bufinf;
+ unsigned long offset;
+ struct sk_buff* skb;
- rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
- RX_RING_ALLOC_SIZE,
- &(rxring->dmaalloc),
- GFP_KERNEL);
- if(!rxring->alloc)
+ skb = netdev_alloc_skb(jme->dev, RX_BUF_ALLOC_SIZE);
+ if(unlikely(!skb))
return -ENOMEM;
- /*
- * 16 Bytes align
- */
- rxring->desc = (void*)ALIGN((unsigned long)(rxring->alloc), 16);
- rxring->dma = ALIGN(rxring->dmaalloc, 16);
- rxring->next_to_use = 0;
- rxring->next_to_clean = 0;
+ if(unlikely(skb_is_nonlinear(skb))) {
+ dprintk(jme->dev->name,
+ "Allocated skb fragged(%d).\n",
+ skb_shinfo(skb)->nr_frags);
+ dev_kfree_skb(skb);
+ return -ENOMEM;
+ }
-#ifdef RX_QUEUE_DEBUG
- dprintk("RX Ring Base Address(%08x,%08x).\n",
- (__u32)rxring->desc,
- rxring->dma);
-#endif
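+	/* Align skb->data to an RX_BUF_DMA_ALIGN boundary for DMA */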
+ if(unlikely(offset =
+ (unsigned long)(skb->data)
+ & (unsigned long)(RX_BUF_DMA_ALIGN - 1)))
+ skb_reserve(skb, RX_BUF_DMA_ALIGN - offset);
- /*
- * Initiallize Receive Descriptors
- */
- for(i = 0 ; i < RING_DESC_NR ; ++i) {
- rxring->buf_virt[i] = dma_alloc_coherent(&(jme->pdev->dev),
- RX_BUF_ALLOC_SIZE,
- &(rxring->buf_dma[i]),
- GFP_KERNEL);
- if(!rxring->buf_virt[i])
- break;
+ rxbi += i;
+ rxbi->skb = skb;
+ rxbi->len = skb_tailroom(skb);
+ rxbi->mapping = pci_map_single(jme->pdev,
+ skb->data,
+ rxbi->len,
+ PCI_DMA_FROMDEVICE);
- jme_set_clean_rxdesc(jme, i);
- }
+ return 0;
+}
- /*
- * Cleanup allocated memories when error
- */
- if(i != RING_DESC_NR) {
- for(--i ; i >= 0 ; --i) {
- dma_free_coherent(&(jme->pdev->dev),
- RX_BUF_ALLOC_SIZE,
- rxring->buf_virt[i],
- rxring->buf_dma[i]);
- }
- dma_free_coherent(&(jme->pdev->dev),
- RX_RING_ALLOC_SIZE,
- rxring->alloc,
- rxring->dmaalloc);
- rxring->alloc = NULL;
- rxring->desc = NULL;
- rxring->dmaalloc = 0;
- rxring->dma = 0;
- return -ENOMEM;
+static void
+jme_free_rx_buf(struct jme_adapter *jme, int i)
+{
+ struct jme_ring *rxring = &(jme->rxring[0]);
+ struct jme_buffer_info *rxbi = rxring->bufinf;
+ rxbi += i;
+
+ if(rxbi->skb) {
+ pci_unmap_single(jme->pdev,
+ rxbi->mapping,
+ rxbi->len,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(rxbi->skb);
+ rxbi->skb = NULL;
+ rxbi->mapping = 0;
+ rxbi->len = 0;
}
-
- return 0;
}
-__always_inline static void jme_free_rx_resources(struct jme_adapter *jme)
+static void
+jme_free_rx_resources(struct jme_adapter *jme)
{
int i;
struct jme_ring *rxring = &(jme->rxring[0]);
if(rxring->alloc) {
- for(i = 0 ; i < RING_DESC_NR ; ++i) {
- if(rxring->buf_virt[i]) {
- dma_free_coherent(&(jme->pdev->dev),
- RX_BUF_ALLOC_SIZE,
- rxring->buf_virt[i],
- rxring->buf_dma[i]);
- }
- }
+ for(i = 0 ; i < RING_DESC_NR ; ++i)
+ jme_free_rx_buf(jme, i);
dma_free_coherent(&(jme->pdev->dev),
RX_RING_ALLOC_SIZE,
rxring->next_to_clean = 0;
}
-__always_inline static void jme_enable_rx_engine(struct jme_adapter *jme)
+static int
+jme_setup_rx_resources(struct jme_adapter *jme)
{
- __u32 val;
+ int i;
+ struct jme_ring *rxring = &(jme->rxring[0]);
+
+ rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
+ RX_RING_ALLOC_SIZE,
+ &(rxring->dmaalloc),
+ GFP_ATOMIC);
+ if(!rxring->alloc) {
+ rxring->desc = NULL;
+ rxring->dmaalloc = 0;
+ rxring->dma = 0;
+ return -ENOMEM;
+ }
+ /*
+ * 16 Bytes align
+ */
+ rxring->desc = (void*)ALIGN((unsigned long)(rxring->alloc),
+ RING_DESC_ALIGN);
+ rxring->dma = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
+ rxring->next_to_use = 0;
+ rxring->next_to_clean = 0;
+
+ /*
+	 * Initialize Receive Descriptors
+ */
+ for(i = 0 ; i < RING_DESC_NR ; ++i) {
+ if(unlikely(jme_make_new_rx_buf(jme, i))) {
+ jme_free_rx_resources(jme);
+ return -ENOMEM;
+ }
+
+ jme_set_clean_rxdesc(jme, i);
+ }
+
+ return 0;
+}
+
+__always_inline static void
+jme_enable_rx_engine(struct jme_adapter *jme)
+{
/*
	 * Setup RX DMA Base Address
*/
- jwrite32(jme, JME_RXDBA, jme->rxring[0].dma);
- jwrite32(jme, JME_RXNDA, jme->rxring[0].dma);
+ jwrite32(jme, JME_RXDBA_LO, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);
+ jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
+ jwrite32(jme, JME_RXNDA, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);
/*
	 * Setup RX Descriptor Count
*/
jwrite32(jme, JME_RXQDC, RING_DESC_NR);
- /*
+ /*
* Setup Unicast Filter
*/
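+	/* Base RX mode: strip VLAN tags and pre-pad received frames */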
+ jme->reg_rxmcs = RXMCS_VTAGRM | RXMCS_PREPAD;
jme_set_multi(jme->dev);
/*
* Enable RX Engine
*/
wmb();
- val = jread32(jme, JME_RXCS);
- val |= RXCS_ENABLE | RXCS_QST;
- jwrite32(jme, JME_RXCS, val);
+ jwrite32(jme, JME_RXCS, RXCS_DEFAULT |
+ RXCS_QUEUESEL_Q0 |
+ RXCS_ENABLE |
+ RXCS_QST);
}
-__always_inline static void jme_disable_rx_engine(struct jme_adapter *jme)
+__always_inline static void
+jme_restart_rx_engine(struct jme_adapter *jme)
+{
+ /*
+ * Start RX Engine
+ */
+ jwrite32(jme, JME_RXCS, RXCS_DEFAULT |
+ RXCS_QUEUESEL_Q0 |
+ RXCS_ENABLE |
+ RXCS_QST);
+}
+
+
+__always_inline static void
+jme_disable_rx_engine(struct jme_adapter *jme)
{
int i;
__u32 val;
val = jread32(jme, JME_RXCS);
for(i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i)
{
- udelay(1);
+ mdelay(100);
val = jread32(jme, JME_RXCS);
}
if(!i)
- printk(KERN_ERR "Disable RX engine timeout.\n");
+ jeprintk(jme->dev->name, "Disable RX engine timeout.\n");
}
-__always_inline static void jme_process_tx_complete(struct net_device *netdev)
+static void
+jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
{
- /*
- * Clear sk_buff here in the future
- * (Allowing NIC directly DMA with sk_buff kernel requested to send)
- */
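+	/*
+	 * Count how many consecutive intervals voted for the same
+	 * profile; the count restarts whenever the vote changes.
+	 */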
+ if(dpi->attempt == atmp) {
+ ++(dpi->cnt);
+ }
+ else {
+ dpi->attempt = atmp;
+ dpi->cnt = 0;
+ }
}
-__always_inline static void jme_process_receive(struct net_device *netdev)
+static void
+jme_dynamic_pcc(struct jme_adapter *jme)
+{
+ register struct dynpcc_info *dpi = &(jme->dpi);
+
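+	/*
+	 * Once per PCC_INTERVAL, pick a coalescing profile from the RX
+	 * byte rate (P3 for the highest, then P2, else P1) and switch
+	 * only after the same profile wins several intervals in a row.
+	 */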
+ if(jiffies >= dpi->check_point) {
+ if(jiffies > (dpi->check_point + PCC_INTERVAL))
+ jme_attempt_pcc(dpi, PCC_P1);
+ else if((NET_STAT(jme).rx_bytes - dpi->last_bytes) >
+ PCC_P3_THRESHOLD)
+ jme_attempt_pcc(dpi, PCC_P3);
+ else if((NET_STAT(jme).rx_bytes - dpi->last_bytes) >
+ PCC_P2_THRESHOLD)
+ jme_attempt_pcc(dpi, PCC_P2);
+ else
+ jme_attempt_pcc(dpi, PCC_P1);
+
+ if(unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
+ jme_set_rx_pcc(jme, dpi->attempt);
+ dpi->cur = dpi->attempt;
+ dpi->cnt = 0;
+ }
+
+ dpi->last_bytes = NET_STAT(jme).rx_bytes;
+ dpi->last_pkts = NET_STAT(jme).rx_packets;
+ dpi->check_point = jiffies + PCC_INTERVAL;
+ }
+}
+
+static void
+jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
{
- struct jme_adapter *jme = netdev_priv(netdev);
struct jme_ring *rxring = &(jme->rxring[0]);
- struct RxDesc *rxdesc;
- __u8 *rxbuf;
+ volatile struct rxdesc *rxdesc = rxring->desc;
+ struct jme_buffer_info *rxbi = rxring->bufinf;
struct sk_buff *skb;
- int i, start, cnt;
- int framesize, desccnt;
+ int framesize;
- /*
- * Assume that one descriptor per frame,
- * Should be fixed in the future
- * (or not? If buffer already large enough to store entire packet.)
- */
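+	/*
+	 * Pass the filled skb up the stack and attach a freshly
+	 * allocated buffer to this descriptor; on allocation failure,
+	 * recycle the old buffer and count the frame as dropped.
+	 */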
+ rxdesc += idx;
+ rxbi += idx;
- rxdesc = rxring->desc;
+ skb = rxbi->skb;
+ pci_dma_sync_single_for_cpu(jme->pdev,
+ rxbi->mapping,
+ rxbi->len,
+ PCI_DMA_FROMDEVICE);
- spin_lock(&jme->recv_lock);
- i = start = rxring->next_to_clean;
- /*
- * Decide how many descriptors need to be processed
- * We have to process entire queue in worst case
- */
- for(cnt = 0 ; cnt < RING_DESC_NR ; ++cnt)
+ if(unlikely(jme_make_new_rx_buf(jme, idx))) {
+ pci_dma_sync_single_for_device(jme->pdev,
+ rxbi->mapping,
+ rxbi->len,
+ PCI_DMA_FROMDEVICE);
+
+ ++(NET_STAT(jme).rx_dropped);
+ }
+ else {
+ framesize = le16_to_cpu(rxdesc->descwb.framesize)
+ - RX_PREPAD_SIZE;
+
+ skb_reserve(skb, RX_PREPAD_SIZE);
+ skb_put(skb, framesize);
+ skb->protocol = eth_type_trans(skb, jme->dev);
+
+ netif_rx(skb);
+
+ if(le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST_MUL)
+ ++(NET_STAT(jme).multicast);
+
+ jme->dev->last_rx = jiffies;
+ NET_STAT(jme).rx_bytes += framesize;
+ ++(NET_STAT(jme).rx_packets);
+ }
+
+ jme_set_clean_rxdesc(jme, idx);
+
+ jme_dynamic_pcc(jme);
+
+}
+
+static int
+jme_process_receive(struct jme_adapter *jme, int limit)
+{
+ struct jme_ring *rxring = &(jme->rxring[0]);
+ volatile struct rxdesc *rxdesc = rxring->desc;
+ int i, j, ccnt, desccnt;
+
+ i = rxring->next_to_clean;
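+	/*
+	 * Walk the ring from next_to_clean, stopping at the first
+	 * descriptor the NIC still owns or has not written back.
+	 * Multi-descriptor and errored frames are recycled without
+	 * being fed to the stack.
+	 */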
+ while( limit-- > 0 )
{
- if(rxdesc[i].descwb.flags & RXWBFLAG_OWN) {
- rxring->next_to_clean = i;
- break;
+ rxdesc = rxring->desc;
+ rxdesc += i;
+
+ if((rxdesc->descwb.flags & RXWBFLAG_OWN) ||
+ !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
+ goto out;
+
+ desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;
+
+ rx_dbg(jme->dev->name, "RX: Cleaning %d\n", i);
+
+ if(desccnt > 1 ||
+ rxdesc->descwb.errstat & RXWBERR_ALLERR) {
+
+ if(rxdesc->descwb.errstat & RXWBERR_CRCERR)
+ ++(NET_STAT(jme).rx_crc_errors);
+ else if(rxdesc->descwb.errstat & RXWBERR_OVERUN)
+ ++(NET_STAT(jme).rx_fifo_errors);
+ else
+ ++(NET_STAT(jme).rx_errors);
+
+ if(desccnt > 1)
+ limit -= desccnt - 1;
+
+ for(j = i, ccnt = desccnt ; ccnt-- ; ) {
+ jme_set_clean_rxdesc(jme, j);
+
+ if(unlikely(++j == RING_DESC_NR))
+ j = 0;
+ }
+
+ }
+ else {
+ jme_alloc_and_feed_skb(jme, i);
}
- if(unlikely(++i == RING_DESC_NR))
- i = 0;
+
+ if((i += desccnt) >= RING_DESC_NR)
+ i -= RING_DESC_NR;
}
- spin_unlock(&jme->recv_lock);
- /*
- * Process descriptors independently accross cpu
- * --- save for multiple cpu handling
- */
- for( i = start ; cnt-- ; ) {
- /*
- * Pass received packet to kernel
- */
- rxbuf = (void*)ALIGN((unsigned long)(rxring->buf_virt[i]), 8);
- desccnt = rxdesc[i].descwb.desccnt & RXWBDCNT_DCNT;
- framesize = le16_to_cpu(rxdesc[i].descwb.framesize);
- skb = dev_alloc_skb(framesize);
- if(!skb) {
- printk(KERN_ERR PFX "Out of memory.\n");
- ++(netdev->stats.rx_dropped);
+out:
+ rx_dbg(jme->dev->name, "RX: Stop at %d\n", i);
+ rx_dbg(jme->dev->name, "RX: RXNDA offset %d\n",
+ (jread32(jme, JME_RXNDA) - jread32(jme, JME_RXDBA_LO))
+ >> 4);
+
+ rxring->next_to_clean = i;
+
+ return limit > 0 ? limit : 0;
+
+}
+
+static void
+jme_link_change_tasklet(unsigned long arg)
+{
+ struct jme_adapter *jme = (struct jme_adapter*)arg;
+ struct net_device *netdev = jme->dev;
+ int timeout = WAIT_TASKLET_TIMEOUT;
+ int rc;
+
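+	/*
+	 * link_changing doubles as a try-lock: it rests at 1, only the
+	 * path that decrements it to zero proceeds, and it is restored
+	 * by the atomic_inc() at "out".
+	 */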
+ if(!atomic_dec_and_test(&jme->link_changing))
+ goto out;
+
+ if(jme_check_link(netdev, 1))
+ goto out;
+
+ netif_stop_queue(netdev);
+
+ while(--timeout > 0 &&
+ (
+ atomic_read(&jme->rx_cleaning) != 1 ||
+ atomic_read(&jme->tx_cleaning) != 1
+ )) {
+
+ mdelay(1);
+ }
+
+ if(netif_carrier_ok(netdev)) {
+ jme_reset_mac_processor(jme);
+ jme_free_rx_resources(jme);
+ jme_free_tx_resources(jme);
+ }
+
+ jme_check_link(netdev, 0);
+ if(netif_carrier_ok(netdev)) {
+ rc = jme_setup_rx_resources(jme);
+ if(rc) {
+ jeprintk(netdev->name,
+ "Allocating resources for RX error"
+ ", Device STOPPED!\n");
+ goto out;
}
- else {
- skb_put(skb, framesize);
- skb_copy_to_linear_data(skb, rxbuf, framesize);
- skb->protocol = eth_type_trans(skb, netdev);
- netif_rx(skb);
-
- netdev->last_rx = jiffies;
- netdev->stats.rx_bytes += framesize;
- ++(netdev->stats.rx_packets);
+
+
+ rc = jme_setup_tx_resources(jme);
+ if(rc) {
+ jeprintk(netdev->name,
+ "Allocating resources for TX error"
+ ", Device STOPPED!\n");
+ goto err_out_free_rx_resources;
}
- dprintk("DESCCNT: %u, FSIZE: %u, ADDRH: %08x, "
- "ADDRL: %08x, FLAGS: %04x, STAT: %02x, "
- "DST:%02x:%02x:%02x:%02x:%02x:%02x, "
- "DSTCRC: %d\n",
- desccnt,
- framesize,
- le32_to_cpu(rxdesc[i].dw[2]),
- le32_to_cpu(rxdesc[i].dw[3]),
- le16_to_cpu(rxdesc[i].descwb.flags),
- rxdesc[i].descwb.stat,
- rxbuf[0], rxbuf[1], rxbuf[2],
- rxbuf[3], rxbuf[4], rxbuf[5],
- ether_crc(ETH_ALEN, rxbuf) & 0x3F);
+ jme_enable_rx_engine(jme);
+ jme_enable_tx_engine(jme);
+ netif_start_queue(netdev);
+ }
- /*
- * Cleanup descriptor for next receive
- */
- jme_set_clean_rxdesc(jme, i);
+ goto out;
+
+err_out_free_rx_resources:
+ jme_free_rx_resources(jme);
+out:
+ atomic_inc(&jme->link_changing);
+}
+
+static void
+jme_rx_clean_tasklet(unsigned long arg)
+{
+ struct jme_adapter *jme = (struct jme_adapter*)arg;
+
+ if(!atomic_dec_and_test(&jme->rx_cleaning))
+ goto out;
+
+ if(atomic_read(&jme->link_changing) != 1)
+ goto out;
+
+ if(unlikely(netif_queue_stopped(jme->dev)))
+ goto out;
- if(unlikely(++i == RING_DESC_NR))
- i = 0;
+ jme_process_receive(jme, RING_DESC_NR);
+
+out:
+ atomic_inc(&jme->rx_cleaning);
+}
+
+static void
+jme_rx_empty_tasklet(unsigned long arg)
+{
+ struct jme_adapter *jme = (struct jme_adapter*)arg;
+
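+	/*
+	 * The RX ring ran dry: clean and refill it, then kick the
+	 * queue again via jme_restart_rx_engine().
+	 */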
+ if(atomic_read(&jme->link_changing) != 1)
+ return;
+
+ if(unlikely(netif_queue_stopped(jme->dev)))
+ return;
+
+ jme_rx_clean_tasklet(arg);
+ jme_restart_rx_engine(jme);
+}
+
+static void
+jme_tx_clean_tasklet(unsigned long arg)
+{
+ struct jme_adapter *jme = (struct jme_adapter*)arg;
+ struct jme_ring *txring = &(jme->txring[0]);
+ volatile struct txdesc *txdesc = txring->desc;
+ struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
+ int i, j, cnt = 0, max;
+
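+	/*
+	 * Reclaim completed TX descriptors: unmap each sent packet's
+	 * buffers, free its skb, and return the descriptors to the
+	 * free pool under tx_lock.
+	 */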
+ if(!atomic_dec_and_test(&jme->tx_cleaning))
+ goto out;
+
+ if(atomic_read(&jme->link_changing) != 1)
+ goto out;
+
+ if(unlikely(netif_queue_stopped(jme->dev)))
+ goto out;
+
+ spin_lock(&jme->tx_lock);
+ max = RING_DESC_NR - txring->nr_free;
+ spin_unlock(&jme->tx_lock);
+
+ tx_dbg(jme->dev->name, "Tx Tasklet: In\n");
+
+ for(i = txring->next_to_clean ; cnt < max ; ) {
+
+ ctxbi = txbi + i;
+
+ if(ctxbi->skb && !(txdesc[i].desc1.flags & TXFLAG_OWN)) {
+
+ tx_dbg(jme->dev->name,
+ "Tx Tasklet: Clean %d+%d\n",
+ i, ctxbi->nr_desc);
+
+ for(j = 1 ; j < ctxbi->nr_desc ; ++j) {
+ ttxbi = txbi + ((i + j) & (RING_DESC_NR - 1));
+ txdesc[(i+j)&(RING_DESC_NR-1)].dw[0] = 0;
+
+ pci_unmap_single(jme->pdev,
+ ttxbi->mapping,
+ ttxbi->len,
+ PCI_DMA_TODEVICE);
+
+ NET_STAT(jme).tx_bytes += ttxbi->len;
+ ttxbi->mapping = 0;
+ ttxbi->len = 0;
+ }
+
+ dev_kfree_skb(ctxbi->skb);
+ ctxbi->skb = NULL;
+
+ cnt += ctxbi->nr_desc;
+
+ ++(NET_STAT(jme).tx_packets);
+ }
+ else {
+ if(!ctxbi->skb)
+ tx_dbg(jme->dev->name,
+ "Tx Tasklet:"
+ " Stoped due to no skb.\n");
+ else
+ tx_dbg(jme->dev->name,
+ "Tx Tasklet:"
+ "Stoped due to not done.\n");
+ break;
+ }
+
+ if(unlikely((i += ctxbi->nr_desc) >= RING_DESC_NR))
+ i -= RING_DESC_NR;
+
+ ctxbi->nr_desc = 0;
}
+ tx_dbg(jme->dev->name,
+ "Tx Tasklet: Stop %d Jiffies %lu\n",
+ i, jiffies);
+ txring->next_to_clean = i;
+
+ spin_lock(&jme->tx_lock);
+ txring->nr_free += cnt;
+ spin_unlock(&jme->tx_lock);
+
+out:
+ atomic_inc(&jme->tx_cleaning);
}
-static irqreturn_t jme_intr(int irq, void *dev_id)
+static irqreturn_t
+jme_intr(int irq, void *dev_id)
{
struct net_device *netdev = dev_id;
struct jme_adapter *jme = netdev_priv(netdev);
irqreturn_t rc = IRQ_HANDLED;
- __u32 intrstat = jread32(jme, JME_IEVE);
-#ifdef RX_QUEUE_DEBUG
- __u32 val;
+ __u32 intrstat;
+
+#if USE_IEVE_SHADOW
+ pci_dma_sync_single_for_cpu(jme->pdev,
+ jme->shadow_dma,
+ sizeof(__u32) * SHADOW_REG_NR,
+ PCI_DMA_FROMDEVICE);
+ intrstat = jme->shadow_regs[SHADOW_IEVE];
+ jme->shadow_regs[SHADOW_IEVE] = 0;
+#else
+ intrstat = jread32(jme, JME_IEVE);
#endif
-#if 0
/*
- * Don't disable interrupt, the driver should be
- * working fine with multiple interrupt handling
- * at the same time. (When Multi-core CPU)
+ * Check if it's really an interrupt for us
*/
+ if(intrstat == 0) {
+ rc = IRQ_NONE;
+ goto out;
+ }
/*
- * Temporary disable all Interrupts From Our NIC
+	 * Check if the device still exists
*/
- jwrite32(jme, JME_IENC, INTR_ENABLE);
- wmb();
-#endif
-
- dprintk("Interrupt received(%08x).\n", intrstat);
+ if(unlikely(intrstat == ~((typeof(intrstat))0))) {
+ rc = IRQ_NONE;
+ goto out;
+ }
+ /*
+ * Allow one interrupt handling at a time
+ */
+ if(unlikely(!atomic_dec_and_test(&jme->intr_sem)))
+ goto out_inc;
/*
- * Check if it's really an interrupt for us
- * and if the device still exist
+ * Disable interrupt
*/
- if((intrstat & INTR_ENABLE) == 0 || intrstat == ~0) {
- rc = IRQ_NONE;
- goto out;
- }
+ jwrite32f(jme, JME_IENC, INTR_ENABLE);
if(intrstat & INTR_LINKCH) {
- /*
- * Process Link status change event
- */
- jme_check_link(netdev);
-
- /*
- * Write 1 clear Link status change Interrupt
- */
- jwrite32(jme, JME_IEVE, INTR_LINKCH);
+ tasklet_schedule(&jme->linkch_task);
+ goto out_deassert;
}
- if(intrstat & INTR_RX0) {
- /*
- * Process event
- */
- jme_process_receive(netdev);
+ if(intrstat & INTR_RX0EMP)
+ tasklet_schedule(&jme->rxempty_task);
- /*
- * Write 1 clear Interrupt
- */
- jwrite32(jme, JME_IEVE, INTR_RX0);
+ if(intrstat & (INTR_PCCRX0TO | INTR_PCCRX0))
+ tasklet_schedule(&jme->rxclean_task);
- dprintk("Received From Queue 0.\n");
+ if(intrstat & (INTR_PCCTXTO | INTR_PCCTX))
+ tasklet_schedule(&jme->txclean_task);
-#ifdef RX_QUEUE_DEBUG
- //Poll out the Receive Queue Next Descriptor Address/Status
- val = jread32(jme, JME_RXCS);
- val |= RXCS_QST;
- jwrite32(jme, JME_RXCS, val);
- wmb();
- val = jread32(jme, JME_RXNDA);
- dprintk("NEXT_RX_DESC.(%08x)\n", val);
-#endif
-
- }
-
- if(intrstat & INTR_RX0EMP) {
+ if((intrstat & ~INTR_ENABLE) != 0) {
/*
- * Write 1 clear Interrupt
+		 * Some interrupts were not handled,
+		 * but are not enabled either (for debugging)
*/
- jwrite32(jme, JME_IEVE, INTR_RX0EMP);
-
- dprintk("Received Queue 0 is running-out.\n");
}
- if(intrstat & INTR_TX0) {
- /*
- * Process event
- */
- jme_process_tx_complete(netdev);
-
- /*
- * Write 1 clear Interrupt
- */
- jwrite32(jme, JME_IEVE, INTR_TX0);
-
- dprintk("Queue 0 transmit complete.\n");
- }
+out_deassert:
+ /*
+ * Deassert interrupts
+ */
+ jwrite32f(jme, JME_IEVE, intrstat);
-out:
+ /*
+ * Re-enable interrupt
+ */
+ jwrite32f(jme, JME_IENS, INTR_ENABLE);
-#if 0
+out_inc:
/*
- * Re-enable interrupts
+ * Enable next interrupt handling
*/
- wmb();
- jwrite32(jme, JME_IENS, INTR_ENABLE);
-#endif
+ atomic_inc(&jme->intr_sem);
+
+out:
return rc;
}
-static int jme_open(struct net_device *netdev)
+static void
+jme_restart_an(struct jme_adapter *jme)
{
- struct jme_adapter *jme = netdev_priv(netdev);
- int CHECK_VAR;
+ __u32 bmcr;
- CHECK_AND_GOTO(request_irq(jme->pdev->irq, jme_intr, IRQF_SHARED,
- netdev->name, netdev),
- err_out,
- "Requesting IRQ error.")
-
- CHECK_AND_GOTO(jme_setup_rx_resources(jme),
- err_out_free_irq,
- "Error allocating resources for RX.")
+ bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
+}
- CHECK_AND_GOTO(jme_setup_tx_resources(jme),
- err_out_free_rx_resources,
- "Error allocating resources for TX.")
+static int
+jme_open(struct net_device *netdev)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ int rc, timeout = 100;
+
+ while(
+ --timeout > 0 &&
+ (
+ atomic_read(&jme->link_changing) != 1 ||
+ atomic_read(&jme->rx_cleaning) != 1 ||
+ atomic_read(&jme->tx_cleaning) != 1
+ )
+ )
+ msleep(10);
jme_reset_mac_processor(jme);
- jme_check_link(netdev);
+
+ rc = request_irq(jme->pdev->irq, jme_intr,
+ IRQF_SHARED, netdev->name, netdev);
+ if(rc) {
+ printk(KERN_ERR PFX "Requesting IRQ error.\n");
+ goto err_out;
+ }
+ jme_enable_shadow(jme);
jme_start_irq(jme);
- jme_enable_rx_engine(jme);
- jme_enable_tx_engine(jme);
- netif_start_queue(netdev);
+ jme_restart_an(jme);
return 0;
-err_out_free_rx_resources:
- jme_free_rx_resources(jme);
-err_out_free_irq:
- free_irq(jme->pdev->irq, jme->dev);
err_out:
netif_stop_queue(netdev);
netif_carrier_off(netdev);
- return CHECK_VAR;
+ return rc;
}
-static int jme_close(struct net_device *netdev)
+static int
+jme_close(struct net_device *netdev)
{
struct jme_adapter *jme = netdev_priv(netdev);
netif_carrier_off(netdev);
jme_stop_irq(jme);
+ jme_disable_shadow(jme);
free_irq(jme->pdev->irq, jme->dev);
+ tasklet_kill(&jme->linkch_task);
+ tasklet_kill(&jme->txclean_task);
+ tasklet_kill(&jme->rxclean_task);
+ tasklet_kill(&jme->rxempty_task);
jme_disable_rx_engine(jme);
jme_disable_tx_engine(jme);
jme_free_rx_resources(jme);
return 0;
}
-static int jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+/*
+ * This function is already protected by netif_tx_lock()
+ */
+static int
+jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct jme_adapter *jme = netdev_priv(netdev);
- struct jme_ring *txring = &(jme->txring[0]);
- struct TxDesc *txdesc = txring->desc;
- int idx;
-
+ int rc;
- /*
- * Check if transmit queue is already full
- * and take one descriptor to use
- */
- spin_lock(&jme->xmit_lock);
- idx = txring->next_to_use;
- if(unlikely(txdesc[idx].desc1.flags & TXFLAG_OWN)) {
- spin_unlock(&jme->xmit_lock);
+ if(unlikely(netif_queue_stopped(jme->dev)))
return NETDEV_TX_BUSY;
- }
- if(unlikely(++(txring->next_to_use) == RING_DESC_NR))
- txring->next_to_use = 0;
- spin_unlock(&jme->xmit_lock);
+ rc = jme_set_new_txdesc(jme, skb);
- /*
- * Fill up TX descriptors
- */
- skb_copy_from_linear_data(skb,
- (void*)ALIGN((unsigned long)(txring->buf_virt[idx]), 8),
- skb->len);
- jme_set_new_txdesc(jme, idx, skb->len);
+ if(unlikely(rc != NETDEV_TX_OK))
+ return rc;
- /*
- * Since still using copy now. we could free it here.
- */
- dev_kfree_skb(skb);
-
- /*
- * Tell MAC HW to send
- */
- jwrite32(jme, JME_TXCS, TXCS_QUEUE0S |
- TXCS_DEFAULT |
- TXCS_SELECT_QUEUE0 |
- TXCS_ENABLE);
-
- netdev->stats.tx_bytes += skb->len;
- ++(netdev->stats.tx_packets);
+ jwrite32(jme, JME_TXCS, jme->reg_txcs |
+ TXCS_SELECT_QUEUE0 |
+ TXCS_QUEUE0S |
+ TXCS_ENABLE);
netdev->trans_start = jiffies;
- return 0;
+ return NETDEV_TX_OK;
}
-static int jme_set_macaddr(struct net_device *netdev, void *p)
+static int
+jme_set_macaddr(struct net_device *netdev, void *p)
{
struct jme_adapter *jme = netdev_priv(netdev);
struct sockaddr *addr = p;
addr->sa_data[2] << 16 |
addr->sa_data[1] << 8 |
addr->sa_data[0];
- jwrite32(jme, JME_RXUMA, val);
+ jwrite32(jme, JME_RXUMA_LO, val);
val = addr->sa_data[5] << 8 |
addr->sa_data[4];
- jwrite32(jme, JME_RXUMA+4, val);
+ jwrite32(jme, JME_RXUMA_HI, val);
spin_unlock(&jme->macaddr_lock);
return 0;
}
-static void jme_set_multi(struct net_device *netdev)
+static void
+jme_set_multi(struct net_device *netdev)
{
- struct jme_adapter *jme = netdev_priv(netdev);
+ struct jme_adapter *jme = netdev_priv(netdev);
u32 mc_hash[2] = {};
__u32 val;
int i;
+ val = jme->reg_rxmcs | RXMCS_BRDFRAME | RXMCS_UNIFRAME;
- spin_lock(&jme->macaddr_lock);
- val = RXMCS_BRDFRAME | RXMCS_UNIFRAME;
-
- if (netdev->flags & IFF_PROMISC)
+ if (netdev->flags & IFF_PROMISC) {
val |= RXMCS_ALLFRAME;
- else if (netdev->flags & IFF_ALLMULTI)
+ }
+ else if (netdev->flags & IFF_ALLMULTI) {
val |= RXMCS_ALLMULFRAME;
+ }
else if(netdev->flags & IFF_MULTICAST) {
- struct dev_mc_list *mclist;
- int bit_nr;
+ struct dev_mc_list *mclist;
+ int bit_nr;
val |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
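+		/*
+		 * Build a 64-bit multicast hash filter: the low six CRC
+		 * bits of each address select one bit across the two
+		 * 32-bit RXMCHT registers.
+		 */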
- for (i = 0, mclist = netdev->mc_list;
- mclist && i < netdev->mc_count;
- ++i, mclist = mclist->next) {
+ for (i = 0, mclist = netdev->mc_list;
+ mclist && i < netdev->mc_count;
+ ++i, mclist = mclist->next) {
+
bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
- dprintk("Adding MCAddr: "
- "%02x:%02x:%02x:%02x:%02x:%02x (%d)\n",
- mclist->dmi_addr[0],
- mclist->dmi_addr[1],
- mclist->dmi_addr[2],
- mclist->dmi_addr[3],
- mclist->dmi_addr[4],
- mclist->dmi_addr[5],
- bit_nr);
}
- jwrite32(jme, JME_RXMCHT, mc_hash[0]);
- jwrite32(jme, JME_RXMCHT+4, mc_hash[1]);
+ jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
+ jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
}
wmb();
jwrite32(jme, JME_RXMCS, val);
- spin_unlock(&jme->macaddr_lock);
-
- dprintk("RX Mode changed: %08x\n", val);
}
-static int jme_change_mtu(struct net_device *dev, int new_mtu)
+static int
+jme_change_mtu(struct net_device *dev, int new_mtu)
{
/*
- * Do not support MTU change for now.
+	 * MTU change is not supported for now.
*/
return -EINVAL;
}
-static void jme_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *info)
+static void
+jme_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
{
struct jme_adapter *jme = netdev_priv(netdev);
strcpy(info->bus_info, pci_name(jme->pdev));
}
-static int jme_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int
+jme_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
{
struct jme_adapter *jme = netdev_priv(netdev);
int rc;
return rc;
}
-static int jme_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int
+jme_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
{
struct jme_adapter *jme = netdev_priv(netdev);
int rc;
- spin_lock(&jme->phy_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&jme->phy_lock, flags);
rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
- spin_unlock(&jme->phy_lock);
+ spin_unlock_irqrestore(&jme->phy_lock, flags);
+
return rc;
}
-static u32 jme_get_link(struct net_device *netdev) {
+static __u32
+jme_get_link(struct net_device *netdev)
+{
struct jme_adapter *jme = netdev_priv(netdev);
return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
}
.get_link = jme_get_link,
};
-static int __devinit jme_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
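+/*
+ * Pick the widest DMA mask the platform accepts: returns 1 when
+ * the 64-bit masks are set, 0 on the 32-bit fallback, -1 on failure.
+ */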
+static int
+jme_pci_dma64(struct pci_dev *pdev)
+{
+ if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
+ if(!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
+ return 1;
+
+ if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
+ if(!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
+ return 0;
+
+ return -1;
+}
+
+static int __devinit
+jme_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
- int CHECK_VAR = 0;
+ int rc = 0, using_dac;
struct net_device *netdev;
struct jme_adapter *jme;
- DECLARE_MAC_BUF(mac);
/*
* set up PCI device basics
*/
- CHECK_AND_GOTO(pci_enable_device(pdev),
- err_out,
- "Cannot enable PCI device.")
+ rc = pci_enable_device(pdev);
+ if(rc) {
+ printk(KERN_ERR PFX "Cannot enable PCI device.\n");
+ goto err_out;
+ }
- CHECK_AND_GOTO(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM),
- err_out_disable_pdev,
- "No PCI resource region found.")
+ using_dac = jme_pci_dma64(pdev);
+ if(using_dac < 0) {
+ printk(KERN_ERR PFX "Cannot set PCI DMA Mask.\n");
+ rc = -EIO;
+ goto err_out_disable_pdev;
+ }
+
+ if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ printk(KERN_ERR PFX "No PCI resource region found.\n");
+ rc = -ENOMEM;
+ goto err_out_disable_pdev;
+ }
- CHECK_AND_GOTO(pci_request_regions(pdev, DRV_NAME),
- err_out_disable_pdev,
- "Cannot obtain PCI resource region.")
+ rc = pci_request_regions(pdev, DRV_NAME);
+ if(rc) {
+ printk(KERN_ERR PFX "Cannot obtain PCI resource region.\n");
+ goto err_out_disable_pdev;
+ }
pci_set_master(pdev);
/*
* alloc and init net device
*/
- netdev = alloc_etherdev(sizeof(struct jme_adapter));
+ netdev = alloc_etherdev(sizeof(*jme));
if(!netdev) {
- CHECK_VAR = -ENOMEM;
- goto err_out_disable_pdev;
+ rc = -ENOMEM;
+ goto err_out_release_regions;
}
netdev->open = jme_open;
netdev->stop = jme_close;
netdev->set_multicast_list = jme_set_multi;
netdev->change_mtu = jme_change_mtu;
netdev->ethtool_ops = &jme_ethtool_ops;
+ NETDEV_GET_STATS(netdev, &jme_get_stats);
+
+ if(using_dac)
+ netdev->features = NETIF_F_HIGHDMA;
SET_NETDEV_DEV(netdev, &pdev->dev);
pci_set_drvdata(pdev, netdev);
jme = netdev_priv(netdev);
jme->pdev = pdev;
jme->dev = netdev;
+ jme->reg_ghc = GHC_DPX | GHC_SPEED_1000M;
+ jme->phylink = 0;
jme->regs = ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
- if (!jme->regs) {
+ if (!(jme->regs)) {
rc = -ENOMEM;
goto err_out_free_netdev;
}
- spin_lock_init(&jme->xmit_lock);
- spin_lock_init(&jme->recv_lock);
- spin_lock_init(&jme->macaddr_lock);
+ jme->shadow_regs = pci_alloc_consistent(pdev,
+ sizeof(__u32) * SHADOW_REG_NR,
+ &(jme->shadow_dma));
+ if (!(jme->shadow_regs)) {
+ rc = -ENOMEM;
+ goto err_out_unmap;
+ }
+
+ spin_lock_init(&jme->tx_lock);
spin_lock_init(&jme->phy_lock);
+ spin_lock_init(&jme->macaddr_lock);
+
+ atomic_set(&jme->intr_sem, 1);
+ atomic_set(&jme->link_changing, 1);
+ atomic_set(&jme->rx_cleaning, 1);
+ atomic_set(&jme->tx_cleaning, 1);
+
+ tasklet_init(&jme->linkch_task,
+ &jme_link_change_tasklet,
+ (unsigned long) jme);
+ tasklet_init(&jme->txclean_task,
+ &jme_tx_clean_tasklet,
+ (unsigned long) jme);
+ tasklet_init(&jme->rxclean_task,
+ &jme_rx_clean_tasklet,
+ (unsigned long) jme);
+ tasklet_init(&jme->rxempty_task,
+ &jme_rx_empty_tasklet,
+ (unsigned long) jme);
jme->mii_if.dev = netdev;
jme->mii_if.phy_id = 1;
jme->mii_if.supports_gmii = 1;
jme->mii_if.mdio_read = jme_mdio_read;
jme->mii_if.mdio_write = jme_mdio_write;
+ /*
+ * Get Max Read Req Size from PCI Config Space
+ */
+ pci_read_config_byte(pdev, PCI_CONF_DCSR_MRRS, &jme->mrrs);
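+	/* Match the TX DMA burst size to the PCIe Max Read Request Size */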
+ switch(jme->mrrs) {
+ case MRRS_128B:
+ jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
+ break;
+ case MRRS_256B:
+ jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
+ break;
+ default:
+ jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
+ break;
+	}
+
+
/*
* Reset MAC processor and reload EEPROM for MAC Address
*/
jme_clear_pm(jme);
+ jme_reset_phy_processor(jme);
jme_reset_mac_processor(jme);
- CHECK_AND_GOTO(jme_reload_eeprom(jme),
- err_out_unmap,
- "Rload eeprom for reading MAC Address error.");
+ rc = jme_reload_eeprom(jme);
+ if(rc) {
+ printk(KERN_ERR PFX
+ "Rload eeprom for reading MAC Address error.\n");
+ goto err_out_free_shadow;
+ }
jme_load_macaddr(netdev);
/*
* Register netdev
*/
- CHECK_AND_GOTO(register_netdev(netdev),
- err_out_unmap,
- "Cannot register net device.")
-
- printk(KERN_INFO "%s: JMC250 gigabit eth at %llx, %s, IRQ %d\n",
- netdev->name,
- (unsigned long long) pci_resource_start(pdev, 0),
- print_mac(mac, netdev->dev_addr),
- pdev->irq);
+ rc = register_netdev(netdev);
+ if(rc) {
+ printk(KERN_ERR PFX "Cannot register net device.\n");
+ goto err_out_free_shadow;
+ }
- pci_set_drvdata(pdev, netdev);
+ jprintk(netdev->name,
+ "JMC250 gigabit eth at %llx, "
+ "%02x:%02x:%02x:%02x:%02x:%02x, IRQ %d\n",
+ (unsigned long long) pci_resource_start(pdev, 0),
+ netdev->dev_addr[0],
+ netdev->dev_addr[1],
+ netdev->dev_addr[2],
+ netdev->dev_addr[3],
+ netdev->dev_addr[4],
+ netdev->dev_addr[5],
+ pdev->irq);
return 0;
+err_out_free_shadow:
+ pci_free_consistent(pdev,
+ sizeof(__u32) * SHADOW_REG_NR,
+ jme->shadow_regs,
+ jme->shadow_dma);
err_out_unmap:
iounmap(jme->regs);
err_out_free_netdev:
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
+err_out_release_regions:
+ pci_release_regions(pdev);
err_out_disable_pdev:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
err_out:
- return CHECK_VAR;
+ return rc;
}
-static void __devexit jme_remove_one(struct pci_dev *pdev)
-{
+static void __devexit
+jme_remove_one(struct pci_dev *pdev)
+{
struct net_device *netdev = pci_get_drvdata(pdev);
struct jme_adapter *jme = netdev_priv(netdev);
unregister_netdev(netdev);
+ pci_free_consistent(pdev,
+ sizeof(__u32) * SHADOW_REG_NR,
+ jme->shadow_regs,
+ jme->shadow_dma);
iounmap(jme->regs);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
#endif
};
-static int __init jme_init_module(void)
+static int __init
+jme_init_module(void)
{
- printk(KERN_INFO "jme: JMicron JMC250 gigabit ethernet "
- "driver version %s\n", DRV_VERSION);
+ printk(KERN_INFO PFX "JMicron JMC250 gigabit ethernet "
+ "driver version %s\n", DRV_VERSION);
return pci_register_driver(&jme_driver);
}
-static void __exit jme_cleanup_module(void)
+static void __exit
+jme_cleanup_module(void)
{
pci_unregister_driver(&jme_driver);
}
module_init(jme_init_module);
module_exit(jme_cleanup_module);
-MODULE_AUTHOR("David Tseng <cooldavid@cooldavid.org>");
+MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);