*
*/
-/*
- * Note:
- * Watchdog:
- * check if rx queue stoped.
- * And restart it after rx ring cleaned.
- */
-
/*
* Timeline before release:
- * Stage 2: Error handling.
- * - Wathch dog
- * - Transmit timeout
- *
- * Stage 3: Basic offloading support.
- * - Use pci_map_page on scattered sk_buff for HIGHMEM support
- * - Implement scatter-gather offloading.
- * A system page per RX (buffer|descriptor)?
- * Handle fraged sk_buff to TX descriptors.
- * - Implement tx/rx ipv6/ip/tcp/udp checksum offloading
- *
* Stage 4: Basic feature support.
+ * - Implement scatter-gather offloading.
+ * Use pci_map_page on scattered sk_buff for HIGHMEM support
 * - Implement Power Management related functions.
* - Implement Jumboframe.
* - Implement MSI.
* Along with multiple RX queue, for CPU load balancing.
*
* Stage 7:
- * - Use NAPI instead of rx_tasklet?
- * PCC Support Both Packet Counter and Timeout Interrupt for
- * receive and transmit complete, does NAPI really needed?
 * - Clean up / reorganize code, performance tuning (alignment etc...).
* - Test and Release 1.0
+ *
+ * Non-Critical:
+ * - Use NAPI instead of rx_tasklet?
+ *   PCC supports both packet-counter and timeout interrupts for
+ *   receive and transmit completion; is NAPI really needed?
+ * - Decode register dump for ethtool.
*/
#include <linux/version.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/delay.h>
+#include <linux/in.h>
+#include <linux/ip.h>
#include "jme.h"
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
jme_mdio_write(jme->dev,
jme->mii_if.phy_id,
- MII_ADVERTISE, ADVERTISE_ALL);
+ MII_ADVERTISE, ADVERTISE_ALL |
+ ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
jme_mdio_write(jme->dev,
jme->mii_if.phy_id,
dpi->cnt = 0;
jwrite32(jme, JME_PCCTX,
- ((60000 << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
- ((8 << PCCTX_SHIFT) & PCCTX_MASK) |
+ ((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
+ ((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) |
PCCTXQ0_EN
);
jme_check_link(struct net_device *netdev, int testonly)
{
struct jme_adapter *jme = netdev_priv(netdev);
- __u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT;
+ __u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr;
char linkmsg[32];
int rc = 0;
phylink = jread32(jme, JME_PHY_LINK);
if (phylink & PHY_LINK_UP) {
- /*
- * Keep polling for autoneg complete
- */
- while(!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) && --cnt > 0) {
- udelay(1);
- phylink = jread32(jme, JME_PHY_LINK);
+ if(!(phylink & PHY_LINK_AUTONEG_COMPLETE)) {
+ /*
+ * If we did not enable AN
+ * Speed/Duplex Info should be obtained from SMI
+ */
+ phylink = PHY_LINK_UP;
+
+ bmcr = jme_mdio_read(jme->dev,
+ jme->mii_if.phy_id,
+ MII_BMCR);
+
+ phylink |= ((bmcr & BMCR_SPEED1000) &&
+ (bmcr & BMCR_SPEED100) == 0) ?
+ PHY_LINK_SPEED_1000M :
+ (bmcr & BMCR_SPEED100) ?
+ PHY_LINK_SPEED_100M :
+ PHY_LINK_SPEED_10M;
+
+ phylink |= (bmcr & BMCR_FULLDPLX) ?
+ PHY_LINK_DUPLEX : 0;
+ }
+ else {
+ /*
+ * Keep polling for speed/duplex resolve complete
+ */
+ while(!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
+ --cnt) {
+
+ udelay(1);
+ phylink = jread32(jme, JME_PHY_LINK);
+
+ }
+
+ if(!cnt)
+ jeprintk(netdev->name,
+ "Waiting speed resolve timeout.\n");
}
if(jme->phylink == phylink) {
jme->phylink = phylink;
- if(!cnt)
- jeprintk(netdev->name,
- "Waiting speed resolve timeout.\n");
-
- if(!(phylink & PHY_LINK_AUTONEG_COMPLETE))
- jprintk(netdev->name,
- "Link partener does not support AN.\n");
-
switch(phylink & PHY_LINK_SPEED_MASK) {
case PHY_LINK_SPEED_10M:
ghc = GHC_SPEED_10M;
"Half-Duplex, ");
if(phylink & PHY_LINK_MDI_STAT)
- strcat(linkmsg, "MDI");
- else
strcat(linkmsg, "MDI-X");
+ else
+ strcat(linkmsg, "MDI");
if(phylink & PHY_LINK_DUPLEX)
jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
- else
+ else {
jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
TXMCS_BACKOFF |
TXMCS_CARRIERSENSE |
TXMCS_COLLISION);
+ jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN |
+ ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
+ TXTRHD_TXREN |
+ ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
+ }
jme->reg_ghc = ghc;
jwrite32(jme, JME_GHC, ghc);
struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
dma_addr_t dmaaddr;
int i, idx, nr_desc;
+ __u8 flags;
nr_desc = 2;
idx = jme_alloc_txdesc(jme, nr_desc);
* Other fields are already filled correctly.
*/
wmb();
- ctxdesc->desc1.flags = TXFLAG_OWN | TXFLAG_INT;
+ flags = TXFLAG_OWN | TXFLAG_INT;
+ if(skb->ip_summed == CHECKSUM_PARTIAL) {
+ //flags |= TXFLAG_IPCS;
+
+ switch(ip_hdr(skb)->protocol) {
+ case IPPROTO_TCP:
+ flags |= TXFLAG_TCPCS;
+ break;
+ case IPPROTO_UDP:
+ flags |= TXFLAG_UDPCS;
+ break;
+ default:
+ break;
+ }
+ }
+ ctxdesc->desc1.flags = flags;
/*
* Set tx buffer info after telling NIC to send
* For better tx_clean timing
val = jread32(jme, JME_TXCS);
}
- if(!i)
+ if(!i) {
jeprintk(jme->dev->name, "Disable TX engine timeout.\n");
+ jme_reset_mac_processor(jme);
+ }
}
/*
* Setup Unicast Filter
*/
- jme->reg_rxmcs = RXMCS_VTAGRM | RXMCS_PREPAD;
jme_set_multi(jme->dev);
/*
skb_put(skb, framesize);
skb->protocol = eth_type_trans(skb, jme->dev);
+ if(jme->reg_rxmcs & RXMCS_CHECKSUM)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
netif_rx(skb);
if(le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST_MUL)
}
+/*
+ * Check the hardware-reported checksum status of a received frame.
+ * Returns nonzero when any protocol flagged as present in the RX
+ * write-back flags lacks its matching checksum-OK bit; always returns
+ * 0 when RX checksum offloading (RXMCS_CHECKSUM) is disabled.
+ */
+static int
+jme_rxsum_bad(struct jme_adapter *jme, __u16 flags)
+{
+	if(jme->reg_rxmcs & RXMCS_CHECKSUM) {
+		/*
+		 * NOTE(review): the IPv6 case tests RXWBFLAG_IPCS, the
+		 * same bit as the IPv4 case -- confirm the hardware has
+		 * no dedicated IPv6 checksum-status flag.
+		 */
+		return	((flags & RXWBFLAG_IPV4) &&
+				!(flags & RXWBFLAG_IPCS)) ||
+			((flags & RXWBFLAG_IPV6) &&
+				!(flags & RXWBFLAG_IPCS)) ||
+			((flags & RXWBFLAG_TCPON) &&
+				!(flags & RXWBFLAG_TCPCS)) ||
+			((flags & RXWBFLAG_UDPON) &&
+				!(flags & RXWBFLAG_UDPCS));
+	}
+	else {
+		return 0;
+	}
+}
+
static int
jme_process_receive(struct jme_adapter *jme, int limit)
{
rx_dbg(jme->dev->name, "RX: Cleaning %d\n", i);
- if(desccnt > 1 ||
- rxdesc->descwb.errstat & RXWBERR_ALLERR) {
+ if(unlikely(desccnt > 1 ||
+ rxdesc->descwb.errstat & RXWBERR_ALLERR ||
+ jme_rxsum_bad(jme, rxdesc->descwb.flags))) {
if(rxdesc->descwb.errstat & RXWBERR_CRCERR)
++(NET_STAT(jme).rx_crc_errors);
struct jme_ring *txring = &(jme->txring[0]);
volatile struct txdesc *txdesc = txring->desc;
struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
- int i, j, cnt = 0, max;
+ int i, j, cnt = 0, max, err;
if(!atomic_dec_and_test(&jme->tx_cleaning))
goto out;
ctxbi = txbi + i;
- if(ctxbi->skb && !(txdesc[i].desc1.flags & TXFLAG_OWN)) {
+ if(ctxbi->skb && !(txdesc[i].descwb.flags & TXWBFLAG_OWN)) {
+
+ err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;
tx_dbg(jme->dev->name,
"Tx Tasklet: Clean %d+%d\n",
ttxbi->len,
PCI_DMA_TODEVICE);
- NET_STAT(jme).tx_bytes += ttxbi->len;
+ if(likely(!err))
+ NET_STAT(jme).tx_bytes += ttxbi->len;
+
ttxbi->mapping = 0;
ttxbi->len = 0;
}
cnt += ctxbi->nr_desc;
- ++(NET_STAT(jme).tx_packets);
+ if(unlikely(err))
+ ++(NET_STAT(jme).tx_carrier_errors);
+ else
+ ++(NET_STAT(jme).tx_packets);
}
else {
if(!ctxbi->skb)
tasklet_kill(&jme->txclean_task);
tasklet_kill(&jme->rxclean_task);
tasklet_kill(&jme->rxempty_task);
- jme_disable_rx_engine(jme);
- jme_disable_tx_engine(jme);
+
+ jme_reset_mac_processor(jme);
jme_free_rx_resources(jme);
jme_free_tx_resources(jme);
{
struct jme_adapter *jme = netdev_priv(netdev);
u32 mc_hash[2] = {};
- __u32 val;
int i;
+ unsigned long flags;
- val = jme->reg_rxmcs | RXMCS_BRDFRAME | RXMCS_UNIFRAME;
+ spin_lock_irqsave(&jme->rxmcs_lock, flags);
+
+ jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME;
if (netdev->flags & IFF_PROMISC) {
- val |= RXMCS_ALLFRAME;
+ jme->reg_rxmcs |= RXMCS_ALLFRAME;
}
else if (netdev->flags & IFF_ALLMULTI) {
- val |= RXMCS_ALLMULFRAME;
+ jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
}
else if(netdev->flags & IFF_MULTICAST) {
struct dev_mc_list *mclist;
int bit_nr;
- val |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
+ jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
for (i = 0, mclist = netdev->mc_list;
mclist && i < netdev->mc_count;
++i, mclist = mclist->next) {
jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
}
-
wmb();
- jwrite32(jme, JME_RXMCS, val);
+ jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
+
+ spin_unlock_irqrestore(&jme->rxmcs_lock, flags);
}
static int
-jme_change_mtu(struct net_device *dev, int new_mtu)
+jme_change_mtu(struct net_device *netdev, int new_mtu)
{
/*
* Not supporting MTU change for now.
return -EINVAL;
}
+/*
+ * netdev tx_timeout handler: invoked by the network stack when the TX
+ * queue has been stalled longer than watchdog_timeo (TX_TIMEOUT).
+ */
+static void
+jme_tx_timeout(struct net_device *netdev)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	/*
+	 * Restart autonegotiation to reset the link.
+	 * The resulting link-change event reinitializes all RX/TX
+	 * resources.
+	 */
+	jme_restart_an(jme);
+}
+
static void
jme_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
strcpy(info->bus_info, pci_name(jme->pdev));
}
+/*
+ * ethtool get_regs_len: size in bytes of the register dump produced
+ * by jme_get_regs() (four 0x100-byte register-group slots).
+ */
+static int
+jme_get_regs_len(struct net_device *netdev)
+{
+	return 0x400;
+}
+
+/*
+ * Copy len bytes of memory-mapped registers starting at offset reg
+ * into buffer p, one 32-bit register read at a time.  A len that is
+ * not a multiple of 4 still causes a final full 32-bit read covering
+ * the remainder.
+ */
+static void
+mmapio_memcpy(struct jme_adapter *jme, __u32 *p, __u32 reg, int len)
+{
+	int i;
+
+	for(i = 0 ; i < len ; i += 4)
+		p[i>>2] = jread32(jme, reg + i);
+
+}
+
+/*
+ * ethtool get_regs: dump the MAC, PHY, MISC and RSS register groups
+ * into consecutive fixed 0x100-byte slots of the 0x400-byte output
+ * buffer (zero-filled first, so gaps between group lengths read as 0).
+ */
+static void
+jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	__u32 *p32 = (__u32*)p;
+
+	memset(p, 0, 0x400);
+
+	regs->version = 1;
+	mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);
+
+	p32 += 0x100 >> 2;
+	mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN);
+
+	p32 += 0x100 >> 2;
+	mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN);
+
+	p32 += 0x100 >> 2;
+	mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);
+
+}
+
+/*
+ * ethtool get_coalesce: report the fixed TX coalescing parameters
+ * (PCC_TX_TO / PCC_TX_CNT) and the RX parameters of whichever
+ * adaptive PCC profile (P1/P2/P3) is currently active in jme->dpi.
+ */
+static int
+jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	ecmd->use_adaptive_rx_coalesce = true;
+	ecmd->tx_coalesce_usecs = PCC_TX_TO;
+	ecmd->tx_max_coalesced_frames = PCC_TX_CNT;
+
+	switch(jme->dpi.cur) {
+	case PCC_P1:
+		ecmd->rx_coalesce_usecs = PCC_P1_TO;
+		ecmd->rx_max_coalesced_frames = PCC_P1_CNT;
+		break;
+	case PCC_P2:
+		ecmd->rx_coalesce_usecs = PCC_P2_TO;
+		ecmd->rx_max_coalesced_frames = PCC_P2_CNT;
+		break;
+	case PCC_P3:
+		ecmd->rx_coalesce_usecs = PCC_P3_TO;
+		ecmd->rx_max_coalesced_frames = PCC_P3_CNT;
+		break;
+	default:
+		/* Unknown profile: leave RX fields untouched. */
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * ethtool get_pauseparam: TX pause comes from the TXPFC shadow
+ * register, RX pause from the RXMCS shadow, and "autoneg" reflects
+ * whether pause capabilities are currently advertised on the PHY
+ * (MII_ADVERTISE, read under phy_lock).
+ */
+static void
+jme_get_pauseparam(struct net_device *netdev,
+			struct ethtool_pauseparam *ecmd)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	unsigned long flags;
+	__u32 val;
+
+	ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
+	ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;
+
+	spin_lock_irqsave(&jme->phy_lock, flags);
+	val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
+	spin_unlock_irqrestore(&jme->phy_lock, flags);
+	ecmd->autoneg = (val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
+}
+
+/*
+ * ethtool set_pauseparam: update TX pause-frame generation, RX flow
+ * control and the advertised pause capabilities, touching hardware
+ * only for values that actually changed.
+ *
+ * NOTE(review): reg_txpfc is read-modify-written without any lock,
+ * while reg_rxmcs is protected by rxmcs_lock and the PHY access by
+ * phy_lock -- confirm no concurrent writer of reg_txpfc exists.
+ */
+static int
+jme_set_pauseparam(struct net_device *netdev,
+			struct ethtool_pauseparam *ecmd)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	unsigned long flags;
+	__u32 val;
+
+	if( ((jme->reg_txpfc & TXPFC_PF_EN) != 0) !=
+		(ecmd->tx_pause != 0)) {
+
+		if(ecmd->tx_pause)
+			jme->reg_txpfc |= TXPFC_PF_EN;
+		else
+			jme->reg_txpfc &= ~TXPFC_PF_EN;
+
+		jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
+	}
+
+	spin_lock_irqsave(&jme->rxmcs_lock, flags);
+	if( ((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) !=
+		(ecmd->rx_pause != 0)) {
+
+		if(ecmd->rx_pause)
+			jme->reg_rxmcs |= RXMCS_FLOWCTRL;
+		else
+			jme->reg_rxmcs &= ~RXMCS_FLOWCTRL;
+
+		jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
+	}
+	spin_unlock_irqrestore(&jme->rxmcs_lock, flags);
+
+	spin_lock_irqsave(&jme->phy_lock, flags);
+	val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
+	if( ((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) !=
+		(ecmd->autoneg != 0)) {
+
+		if(ecmd->autoneg)
+			val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+		else
+			val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+
+		jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE, val);
+	}
+	spin_unlock_irqrestore(&jme->phy_lock, flags);
+
+	return 0;
+}
+
static int
jme_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
struct jme_adapter *jme = netdev_priv(netdev);
int rc;
+
spin_lock(&jme->phy_lock);
rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
spin_unlock(&jme->phy_lock);
int rc;
unsigned long flags;
+ if(ecmd->speed == SPEED_1000 && ecmd->autoneg != AUTONEG_ENABLE)
+ return -EINVAL;
+
spin_lock_irqsave(&jme->phy_lock, flags);
rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
spin_unlock_irqrestore(&jme->phy_lock, flags);
return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
}
+/*
+ * ethtool get_rx_csum: nonzero when RX checksum offloading is enabled
+ * in the RXMCS shadow register.
+ */
+static u32
+jme_get_rx_csum(struct net_device *netdev)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	return jme->reg_rxmcs & RXMCS_CHECKSUM;
+}
+
+/*
+ * ethtool set_rx_csum: toggle the RXMCS_CHECKSUM bit under rxmcs_lock
+ * and write the updated shadow value to the RX MAC control register.
+ */
+static int
+jme_set_rx_csum(struct net_device *netdev, u32 on)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&jme->rxmcs_lock, flags);
+	if(on)
+		jme->reg_rxmcs |= RXMCS_CHECKSUM;
+	else
+		jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
+	jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
+	spin_unlock_irqrestore(&jme->rxmcs_lock, flags);
+
+	return 0;
+}
+
+/*
+ * ethtool set_tx_csum: enable/disable TX checksum offload by toggling
+ * NETIF_F_HW_CSUM in the netdev feature flags.
+ */
+static int
+jme_set_tx_csum(struct net_device *netdev, u32 on)
+{
+	if(on)
+		netdev->features |= NETIF_F_HW_CSUM;
+	else
+		netdev->features &= ~NETIF_F_HW_CSUM;
+
+	return 0;
+}
+
+/*
+ * ethtool nway_reset: restart autonegotiation on the PHY.
+ */
+static int
+jme_nway_reset(struct net_device *netdev)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	jme_restart_an(jme);
+	return 0;
+}
+
static const struct ethtool_ops jme_ethtool_ops = {
.get_drvinfo = jme_get_drvinfo,
+ .get_regs_len = jme_get_regs_len,
+ .get_regs = jme_get_regs,
+ .get_coalesce = jme_get_coalesce,
+ .get_pauseparam = jme_get_pauseparam,
+ .set_pauseparam = jme_set_pauseparam,
.get_settings = jme_get_settings,
.set_settings = jme_set_settings,
.get_link = jme_get_link,
+ .get_rx_csum = jme_get_rx_csum,
+ .set_rx_csum = jme_set_rx_csum,
+ .set_tx_csum = jme_set_tx_csum,
+ .nway_reset = jme_nway_reset,
};
static int
if(!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
return 1;
+ if (!pci_set_dma_mask(pdev, DMA_40BIT_MASK))
+ if(!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK))
+ return 1;
+
if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
if(!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
return 0;
netdev->set_multicast_list = jme_set_multi;
netdev->change_mtu = jme_change_mtu;
netdev->ethtool_ops = &jme_ethtool_ops;
+ netdev->tx_timeout = jme_tx_timeout;
+ netdev->watchdog_timeo = TX_TIMEOUT;
NETDEV_GET_STATS(netdev, &jme_get_stats);
-
+ netdev->features = NETIF_F_HW_CSUM;
if(using_dac)
- netdev->features = NETIF_F_HIGHDMA;
+ netdev->features |= NETIF_F_HIGHDMA;
SET_NETDEV_DEV(netdev, &pdev->dev);
pci_set_drvdata(pdev, netdev);
jme = netdev_priv(netdev);
jme->pdev = pdev;
jme->dev = netdev;
- jme->reg_ghc = GHC_DPX | GHC_SPEED_1000M;
jme->phylink = 0;
jme->regs = ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
spin_lock_init(&jme->tx_lock);
spin_lock_init(&jme->phy_lock);
spin_lock_init(&jme->macaddr_lock);
+ spin_lock_init(&jme->rxmcs_lock);
atomic_set(&jme->intr_sem, 1);
atomic_set(&jme->link_changing, 1);
jme->mii_if.mdio_read = jme_mdio_read;
jme->mii_if.mdio_write = jme_mdio_write;
+ jme->dpi.cur = PCC_P1;
+
+ jme->reg_ghc = GHC_DPX | GHC_SPEED_1000M;
+ jme->reg_rxmcs = RXMCS_DEFAULT;
+ jme->reg_txpfc = 0;
/*
* Get Max Read Req Size from PCI Config Space
*/
}
jprintk(netdev->name,
- "JMC250 gigabit eth at %llx, "
- "%02x:%02x:%02x:%02x:%02x:%02x, IRQ %d\n",
- (unsigned long long) pci_resource_start(pdev, 0),
+ "JMC250 gigabit eth %02x:%02x:%02x:%02x:%02x:%02x\n",
netdev->dev_addr[0],
netdev->dev_addr[1],
netdev->dev_addr[2],
netdev->dev_addr[3],
netdev->dev_addr[4],
- netdev->dev_addr[5],
- pdev->irq);
+ netdev->dev_addr[5]);
return 0;
#include <linux/version.h>
#define DRV_NAME "jme"
-#define DRV_VERSION "0.4"
+#define DRV_VERSION "0.5"
#define PFX DRV_NAME ": "
#ifdef DEBUG
#define dprintk(devname, fmt, args...) \
- printk(KERN_DEBUG PFX "%s: " fmt, devname, ## args)
+ printk(KERN_DEBUG "%s: " fmt, devname, ## args)
#else
#define dprintk(devname, fmt, args...)
#endif
#endif
#define jprintk(devname, fmt, args...) \
- printk(KERN_INFO PFX "%s: " fmt, devname, ## args)
+ printk(KERN_INFO "%s: " fmt, devname, ## args)
#define jeprintk(devname, fmt, args...) \
- printk(KERN_ERR PFX "%s: " fmt, devname, ## args)
+ printk(KERN_ERR "%s: " fmt, devname, ## args)
#define USE_IEVE_SHADOW 0
#define PCC_INTERVAL (HZ / 10)
#define PCC_P3_THRESHOLD 3*1024*1024
#define PCC_P2_THRESHOLD 1000
+#define PCC_TX_TO 60000
+#define PCC_TX_CNT 8
/*
* TX/RX Descriptors
/* DW3 */
__u32 bufaddrl;
} desc2;
+ struct {
+ /* DW0 */
+ __u8 ehdrsz;
+ __u8 rsv1;
+ __u8 rsv2;
+ __u8 flags;
+
+ /* DW1 */
+ __u16 trycnt;
+ __u16 segcnt;
+
+ /* DW2 */
+ __u16 pktsz;
+ __u16 rsv3;
+
+ /* DW3 */
+ __u32 bufaddrl;
+ } descwb;
};
};
-enum jme_txdesc_flag_bits {
+enum jme_txdesc_flags_bits {
TXFLAG_OWN = 0x80,
TXFLAG_INT = 0x40,
TXFLAG_64BIT = 0x20,
TXFLAG_LSEN = 0x02,
TXFLAG_TAGON = 0x01,
};
+/*
+ * TX descriptor write-back status flag bits (descwb.flags), checked by
+ * the TX-clean path after the hardware releases a descriptor.
+ * Note: the enum tag is "tx", matching the TXWBFLAG_* constants it
+ * defines; the previous tag said "rx" by mistake.
+ */
+enum jme_txdescwb_flags_bits {
+	TXWBFLAG_OWN	= 0x80,
+	TXWBFLAG_INT	= 0x40,
+	TXWBFLAG_TMOUT	= 0x20,
+	TXWBFLAG_TRYOUT	= 0x10,
+	TXWBFLAG_COL	= 0x08,
+
+	/* Union of all error conditions reported on write-back. */
+	TXWBFLAG_ALLERR	= TXWBFLAG_TMOUT |
+			  TXWBFLAG_TRYOUT |
+			  TXWBFLAG_COL,
+};
#define RX_DESC_SIZE 16
#define RX_RING_SIZE (RING_DESC_NR * RX_DESC_SIZE)
#define RX_BUF_DMA_ALIGN 8
-//#define RX_BUF_SIZE 1600
-#define RX_BUF_SIZE 9200
-//#define RX_BUF_SIZE 4000
+#define RX_BUF_SIZE 9216
#define RX_PREPAD_SIZE 10
/*
spinlock_t tx_lock;
spinlock_t phy_lock;
spinlock_t macaddr_lock;
+ spinlock_t rxmcs_lock;
struct tasklet_struct rxempty_task;
struct tasklet_struct rxclean_task;
struct tasklet_struct txclean_task;
struct tasklet_struct linkch_task;
+ __u32 features;
__u32 reg_txcs;
+ __u32 reg_txpfc;
__u32 reg_rxmcs;
__u32 reg_ghc;
__u32 phylink;
enum shadow_reg_val {
SHADOW_IEVE = 0,
};
+enum jme_features_bits {
+ JME_FEATURE_LALALA = 0x00000001,
+};
#define WAIT_TASKLET_TIMEOUT 500 /* 500 ms */
+#define TX_TIMEOUT (5*HZ)
+
/*
 * Memory-mapped I/O registers
JME_RSS = 0x0C00,
};
+/*
+ * Byte lengths of the register groups dumped by ethtool get_regs.
+ * NOTE(review): JME_RSS_LEN is 0xFF -- unlike its peers it is not a
+ * multiple of 4 (mmapio_memcpy still reads through offset 0xFC);
+ * confirm whether 0x100 was intended.
+ */
+enum jme_iomap_lens {
+	JME_MAC_LEN	= 0x80,
+	JME_PHY_LEN	= 0x58,
+	JME_MISC_LEN	= 0x98,
+	JME_RSS_LEN	= 0xFF,
+};
+
enum jme_iomap_regs {
JME_TXCS = JME_MAC | 0x00, /* Transmit Control and Status */
JME_TXDBA_LO = JME_MAC | 0x04, /* Transmit Queue Desc Base Addr */
TXCS_DEFAULT = TXCS_FIFOTH_4QW |
TXCS_BURST,
};
-#define JME_TX_DISABLE_TIMEOUT 100 /* 100 msec */
+#define JME_TX_DISABLE_TIMEOUT 5 /* 5 msec */
/*
* TX MAC Control/Status Bits
TXMCS_PADDING,
};
+/* TX pause-frame control register (TXPFC) bit masks. */
+enum jme_txpfc_bits_masks {
+	TXPFC_VLAN_TAG	= 0xFFFF0000,
+	TXPFC_VLAN_EN	= 0x00008000,
+	TXPFC_PF_EN	= 0x00000001,
+};
+
+/* TX timer/retry control register (TXTRHD) bit masks. */
+enum jme_txtrhd_bits_masks {
+	TXTRHD_TXPEN	= 0x80000000,
+	TXTRHD_TXP	= 0x7FFFFF00,
+	TXTRHD_TXREN	= 0x00000080,
+	TXTRHD_TXRL	= 0x0000007F,
+};
+/* Shift amounts for the multi-bit TXTRHD fields above. */
+enum jme_txtrhd_shifts {
+	TXTRHD_TXP_SHIFT	= 8,
+	TXTRHD_TXRL_SHIFT	= 0,
+};
+
/*
* RX Control/Status Bits
RXCS_RETRYCNT_60 = 0x00000F00,
RXCS_DEFAULT = RXCS_FIFOTHTP_128T |
- RXCS_FIFOTHNP_128QW |
+ //RXCS_FIFOTHNP_128QW |
+ RXCS_FIFOTHNP_32QW |
RXCS_DMAREQSZ_128B |
RXCS_RETRYGAP_256ns |
RXCS_RETRYCNT_32,
};
-#define JME_RX_DISABLE_TIMEOUT 100 /* 100 msec */
+#define JME_RX_DISABLE_TIMEOUT 5 /* 5 msec */
/*
* RX MAC Control/Status Bits
RXMCS_VTAGRM = 0x00000004,
RXMCS_PREPAD = 0x00000002,
RXMCS_CHECKSUM = 0x00000001,
+
+ RXMCS_DEFAULT = RXMCS_VTAGRM |
+ RXMCS_PREPAD |
+ RXMCS_FLOWCTRL |
+ RXMCS_CHECKSUM,
};
/*