/*
* TODO:
- * - Implement MSI-X.
- * Along with multiple RX queue, for CPU load balancing.
* - Decode register dump for ethtool.
*/
smi_reg_addr(reg));
wmb();
- for (i = JME_PHY_TIMEOUT ; i > 0 ; --i) {
- udelay(1);
+ for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
+ udelay(20);
val = jread32(jme, JME_SMI);
if ((val & SMI_OP_REQ) == 0)
break;
}
if (i == 0) {
- jeprintk(netdev->name, "phy read timeout : %d\n", reg);
+ jeprintk("jme", "phy(%d) read timeout : %d\n", phy, reg);
return 0;
}
smi_phy_addr(phy) | smi_reg_addr(reg));
wmb();
- for (i = JME_PHY_TIMEOUT ; i > 0 ; --i) {
- udelay(1);
+ for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
+ udelay(20);
val = jread32(jme, JME_SMI);
if ((val & SMI_OP_REQ) == 0)
break;
}
if (i == 0)
- jeprintk(netdev->name, "phy write timeout : %d\n", reg);
+ jeprintk("jme", "phy(%d) write timeout : %d\n", phy, reg);
return;
}
{
__u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0,0,0,0};
__u32 crc = 0xCDCDCDCD;
+ __u32 gpreg0;
int i;
jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
for(i = 0 ; i < WAKEUP_FRAME_NR ; ++i)
jme_setup_wakeup_frame(jme, mask, crc, i);
- jwrite32(jme, JME_GPREG0, GPREG0_DEFAULT);
+ if(jme->fpgaver)
+ gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL;
+ else
+ gpreg0 = GPREG0_DEFAULT;
+ jwrite32(jme, JME_GPREG0, gpreg0);
jwrite32(jme, JME_GPREG1, 0);
}
jwrite32(jme, JME_SHBA_LO, 0x0);
}
+static __u32
+jme_linkstat_from_phy(struct jme_adapter *jme)
+{
+ __u32 phylink, bmsr;
+
+ phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17); /* vendor-specific PHY status reg (see jme_phy_reg17_* masks) -- TODO confirm bit layout vs PHY_LINK_* */
+ bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR);
+ if(bmsr & BMCR_ANCOMP) /* NOTE(review): 0x0020 is the BMSR aneg-complete bit value; macro is named BMCR_* -- confirm intended register */
+ phylink |= PHY_LINK_AUTONEG_COMPLETE;
+
+ return phylink;
+}
+
static int
jme_check_link(struct net_device *netdev, int testonly)
{
int rc = 0;
linkmsg[0] = '\0';
- phylink = jread32(jme, JME_PHY_LINK);
+
+ if(jme->fpgaver)
+ phylink = jme_linkstat_from_phy(jme);
+ else
+ phylink = jread32(jme, JME_PHY_LINK);
if (phylink & PHY_LINK_UP) {
if(!(phylink & PHY_LINK_AUTONEG_COMPLETE)) {
--cnt) {
udelay(1);
- phylink = jread32(jme, JME_PHY_LINK);
+ if(jme->fpgaver)
+ phylink = jme_linkstat_from_phy(jme);
+ else
+ phylink = jread32(jme, JME_PHY_LINK);
}
if(!cnt)
jme->phylink = phylink;
+ ghc = jme->reg_ghc & ~(GHC_SPEED_10M |
+ GHC_SPEED_100M |
+ GHC_SPEED_1000M |
+ GHC_DPX);
switch(phylink & PHY_LINK_SPEED_MASK) {
case PHY_LINK_SPEED_10M:
- ghc = GHC_SPEED_10M;
+ ghc |= GHC_SPEED_10M;
strcat(linkmsg, "10 Mbps, ");
break;
case PHY_LINK_SPEED_100M:
- ghc = GHC_SPEED_100M;
+ ghc |= GHC_SPEED_100M;
strcat(linkmsg, "100 Mbps, ");
break;
case PHY_LINK_SPEED_1000M:
- ghc = GHC_SPEED_1000M;
+ ghc |= GHC_SPEED_1000M;
strcat(linkmsg, "1000 Mbps, ");
break;
default:
- ghc = 0;
break;
}
ghc |= (phylink & PHY_LINK_DUPLEX) ? GHC_DPX : 0;
RING_DESC_ALIGN);
txring->dma = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
txring->next_to_use = 0;
- txring->next_to_clean = 0;
+ atomic_set(&txring->next_to_clean, 0);
atomic_set(&txring->nr_free, jme->tx_ring_size);
/*
txring->dma = 0;
}
txring->next_to_use = 0;
- txring->next_to_clean = 0;
+ atomic_set(&txring->next_to_clean, 0);
atomic_set(&txring->nr_free, 0);
}
rxring->dma = 0;
}
rxring->next_to_use = 0;
- rxring->next_to_clean = 0;
+ atomic_set(&rxring->next_to_clean, 0);
}
static int
RING_DESC_ALIGN);
rxring->dma = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
rxring->next_to_use = 0;
- rxring->next_to_clean = 0;
+ atomic_set(&rxring->next_to_clean, 0);
/*
* Initiallize Receive Descriptors
if(unlikely((flags & RXWBFLAG_TCPON) &&
!(flags & RXWBFLAG_TCPCS))) {
csum_dbg(jme->dev->name, "TCP Checksum error.\n");
- return false;
+ goto out_sumerr;
}
if(unlikely((flags & RXWBFLAG_UDPON) &&
!(flags & RXWBFLAG_UDPCS))) {
csum_dbg(jme->dev->name, "UDP Checksum error.\n");
- return false;
+ goto out_sumerr;
}
if(unlikely((flags & RXWBFLAG_IPV4) &&
!(flags & RXWBFLAG_IPCS))) {
csum_dbg(jme->dev->name, "IPv4 Checksum error.\n");
- return false;
+ goto out_sumerr;
}
return true;
+
+out_sumerr:
+ csum_dbg(jme->dev->name, "%s%s%s%s\n",
+ (flags & RXWBFLAG_IPV4)?"IPv4 ":"",
+ (flags & RXWBFLAG_IPV6)?"IPv6 ":"",
+ (flags & RXWBFLAG_UDPON)?"UDP ":"",
+ (flags & RXWBFLAG_TCPON)?"TCP":"");
+ return false;
}
static void
if(jme->vlgrp) {
vlan_dbg(jme->dev->name,
"VLAN Passed to kernel.\n");
- vlan_hwaccel_rx(skb, jme->vlgrp,
+ jme->jme_vlan_rx(skb, jme->vlgrp,
le32_to_cpu(rxdesc->descwb.vlan));
NET_STAT(jme).rx_bytes += 4;
}
}
else {
- netif_rx(skb);
+ jme->jme_rx(skb);
}
if((le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST) ==
if(unlikely(!netif_carrier_ok(jme->dev)))
goto out_inc;
- i = rxring->next_to_clean;
+ i = atomic_read(&rxring->next_to_clean);
while( limit-- > 0 )
{
rxdesc = rxring->desc;
(jread32(jme, JME_RXNDA) - jread32(jme, JME_RXDBA_LO))
>> 4);
- rxring->next_to_clean = i;
+ atomic_set(&rxring->next_to_clean, i);
out_inc:
atomic_inc(&jme->rx_cleaning);
jme_free_rx_resources(jme);
jme_free_tx_resources(jme);
- if(jme->flags & JME_FLAG_POLL) {
+ if(jme->flags & JME_FLAG_POLL)
jme_polling_mode(jme);
- napi_disable(&jme->napi);
- }
}
jme_check_link(netdev, 0);
netif_start_queue(netdev);
- if(jme->flags & JME_FLAG_POLL) {
- napi_enable(&jme->napi);
+ if(jme->flags & JME_FLAG_POLL)
jme_interrupt_mode(jme);
- }
jme_start_pcc_timer(jme);
}
}
static int
-jme_poll(struct napi_struct *napi, int budget)
+jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
{
- struct jme_adapter *jme = container_of(napi, struct jme_adapter, napi);
+ struct jme_adapter *jme = jme_napi_priv(holder);
struct net_device *netdev = jme->dev;
int rest;
- rest = jme_process_receive(jme, budget);
+ rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget));
- while(!atomic_dec_and_test(&jme->rx_empty)) {
+ while(atomic_read(&jme->rx_empty) > 0) {
+ atomic_dec(&jme->rx_empty);
++(NET_STAT(jme).rx_dropped);
jme_restart_rx_engine(jme);
}
atomic_inc(&jme->rx_empty);
if(rest) {
- netif_rx_complete(netdev, napi);
+ JME_RX_COMPLETE(netdev, holder);
jme_interrupt_mode(jme);
}
- return budget - rest;
+ JME_NAPI_WEIGHT_SET(budget, rest);
+ return JME_NAPI_WEIGHT_VAL(budget) - rest;
}
static void
queue_dbg(jme->dev->name, "RX Queue Full!\n");
jme_rx_clean_tasklet(arg);
- jme_restart_rx_engine(jme);
+
+ while(atomic_read(&jme->rx_empty) > 0) {
+ atomic_dec(&jme->rx_empty);
+ ++(NET_STAT(jme).rx_dropped);
+ jme_restart_rx_engine(jme);
+ }
+ atomic_inc(&jme->rx_empty);
}
static void
tx_dbg(jme->dev->name, "Tx Tasklet: In\n");
- for(i = txring->next_to_clean ; cnt < max ; ) {
+ for(i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) {
ctxbi = txbi + i;
ctxbi->skb = NULL;
ctxbi->len = 0;
+ ctxbi->start_xmit = 0;
}
else {
if(!ctxbi->skb)
tx_dbg(jme->dev->name,
"Tx Tasklet: Stop %d Jiffies %lu\n",
i, jiffies);
- txring->next_to_clean = i;
+ atomic_set(&txring->next_to_clean, i);
atomic_add(cnt, &txring->nr_free);
jme_wake_queue_if_stopped(jme);
atomic_inc(&jme->rx_empty);
if((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
- if(likely(
- netif_rx_schedule_prep(jme->dev, &jme->napi))) {
+ if(likely(JME_RX_SCHEDULE_PREP(jme))) {
jme_polling_mode(jme);
- __netif_rx_schedule(jme->dev, &jme->napi);
+ JME_RX_SCHEDULE(jme);
}
}
}
else {
- if(intrstat & INTR_RX0EMP)
+ if(intrstat & INTR_RX0EMP) {
+ atomic_inc(&jme->rx_empty);
tasklet_schedule(&jme->rxempty_task);
+ }
if(intrstat & (INTR_PCCRX0TO | INTR_PCCRX0))
tasklet_schedule(&jme->rxclean_task);
jme_open(struct net_device *netdev)
{
struct jme_adapter *jme = netdev_priv(netdev);
- int rc, timeout = 100;
+ int rc, timeout = 10;
while(
--timeout > 0 &&
atomic_read(&jme->tx_cleaning) != 1
)
)
- msleep(10);
+ msleep(1);
if(!timeout) {
rc = -EBUSY;
jme_clear_pm(jme);
jme_reset_mac_processor(jme);
+ JME_NAPI_ENABLE(jme);
rc = jme_request_irq(jme);
if(rc)
if (bmcr != tmp)
jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp);
- jwrite32(jme, JME_GHC, GHC_SPEED_100M);
+ if(jme->fpgaver)
+ jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL);
+ else
+ jwrite32(jme, JME_GHC, GHC_SPEED_100M);
}
static void
jme_disable_shadow(jme);
jme_free_irq(jme);
- if(jme->flags & JME_FLAG_POLL)
- napi_disable(&jme->napi);
+ JME_NAPI_DISABLE(jme);
tasklet_kill(&jme->linkch_task);
tasklet_kill(&jme->txclean_task);
txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2;
txbi->skb = skb;
txbi->len = skb->len;
+ if(!(txbi->start_xmit = jiffies))
+ txbi->start_xmit = 1;
return 0;
}
jme_stop_queue_if_full(struct jme_adapter *jme)
{
struct jme_ring *txring = jme->txring;
+ struct jme_buffer_info *txbi = txring->bufinf;
+
+ txbi += atomic_read(&txring->next_to_clean);
smp_wmb();
if(unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
}
}
+ if(unlikely( txbi->start_xmit &&
+ (jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
+ txbi->skb)) {
+ netif_stop_queue(jme->dev);
+ }
}
/*
{
struct jme_adapter *jme = netdev_priv(netdev);
+ jme->phylink = 0;
+ jme_reset_phy_processor(jme);
+ if(jme->flags & JME_FLAG_SSET)
+ jme_set_settings(netdev, &jme->old_ecmd);
+
/*
- * Reset the link
- * And the link change will reinitialize all RX/TX resources
+ * Force to Reset the link again
*/
- jme->phylink = 0;
jme_reset_link(jme);
}
{
struct jme_adapter *jme = netdev_priv(netdev);
- if(jme->flags & JME_FLAG_POLL)
- ecmd->use_adaptive_rx_coalesce = false;
- else
- ecmd->use_adaptive_rx_coalesce = true;
-
ecmd->tx_coalesce_usecs = PCC_TX_TO;
ecmd->tx_max_coalesced_frames = PCC_TX_CNT;
+ if(jme->flags & JME_FLAG_POLL) {
+ ecmd->use_adaptive_rx_coalesce = false;
+ ecmd->rx_coalesce_usecs = 0;
+ ecmd->rx_max_coalesced_frames = 0;
+ return 0;
+ }
+
+ ecmd->use_adaptive_rx_coalesce = true;
+
switch(jme->dpi.cur) {
case PCC_P1:
ecmd->rx_coalesce_usecs = PCC_P1_TO;
struct jme_adapter *jme = netdev_priv(netdev);
struct dynpcc_info *dpi = &(jme->dpi);
+ if(netif_running(netdev))
+ return -EBUSY;
+
if(ecmd->use_adaptive_rx_coalesce
&& (jme->flags & JME_FLAG_POLL)) {
jme->flags &= ~JME_FLAG_POLL;
- napi_disable(&jme->napi);
+ jme->jme_rx = netif_rx;
+ jme->jme_vlan_rx = vlan_hwaccel_rx;
dpi->cur = PCC_P1;
dpi->attempt = PCC_P1;
dpi->cnt = 0;
else if(!(ecmd->use_adaptive_rx_coalesce)
&& !(jme->flags & JME_FLAG_POLL)) {
jme->flags |= JME_FLAG_POLL;
- napi_enable(&jme->napi);
+ jme->jme_rx = netif_receive_skb;
+ jme->jme_vlan_rx = vlan_hwaccel_receive_skb;
jme_interrupt_mode(jme);
}
}
__always_inline static void
-jme_set_phy_ps(struct jme_adapter *jme)
+jme_phy_init(struct jme_adapter *jme)
+{
+ __u16 reg26;
+
+ reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26); /* read-modify-write keeps other reg 26 bits (old code wrote 0x00001000 outright) */
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000); /* bit 12 of vendor reg 26 -- presumably power-save/init control, confirm with PHY datasheet */
+}
+
+__always_inline static void
+jme_set_gmii(struct jme_adapter *jme)
+{
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004); /* FPGA boards only: vendor reg 27 -- presumably selects GMII mode, confirm */
+}
+
+static void
+jme_check_hw_ver(struct jme_adapter *jme)
{
- jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, 0x00001000);
+ __u32 chipmode;
+
+ chipmode = jread32(jme, JME_CHIPMODE);
+
+ jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT; /* non-zero only on FPGA prototype hardware; gates phy-id probe and link-poll setup */
+ jme->chipver = (chipmode & CM_CHIPVER_MASK) >> CM_CHIPVER_SHIFT;
+}
static int __devinit
jme_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- int rc = 0, using_dac;
+ int rc = 0, using_dac, i;
struct net_device *netdev;
struct jme_adapter *jme;
+ __u16 bmcr, bmsr;
/*
* set up PCI device basics
jme = netdev_priv(netdev);
jme->pdev = pdev;
jme->dev = netdev;
+ jme->jme_rx = netif_rx;
+ jme->jme_vlan_rx = vlan_hwaccel_rx;
jme->old_mtu = netdev->mtu = 1500;
jme->phylink = 0;
jme->tx_ring_size = 1 << 10;
goto err_out_unmap;
}
- netif_napi_add(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2);
+ NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2)
spin_lock_init(&jme->phy_lock);
spin_lock_init(&jme->macaddr_lock);
tasklet_init(&jme->rxempty_task,
&jme_rx_empty_tasklet,
(unsigned long) jme);
- jme->mii_if.dev = netdev;
- jme->mii_if.phy_id = 1;
- jme->mii_if.supports_gmii = 1;
- jme->mii_if.mdio_read = jme_mdio_read;
- jme->mii_if.mdio_write = jme_mdio_write;
-
jme->dpi.cur = PCC_P1;
jme->reg_ghc = GHC_DPX | GHC_SPEED_1000M;
jme->reg_rxmcs = RXMCS_DEFAULT;
jme->reg_txpfc = 0;
jme->reg_pmcs = PMCS_LFEN | PMCS_LREN | PMCS_MFEN;
- jme->flags = JME_FLAG_TXCSUM | JME_FLAG_TSO | JME_FLAG_POLL;
+ jme->flags = JME_FLAG_TXCSUM | JME_FLAG_TSO;
/*
* Get Max Read Req Size from PCI Config Space
/*
- * Reset MAC processor and reload EEPROM for MAC Address
+ * Must check before reset_mac_processor
*/
+ jme_check_hw_ver(jme);
+ jme->mii_if.dev = netdev;
+ if(jme->fpgaver) {
+ jme->mii_if.phy_id = 0;
+ for(i = 1 ; i < 32 ; ++i) {
+ bmcr = jme_mdio_read(netdev, i, MII_BMCR);
+ bmsr = jme_mdio_read(netdev, i, MII_BMSR);
+ if(bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) {
+ jme->mii_if.phy_id = i;
+ break;
+ }
+ }
+
+ if(!jme->mii_if.phy_id) {
+ rc = -EIO;
+ printk(KERN_ERR PFX "Can not find phy_id.\n");
+ goto err_out_free_shadow;
+ }
+
+ jme->reg_ghc |= GHC_LINK_POLL;
+ }
+ else {
+ jme->mii_if.phy_id = 1;
+ }
+ jme->mii_if.supports_gmii = 1;
+ jme->mii_if.mdio_read = jme_mdio_read;
+ jme->mii_if.mdio_write = jme_mdio_write;
+
jme_clear_pm(jme);
- jme_set_phy_ps(jme);
+ if(jme->fpgaver)
+ jme_set_gmii(jme);
+ else
+ jme_phy_init(jme);
jme_phy_off(jme);
+
+ /*
+ * Reset MAC processor and reload EEPROM for MAC Address
+ */
jme_reset_mac_processor(jme);
rc = jme_reload_eeprom(jme);
if(rc) {
}
jprintk(netdev->name,
- "JMC250 gigabit eth %02x:%02x:%02x:%02x:%02x:%02x\n",
+ "JMC250 gigabit%s ver:%u eth %02x:%02x:%02x:%02x:%02x:%02x\n",
+ (jme->fpgaver != 0)?" (FPGA)":"",
+ (jme->fpgaver != 0)?jme->fpgaver:jme->chipver,
netdev->dev_addr[0],
netdev->dev_addr[1],
netdev->dev_addr[2],
netif_carrier_off(netdev);
jme->phylink = 0;
- if(jme->flags & JME_FLAG_POLL) {
+ if(jme->flags & JME_FLAG_POLL)
jme_polling_mode(jme);
- napi_disable(&jme->napi);
- }
}
#include <linux/version.h>
#define DRV_NAME "jme"
-#define DRV_VERSION "0.9a"
+#define DRV_VERSION "0.9b"
#define PFX DRV_NAME ": "
#ifdef DEBUG
};
#define PCC_INTERVAL_US 100000
#define PCC_INTERVAL (HZ / (1000000/PCC_INTERVAL_US))
-#define PCC_P3_THRESHOLD 3*1024*1024
+#define PCC_P3_THRESHOLD 2*1024*1024
#define PCC_P2_THRESHOLD 800
#define PCC_INTR_THRESHOLD 800
#define PCC_TX_TO 333
dma_addr_t mapping;
int len;
int nr_desc;
+ unsigned long start_xmit;
};
#define MAX_RING_DESC_NR 1024
struct jme_buffer_info bufinf[MAX_RING_DESC_NR];
int next_to_use;
- int next_to_clean;
-
+ atomic_t next_to_clean;
atomic_t nr_free;
};
#define DECLARE_NET_DEVICE_STATS
#endif
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) /* pre-2.6.24: NAPI state lives in struct net_device */
+#define DECLARE_NAPI_STRUCT /* no napi_struct member needed on old kernels */
+#define NETIF_NAPI_SET(dev, napis, pollfn, q) \
+ dev->poll = pollfn; \
+ dev->weight = q;
+#define JME_NAPI_HOLDER(holder) struct net_device *holder /* old poll() receives the net_device */
+#define JME_NAPI_WEIGHT(w) int *w /* old poll() takes the budget by pointer */
+#define JME_NAPI_WEIGHT_SET(w, r) *w = r /* report remaining budget back through the pointer */
+#define JME_RX_COMPLETE(dev, napis) netif_rx_complete(dev)
+#define JME_NAPI_ENABLE(priv) netif_poll_enable(priv->dev);
+#define JME_NAPI_DISABLE(priv) netif_poll_disable(priv->dev);
+#define JME_RX_SCHEDULE_PREP(priv) \
+ netif_rx_schedule_prep(priv->dev)
+#define JME_RX_SCHEDULE(priv) \
+ __netif_rx_schedule(priv->dev);
+#else /* >= 2.6.24: napi_struct API */
+#define DECLARE_NAPI_STRUCT struct napi_struct napi;
+#define NETIF_NAPI_SET(dev, napis, pollfn, q) \
+ netif_napi_add(dev, napis, pollfn, q);
+#define JME_NAPI_HOLDER(holder) struct napi_struct *holder
+#define JME_NAPI_WEIGHT(w) int w
+#define JME_NAPI_WEIGHT_VAL(w) w
+#define JME_NAPI_WEIGHT_SET(w, r) /* no-op: new-style poll() returns work done directly */
+#define JME_RX_COMPLETE(dev, napis) netif_rx_complete(dev, napis)
+#define JME_NAPI_ENABLE(priv) napi_enable(&priv->napi);
+#define JME_NAPI_DISABLE(priv) \
+ if(!napi_disable_pending(&priv->napi)) \
+ napi_disable(&priv->napi);
+#define JME_RX_SCHEDULE_PREP(priv) \
+ netif_rx_schedule_prep(priv->dev, &priv->napi)
+#define JME_RX_SCHEDULE(priv) \
+ __netif_rx_schedule(priv->dev, &priv->napi);
+#endif
+
/*
* Jmac Adapter Private data
*/
__u32 rx_ring_size;
__u32 rx_ring_mask;
__u8 mrrs;
+ __u32 fpgaver;
+ __u32 chipver;
struct ethtool_cmd old_ecmd;
unsigned int old_mtu;
struct vlan_group* vlgrp;
atomic_t tx_cleaning;
atomic_t rx_cleaning;
atomic_t rx_empty;
- struct napi_struct napi;
+ int (*jme_rx)(struct sk_buff *skb);
+ int (*jme_vlan_rx)(struct sk_buff *skb,
+ struct vlan_group *grp,
+ unsigned short vlan_tag);
+ DECLARE_NAPI_STRUCT
DECLARE_NET_DEVICE_STATS
};
enum shadow_reg_val {
#define WAIT_TASKLET_TIMEOUT 500 /* 500 ms */
#define TX_TIMEOUT (5*HZ)
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
+__always_inline static struct jme_adapter*
+jme_napi_priv(struct net_device *holder)
+{
+ struct jme_adapter* jme;
+ jme = netdev_priv(holder); /* old NAPI: poll() is handed the net_device; adapter is its priv */
+ return jme;
+}
+#else
+__always_inline static struct jme_adapter*
+jme_napi_priv(struct napi_struct *napi)
+{
+ struct jme_adapter* jme;
+ jme = container_of(napi, struct jme_adapter, napi); /* napi member is embedded in jme_adapter (DECLARE_NAPI_STRUCT) */
+ return jme;
+}
+#endif
/*
* MMaped I/O Resters
JME_IENC = JME_MISC| 0x2C, /* Interrupt Enable - Clear Port */
JME_PCCRX0 = JME_MISC| 0x30, /* PCC Control for RX Queue 0 */
JME_PCCTX = JME_MISC| 0x40, /* PCC Control for TX Queues */
+ JME_CHIPMODE = JME_MISC| 0x44, /* Identify FPGA Version */
JME_SHBA_HI = JME_MISC| 0x48, /* Shadow Register Base HI */
JME_SHBA_LO = JME_MISC| 0x4C, /* Shadow Register Base LO */
JME_PCCSRX0 = JME_MISC| 0x80, /* PCC Status of RX0 */
{
return (((x) << SMI_PHY_ADDR_SHIFT) & SMI_PHY_ADDR_MASK);
}
-#define JME_PHY_TIMEOUT 1000 /* 1000 usec */
+#define JME_PHY_TIMEOUT 1000 /* 1000 msec */
/*
* Global Host Control
GPREG0_DISSH = 0xFF000000,
GPREG0_PCIRLMT = 0x00300000,
GPREG0_PCCNOMUTCLR = 0x00040000,
+ GPREG0_LNKINTPOLL = 0x00001000,
GPREG0_PCCTMR = 0x00000300,
GPREG0_PHYADDR = 0x0000001F,
};
static const __u32 INTR_ENABLE = INTR_SWINTR |
INTR_TMINTR |
INTR_LINKCH |
- INTR_RX0EMP |
INTR_PCCRX0TO |
INTR_PCCRX0 |
INTR_PCCTXTO |
- INTR_PCCTX;
+ INTR_PCCTX |
+ INTR_RX0EMP;
/*
* PCC Control Registers
PCCTXQ7_EN = 0x00000080,
};
+/*
+ * Chip Mode Register
+ */
+enum jme_chipmode_bit_masks {
+ CM_FPGAVER_MASK = 0xFFFF0000, /* non-zero => FPGA prototype board (probe prints "(FPGA)") */
+ CM_CHIPVER_MASK = 0x0000FF00,
+ CM_CHIPMODE_MASK = 0x0000000F, /* not referenced by the code in view */
+};
+enum jme_chipmode_shifts {
+ CM_FPGAVER_SHIFT = 16,
+ CM_CHIPVER_SHIFT = 8,
+};
/*
* Shadow base address register bits
readl((__u8*)jme->regs + reg);
}
+/*
+ * PHY Regs
+ */
+enum jme_phy_reg17_bit_masks {
+ PREG17_SPEED = 0xC000, /* vendor PHY status reg 17 fields -- TODO confirm against PHY datasheet */
+ PREG17_DUPLEX = 0x2000,
+ PREG17_SPDRSV = 0x0800,
+ PREG17_LNKUP = 0x0400,
+ PREG17_MDI = 0x0040,
+};
+enum jme_phy_reg17_vals {
+ PREG17_SPEED_10M = 0x0000,
+ PREG17_SPEED_100M = 0x4000,
+ PREG17_SPEED_1000M = 0x8000,
+};
+#define BMCR_ANCOMP 0x0020 /* NOTE(review): 0x0020 is the BMSR (reg 1) aneg-complete bit; name says BMCR -- confirm intended register */
+
/*
* Function prototypes for ethtool
*/
static int jme_set_macaddr(struct net_device *netdev, void *p);
static void jme_set_multi(struct net_device *netdev);
-