jwrite32f(jme, JME_IENC, INTR_ENABLE);
}
-static inline void
-jme_enable_shadow(struct jme_adapter *jme)
-{
- jwrite32(jme,
- JME_SHBA_LO,
- ((u32)jme->shadow_dma & ~((u32)0x1F)) | SHBA_POSTEN);
-}
-
-static inline void
-jme_disable_shadow(struct jme_adapter *jme)
-{
- jwrite32(jme, JME_SHBA_LO, 0x0);
-}
-
static u32
jme_linkstat_from_phy(struct jme_adapter *jme)
{
&(txring->dmaalloc),
GFP_ATOMIC);
- if (!txring->alloc) {
- txring->desc = NULL;
- txring->dmaalloc = 0;
- txring->dma = 0;
- return -ENOMEM;
- }
+ if (!txring->alloc)
+ goto err_set_null;
/*
* 16-byte alignment
atomic_set(&txring->next_to_clean, 0);
atomic_set(&txring->nr_free, jme->tx_ring_size);
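+ /* Buffer info is allocated per ring entry rather than as a fixed-size array. */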
+ txring->bufinf = kmalloc(sizeof(struct jme_buffer_info) *
+ jme->tx_ring_size, GFP_ATOMIC);
+ if (unlikely(!(txring->bufinf)))
+ goto err_free_txring;
+
/*
* Initialize Transmit Descriptors
*/
sizeof(struct jme_buffer_info) * jme->tx_ring_size);
return 0;
+
+err_free_txring:
+ dma_free_coherent(&(jme->pdev->dev),
+ TX_RING_ALLOC_SIZE(jme->tx_ring_size),
+ txring->alloc,
+ txring->dmaalloc);
+
+err_set_null:
+ txring->desc = NULL;
+ txring->dmaalloc = 0;
+ txring->dma = 0;
+ txring->bufinf = NULL;
+
+ return -ENOMEM;
}
static void
{
int i;
struct jme_ring *txring = &(jme->txring[0]);
- struct jme_buffer_info *txbi = txring->bufinf;
+ struct jme_buffer_info *txbi;
if (txring->alloc) {
- for (i = 0 ; i < jme->tx_ring_size ; ++i) {
- txbi = txring->bufinf + i;
- if (txbi->skb) {
- dev_kfree_skb(txbi->skb);
- txbi->skb = NULL;
+ if (txring->bufinf) {
+ for (i = 0 ; i < jme->tx_ring_size ; ++i) {
+ txbi = txring->bufinf + i;
+ if (txbi->skb) {
+ dev_kfree_skb(txbi->skb);
+ txbi->skb = NULL;
+ }
+ txbi->mapping = 0;
+ txbi->len = 0;
+ txbi->nr_desc = 0;
+ txbi->start_xmit = 0;
}
- txbi->mapping = 0;
- txbi->len = 0;
- txbi->nr_desc = 0;
- txbi->start_xmit = 0;
+ kfree(txring->bufinf);
}
dma_free_coherent(&(jme->pdev->dev),
txring->desc = NULL;
txring->dmaalloc = 0;
txring->dma = 0;
+ txring->bufinf = NULL;
}
txring->next_to_use = 0;
atomic_set(&txring->next_to_clean, 0);
atomic_set(&txring->nr_free, 0);
-
}
static inline void
static void
jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
- struct jme_ring *rxring = jme->rxring;
+ struct jme_ring *rxring = &(jme->rxring[0]);
register struct rxdesc *rxdesc = rxring->desc;
struct jme_buffer_info *rxbi = rxring->bufinf;
rxdesc += i;
struct jme_ring *rxring = &(jme->rxring[0]);
if (rxring->alloc) {
- for (i = 0 ; i < jme->rx_ring_size ; ++i)
- jme_free_rx_buf(jme, i);
+ if (rxring->bufinf) {
+ for (i = 0 ; i < jme->rx_ring_size ; ++i)
+ jme_free_rx_buf(jme, i);
+ kfree(rxring->bufinf);
+ }
dma_free_coherent(&(jme->pdev->dev),
RX_RING_ALLOC_SIZE(jme->rx_ring_size),
rxring->desc = NULL;
rxring->dmaalloc = 0;
rxring->dma = 0;
+ rxring->bufinf = NULL;
}
rxring->next_to_use = 0;
atomic_set(&rxring->next_to_clean, 0);
RX_RING_ALLOC_SIZE(jme->rx_ring_size),
&(rxring->dmaalloc),
GFP_ATOMIC);
- if (!rxring->alloc) {
- rxring->desc = NULL;
- rxring->dmaalloc = 0;
- rxring->dma = 0;
- return -ENOMEM;
- }
+ if (!rxring->alloc)
+ goto err_set_null;
/*
* 16-byte alignment
rxring->next_to_use = 0;
atomic_set(&rxring->next_to_clean, 0);
+ rxring->bufinf = kmalloc(sizeof(struct jme_buffer_info) *
+ jme->rx_ring_size, GFP_ATOMIC);
+ if (unlikely(!(rxring->bufinf)))
+ goto err_free_rxring;
+
/*
* Initialize Receive Descriptors
*/
+ memset(rxring->bufinf, 0,
+ sizeof(struct jme_buffer_info) * jme->rx_ring_size);
for (i = 0 ; i < jme->rx_ring_size ; ++i) {
if (unlikely(jme_make_new_rx_buf(jme, i))) {
jme_free_rx_resources(jme);
}
return 0;
+
+err_free_rxring:
+ dma_free_coherent(&(jme->pdev->dev),
+ RX_RING_ALLOC_SIZE(jme->rx_ring_size),
+ rxring->alloc,
+ rxring->dmaalloc);
+err_set_null:
+ rxring->desc = NULL;
+ rxring->dmaalloc = 0;
+ rxring->dma = 0;
+ rxring->bufinf = NULL;
+
+ return -ENOMEM;
}
static inline void
/*
* Setup RX DMA Base Address
*/
- jwrite32(jme, JME_RXDBA_LO, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);
+ jwrite32(jme, JME_RXDBA_LO, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);
jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
- jwrite32(jme, JME_RXNDA, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);
+ jwrite32(jme, JME_RXNDA, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);
/*
* Setup RX Descriptor Count
if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
return false;
- if (unlikely(!(flags & RXWBFLAG_MF) &&
- (flags & RXWBFLAG_TCPON) && !(flags & RXWBFLAG_TCPCS))) {
- msg_rx_err(jme, "TCP Checksum error.\n");
- goto out_sumerr;
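+ /*
+  * Mask-and-compare: each condition below holds only when the ON
+  * flag is set while MF and the CS flag are both clear, i.e. a
+  * non-fragmented frame whose hardware checksum verification
+  * failed. The TCP and UDP paths report errors only for IPv4
+  * frames.
+  */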
+ if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_TCPON | RXWBFLAG_TCPCS))
+ == RXWBFLAG_TCPON)) {
+ if (flags & RXWBFLAG_IPV4)
+ msg_rx_err(jme, "TCP Checksum error\n");
+ return false;
}
- if (unlikely(!(flags & RXWBFLAG_MF) &&
- (flags & RXWBFLAG_UDPON) && !(flags & RXWBFLAG_UDPCS))) {
- msg_rx_err(jme, "UDP Checksum error.\n");
- goto out_sumerr;
+ if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
+ == RXWBFLAG_UDPON)) {
+ if (flags & RXWBFLAG_IPV4)
+ msg_rx_err(jme, "UDP Checksum error.\n");
+ return false;
}
- if (unlikely((flags & RXWBFLAG_IPV4) && !(flags & RXWBFLAG_IPCS))) {
+ if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS))
+ == RXWBFLAG_IPV4)) {
msg_rx_err(jme, "IPv4 Checksum error.\n");
- goto out_sumerr;
+ return false;
}
return true;
-
-out_sumerr:
- return false;
}
static void
goto out_inc;
i = atomic_read(&rxring->next_to_clean);
- while (limit-- > 0) {
+ while (limit > 0) {
rxdesc = rxring->desc;
rxdesc += i;
if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) ||
!(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
goto out;
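+ /* Consume the rx budget only for descriptors that completed. */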
+ --limit;
desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;
static void
jme_wake_queue_if_stopped(struct jme_adapter *jme)
{
- struct jme_ring *txring = jme->txring;
+ struct jme_ring *txring = &(jme->txring[0]);
smp_wmb();
if (unlikely(netif_queue_stopped(jme->dev) &&
struct jme_adapter *jme = netdev_priv(netdev);
u32 intrstat;
- pci_dma_sync_single_for_cpu(jme->pdev,
- jme->shadow_dma,
- sizeof(u32) * SHADOW_REG_NR,
- PCI_DMA_FROMDEVICE);
- intrstat = jme->shadow_regs[SHADOW_IEVE];
- jme->shadow_regs[SHADOW_IEVE] = 0;
+ intrstat = jread32(jme, JME_IEVE);
jme_intr_msi(jme, intrstat);
jme_clear_pm(jme);
JME_NAPI_ENABLE(jme);
+ tasklet_enable(&jme->linkch_task);
tasklet_enable(&jme->txclean_task);
tasklet_hi_enable(&jme->rxclean_task);
tasklet_hi_enable(&jme->rxempty_task);
if (rc)
goto err_out;
- jme_enable_shadow(jme);
jme_start_irq(jme);
if (test_bit(JME_FLAG_SSET, &jme->flags))
netif_carrier_off(netdev);
jme_stop_irq(jme);
- jme_disable_shadow(jme);
jme_free_irq(jme);
JME_NAPI_DISABLE(jme);
- tasklet_kill(&jme->linkch_task);
- tasklet_kill(&jme->txclean_task);
- tasklet_kill(&jme->rxclean_task);
- tasklet_kill(&jme->rxempty_task);
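+ /* Tasklets are initialized once at probe time; open/close merely enables and disables them. */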
+ tasklet_disable(&jme->linkch_task);
+ tasklet_disable(&jme->txclean_task);
+ tasklet_disable(&jme->rxclean_task);
+ tasklet_disable(&jme->rxempty_task);
jme_reset_ghc_speed(jme);
jme_disable_rx_engine(jme);
jme_alloc_txdesc(struct jme_adapter *jme,
struct sk_buff *skb)
{
- struct jme_ring *txring = jme->txring;
+ struct jme_ring *txring = &(jme->txring[0]);
int idx, nr_alloc, mask = jme->tx_ring_mask;
idx = txring->next_to_use;
static void
jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
{
- struct jme_ring *txring = jme->txring;
+ struct jme_ring *txring = &(jme->txring[0]);
struct txdesc *txdesc = txring->desc, *ctxdesc;
struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
{
if (unlikely(
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,16)
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17)
skb_shinfo(skb)->tso_size
#else
skb_shinfo(skb)->gso_size
static int
jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
{
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,16)
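+ /* skb_shinfo()->tso_size was renamed to gso_size in 2.6.18. */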
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17)
*mss = cpu_to_le16(skb_shinfo(skb)->tso_size << TXDESC_MSS_SHIFT);
#else
*mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT);
static int
jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
{
- struct jme_ring *txring = jme->txring;
+ struct jme_ring *txring = &(jme->txring[0]);
struct txdesc *txdesc;
struct jme_buffer_info *txbi;
u8 flags;
static void
jme_stop_queue_if_full(struct jme_adapter *jme)
{
- struct jme_ring *txring = jme->txring;
+ struct jme_ring *txring = &(jme->txring[0]);
struct jme_buffer_info *txbi = txring->bufinf;
int idx = atomic_read(&txring->next_to_clean);
TXCS_SELECT_QUEUE0 |
TXCS_QUEUE0S |
TXCS_ENABLE);
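+ /* Kernels after 2.6.29 no longer expect the driver to update trans_start. */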
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,29)
netdev->trans_start = jiffies;
+#endif
tx_dbg(jme, "xmit: %d+%d@%lu\n", idx,
skb_shinfo(skb)->nr_frags + 2,
jme_pci_dma64(struct pci_dev *pdev)
{
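+ /* Prefer DMA_BIT_MASK() where available; older kernels provide only the DMA_nnBIT_MASK constants. */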
if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
- !pci_set_dma_mask(pdev, DMA_64BIT_MASK))
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
+#else
+ !pci_set_dma_mask(pdev, DMA_64BIT_MASK)
+#endif
+ )
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
+ if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
+#else
if (!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
+#endif
return 1;
if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
- !pci_set_dma_mask(pdev, DMA_40BIT_MASK))
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(40))
+#else
+ !pci_set_dma_mask(pdev, DMA_40BIT_MASK)
+#endif
+ )
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
+ if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)))
+#else
if (!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK))
+#endif
return 1;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
+ if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+#else
if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
if (!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
+#endif
return 0;
return -1;
jme->jme_vlan_rx = vlan_hwaccel_rx;
jme->old_mtu = netdev->mtu = 1500;
jme->phylink = 0;
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
- jme->tx_ring_size = 1 << 9;
- jme->tx_wake_threshold = 1 << 8;
- jme->rx_ring_size = 1 << 8;
-#else
jme->tx_ring_size = 1 << 10;
+ jme->tx_ring_mask = jme->tx_ring_size - 1;
jme->tx_wake_threshold = 1 << 9;
jme->rx_ring_size = 1 << 9;
-#endif
- jme->tx_ring_mask = jme->tx_ring_size - 1;
jme->rx_ring_mask = jme->rx_ring_size - 1;
jme->msg_enable = JME_DEF_MSG_ENABLE;
jme->regs = ioremap(pci_resource_start(pdev, 0),
rc = -ENOMEM;
goto err_out_free_netdev;
}
- jme->shadow_regs = pci_alloc_consistent(pdev,
- sizeof(u32) * SHADOW_REG_NR,
- &(jme->shadow_dma));
- if (!(jme->shadow_regs)) {
- jeprintk(pdev, "Allocating shadow register mapping error.\n");
- rc = -ENOMEM;
- goto err_out_unmap;
- }
if (no_pseudohp) {
apmc = jread32(jme, JME_APMC) & ~JME_APMC_PSEUDO_HP_EN;
tasklet_init(&jme->rxempty_task,
&jme_rx_empty_tasklet,
(unsigned long) jme);
+ tasklet_disable_nosync(&jme->linkch_task);
tasklet_disable_nosync(&jme->txclean_task);
tasklet_disable_nosync(&jme->rxclean_task);
tasklet_disable_nosync(&jme->rxempty_task);
if (!jme->mii_if.phy_id) {
rc = -EIO;
jeprintk(pdev, "Can not find phy_id.\n");
- goto err_out_free_shadow;
+ goto err_out_unmap;
}
jme->reg_ghc |= GHC_LINK_POLL;
if (rc) {
jeprintk(pdev,
"Reload eeprom for reading MAC Address error.\n");
- goto err_out_free_shadow;
+ goto err_out_unmap;
}
jme_load_macaddr(netdev);
rc = register_netdev(netdev);
if (rc) {
jeprintk(pdev, "Cannot register net device.\n");
- goto err_out_free_shadow;
+ goto err_out_unmap;
}
- msg_probe(jme, "%s%s ver:%x rev:%x macaddr:%pM\n",
+ msg_probe(jme, "%s%s ver:%x rev:%x "
+ "macaddr: %02x:%02x:%02x:%02x:%02x:%02x\n",
(jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
"JMC250 Gigabit Ethernet" :
(jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
"JMC260 Fast Ethernet" : "Unknown",
(jme->fpgaver != 0) ? " (FPGA)" : "",
(jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
- jme->rev, netdev->dev_addr);
+ jme->rev,
+ netdev->dev_addr[0],
+ netdev->dev_addr[1],
+ netdev->dev_addr[2],
+ netdev->dev_addr[3],
+ netdev->dev_addr[4],
+ netdev->dev_addr[5]);
return 0;
-err_out_free_shadow:
- pci_free_consistent(pdev,
- sizeof(u32) * SHADOW_REG_NR,
- jme->shadow_regs,
- jme->shadow_dma);
err_out_unmap:
iounmap(jme->regs);
err_out_free_netdev:
struct jme_adapter *jme = netdev_priv(netdev);
unregister_netdev(netdev);
- pci_free_consistent(pdev,
- sizeof(u32) * SHADOW_REG_NR,
- jme->shadow_regs,
- jme->shadow_dma);
iounmap(jme->regs);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
tasklet_disable(&jme->rxclean_task);
tasklet_disable(&jme->rxempty_task);
- jme_disable_shadow(jme);
-
if (netif_carrier_ok(netdev)) {
if (test_bit(JME_FLAG_POLL, &jme->flags))
jme_polling_mode(jme);
else
jme_reset_phy_processor(jme);
- jme_enable_shadow(jme);
jme_start_irq(jme);
netif_device_attach(netdev);
#define __JME_H_INCLUDED__
#define DRV_NAME "jme"
-#define DRV_VERSION "1.0.4"
+#define DRV_VERSION "1.0.5"
#define PFX DRV_NAME ": "
#define PCI_DEVICE_ID_JMICRON_JMC250 0x0250
};
#define TXDESC_MSS_SHIFT 2
-enum jme_rxdescwb_flags_bits {
+enum jme_txwbdesc_flags_bits {
TXWBFLAG_OWN = 0x80,
TXWBFLAG_INT = 0x40,
TXWBFLAG_TMOUT = 0x20,
/*
* The structure holding buffer information and ring descriptors all together.
*/
-#include <linux/version.h>
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
-#define MAX_RING_DESC_NR 512
-#else
-#define MAX_RING_DESC_NR 1024
-#endif
-
struct jme_ring {
void *alloc; /* pointer to allocated memory */
void *desc; /* pointer to ring memory */
dma_addr_t dma; /* phys address for ring dma */
/* Buffer information corresponding to each descriptor */
- struct jme_buffer_info bufinf[MAX_RING_DESC_NR];
+ struct jme_buffer_info *bufinf;
int next_to_use;
atomic_t next_to_clean;
atomic_t nr_free;
};
+#include <linux/version.h>
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
#define false 0
#define true 1
return skb->h.th;
}
#else
-#define NET_STAT(priv) priv->dev->stats
+#define NET_STAT(priv) (priv->dev->stats)
#define NETDEV_GET_STATS(netdev, fun_ptr)
#define DECLARE_NET_DEVICE_STATS
#endif
netif_rx_schedule_prep(priv->dev)
#define JME_RX_SCHEDULE(priv) \
__netif_rx_schedule(priv->dev);
-#elif LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,28)
+#else
#define DECLARE_NAPI_STRUCT struct napi_struct napi;
#define NETIF_NAPI_SET(dev, napis, pollfn, q) \
netif_napi_add(dev, napis, pollfn, q);
napi_schedule_prep(&priv->napi)
#define JME_RX_SCHEDULE(priv) \
__napi_schedule(&priv->napi);
-#else
-#define DECLARE_NAPI_STRUCT struct napi_struct napi;
-#define NETIF_NAPI_SET(dev, napis, pollfn, q) \
- netif_napi_add(dev, napis, pollfn, q);
-#define JME_NAPI_HOLDER(holder) struct napi_struct *holder
-#define JME_NAPI_WEIGHT(w) int w
-#define JME_NAPI_WEIGHT_VAL(w) w
-#define JME_NAPI_WEIGHT_SET(w, r)
-#define DECLARE_NETDEV struct net_device *netdev = jme->dev;
-#define JME_RX_COMPLETE(dev, napis) netif_rx_complete(dev, napis)
-#define JME_NAPI_ENABLE(priv) napi_enable(&priv->napi);
-#define JME_NAPI_DISABLE(priv) \
- if (!napi_disable_pending(&priv->napi)) \
- napi_disable(&priv->napi);
-#define JME_RX_SCHEDULE_PREP(priv) \
- netif_rx_schedule_prep(priv->dev, &priv->napi)
-#define JME_RX_SCHEDULE(priv) \
- __netif_rx_schedule(priv->dev, &priv->napi);
#endif
/*
* Jmac Adapter Private data
*/
-#define SHADOW_REG_NR 8
struct jme_adapter {
struct pci_dev *pdev;
struct net_device *dev;
void __iomem *regs;
- dma_addr_t shadow_dma;
- u32 *shadow_regs;
struct mii_if_info mii_if;
struct jme_ring rxring[RX_RING_NR];
struct jme_ring txring[TX_RING_NR];
}
#endif
-enum shadow_reg_val {
- SHADOW_IEVE = 0,
-};
-
enum jme_flags_bits {
JME_FLAG_MSI = 1,
JME_FLAG_SSET = 2,
CM_CHIPREV_SHIFT = 8,
};
-/*
- * Shadow base address register bits
- */
-enum jme_shadow_base_address_bits {
- SHBA_POSTEN = 0x1,
-};
-
/*
* Aggressive Power Mode Control
*/