/*
* TODO:
- * - Implement MSI-X.
- * Along with multiple RX queue, for CPU load balancing.
* - Decode register dump for ethtool.
- * - Implement NAPI?
- * PCC Support Both Packet Counter and Timeout Interrupt for
- * receive and transmit complete, does NAPI really needed?
*/
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
+#include <linux/irq.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
+#include <linux/net.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
jme_mdio_read(struct net_device *netdev, int phy, int reg)
{
struct jme_adapter *jme = netdev_priv(netdev);
- int i, val;
+ int i, val, again = (reg == MII_BMSR)?1:0;
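+	/*
+	 * The BMSR latches link-down events: read it twice so the
+	 * second read reflects the current link state.
+	 */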
+read_again:
jwrite32(jme, JME_SMI, SMI_OP_REQ |
smi_phy_addr(phy) |
smi_reg_addr(reg));
wmb();
- for (i = JME_PHY_TIMEOUT ; i > 0 ; --i) {
- udelay(1);
+ for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
+ udelay(20);
val = jread32(jme, JME_SMI);
if ((val & SMI_OP_REQ) == 0)
break;
}
if (i == 0) {
- jeprintk(netdev->name, "phy read timeout : %d\n", reg);
+ jeprintk("jme", "phy(%d) read timeout : %d\n", phy, reg);
return 0;
}
+ if(again--)
+ goto read_again;
+
return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}
smi_phy_addr(phy) | smi_reg_addr(reg));
wmb();
- for (i = JME_PHY_TIMEOUT ; i > 0 ; --i) {
- udelay(1);
- val = jread32(jme, JME_SMI);
- if ((val & SMI_OP_REQ) == 0)
+ for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
+ udelay(20);
+ if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0)
break;
}
if (i == 0)
- jeprintk(netdev->name, "phy write timeout : %d\n", reg);
+ jeprintk("jme", "phy(%d) write timeout : %d\n", phy, reg);
return;
}
MII_ADVERTISE, ADVERTISE_ALL |
ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
- jme_mdio_write(jme->dev,
- jme->mii_if.phy_id,
- MII_CTRL1000,
- ADVERTISE_1000FULL | ADVERTISE_1000HALF);
+ if(jme->pdev->device == JME_GE_DEVICE)
+ jme_mdio_write(jme->dev,
+ jme->mii_if.phy_id,
+ MII_CTRL1000,
+ ADVERTISE_1000FULL | ADVERTISE_1000HALF);
val = jme_mdio_read(jme->dev,
jme->mii_if.phy_id,
{
__u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0,0,0,0};
__u32 crc = 0xCDCDCDCD;
+ __u32 gpreg0;
int i;
jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
for(i = 0 ; i < WAKEUP_FRAME_NR ; ++i)
jme_setup_wakeup_frame(jme, mask, crc, i);
- jwrite32(jme, JME_GPREG0, GPREG0_DEFAULT);
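+	/*
+	 * FPGA boards apparently lack the normal link-change interrupt,
+	 * so ask the hardware to poll the link (GPREG0_LNKINTPOLL) instead.
+	 */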
+ if(jme->fpgaver)
+ gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL;
+ else
+ gpreg0 = GPREG0_DEFAULT;
+ jwrite32(jme, JME_GPREG0, gpreg0);
jwrite32(jme, JME_GPREG1, 0);
}
jwrite32(jme, JME_SMBCSR, val);
mdelay(12);
- for (i = JME_SMB_TIMEOUT; i > 0; --i)
+ for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i)
{
mdelay(1);
		if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
			break;
	}
if(i == 0) {
- jeprintk(jme->dev->name, "eeprom reload timeout\n");
+ jeprintk("jme", "eeprom reload timeout\n");
return -EIO;
}
}
- else
- return -EIO;
return 0;
}
jme_set_rx_pcc(struct jme_adapter *jme, int p)
{
switch(p) {
+ case PCC_OFF:
+ jwrite32(jme, JME_PCCRX0,
+ ((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
+ ((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK));
+ break;
case PCC_P1:
jwrite32(jme, JME_PCCRX0,
((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
default:
break;
}
+ wmb();
- dprintk(jme->dev->name, "Switched to PCC_P%d\n", p);
+ if(!(jme->flags & JME_FLAG_POLL))
+ dprintk(jme->dev->name, "Switched to PCC_P%d\n", p);
}
static void
jwrite32(jme, JME_SHBA_LO, 0x0);
}
+static __u32
+jme_linkstat_from_phy(struct jme_adapter *jme)
+{
+ __u32 phylink, bmsr;
+
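+	/*
+	 * On FPGA boards the JME_PHY_LINK register is not usable, so
+	 * synthesize the link status from the (vendor-specific) PHY
+	 * status register 17 plus the BMSR autoneg-complete bit.
+	 */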
+ phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17);
+ bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR);
+ if(bmsr & BMSR_ANCOMP)
+ phylink |= PHY_LINK_AUTONEG_COMPLETE;
+
+ return phylink;
+}
+
static int
jme_check_link(struct net_device *netdev, int testonly)
{
int rc = 0;
linkmsg[0] = '\0';
- phylink = jread32(jme, JME_PHY_LINK);
+
+ if(jme->fpgaver)
+ phylink = jme_linkstat_from_phy(jme);
+ else
+ phylink = jread32(jme, JME_PHY_LINK);
if (phylink & PHY_LINK_UP) {
if(!(phylink & PHY_LINK_AUTONEG_COMPLETE)) {
--cnt) {
udelay(1);
- phylink = jread32(jme, JME_PHY_LINK);
+ if(jme->fpgaver)
+ phylink = jme_linkstat_from_phy(jme);
+ else
+ phylink = jread32(jme, JME_PHY_LINK);
}
if(!cnt)
jme->phylink = phylink;
+ ghc = jme->reg_ghc & ~(GHC_SPEED_10M |
+ GHC_SPEED_100M |
+ GHC_SPEED_1000M |
+ GHC_DPX);
switch(phylink & PHY_LINK_SPEED_MASK) {
case PHY_LINK_SPEED_10M:
- ghc = GHC_SPEED_10M;
+ ghc |= GHC_SPEED_10M;
strcat(linkmsg, "10 Mbps, ");
break;
case PHY_LINK_SPEED_100M:
- ghc = GHC_SPEED_100M;
+ ghc |= GHC_SPEED_100M;
strcat(linkmsg, "100 Mbps, ");
break;
case PHY_LINK_SPEED_1000M:
- ghc = GHC_SPEED_1000M;
+ ghc |= GHC_SPEED_1000M;
strcat(linkmsg, "1000 Mbps, ");
break;
default:
- ghc = 0;
break;
}
ghc |= (phylink & PHY_LINK_DUPLEX) ? GHC_DPX : 0;
RING_DESC_ALIGN);
txring->dma = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
txring->next_to_use = 0;
- txring->next_to_clean = 0;
+ atomic_set(&txring->next_to_clean, 0);
atomic_set(&txring->nr_free, jme->tx_ring_size);
/*
txring->dma = 0;
}
txring->next_to_use = 0;
- txring->next_to_clean = 0;
+ atomic_set(&txring->next_to_clean, 0);
atomic_set(&txring->nr_free, 0);
}
static void
jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
- struct jme_ring *rxring = jme->rxring;
+ struct jme_ring *rxring = &(jme->rxring[0]);
register volatile struct rxdesc* rxdesc = rxring->desc;
struct jme_buffer_info *rxbi = rxring->bufinf;
rxdesc += i;
rxring->dma = 0;
}
rxring->next_to_use = 0;
- rxring->next_to_clean = 0;
+ atomic_set(&rxring->next_to_clean, 0);
}
static int
RING_DESC_ALIGN);
rxring->dma = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
rxring->next_to_use = 0;
- rxring->next_to_clean = 0;
+ atomic_set(&rxring->next_to_clean, 0);
/*
* Initiallize Receive Descriptors
}
+static int
+jme_rxsum_ok(struct jme_adapter *jme, __u16 flags)
+{
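+	/* The hardware checksummed nothing, so nothing can be verified. */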
+ if(!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
+ return false;
+
+ if(unlikely((flags & RXWBFLAG_TCPON) &&
+ !(flags & RXWBFLAG_TCPCS))) {
+ csum_dbg(jme->dev->name, "TCP Checksum error.\n");
+ goto out_sumerr;
+ }
+
+ if(unlikely((flags & RXWBFLAG_UDPON) &&
+ !(flags & RXWBFLAG_UDPCS))) {
+ csum_dbg(jme->dev->name, "UDP Checksum error.\n");
+ goto out_sumerr;
+ }
+
+ if(unlikely((flags & RXWBFLAG_IPV4) &&
+ !(flags & RXWBFLAG_IPCS))) {
+ csum_dbg(jme->dev->name, "IPv4 Checksum error.\n");
+ goto out_sumerr;
+ }
+
+ return true;
+
+out_sumerr:
+ csum_dbg(jme->dev->name, "%s%s%s%s\n",
+ (flags & RXWBFLAG_IPV4)?"IPv4 ":"",
+ (flags & RXWBFLAG_IPV6)?"IPv6 ":"",
+ (flags & RXWBFLAG_UDPON)?"UDP ":"",
+ (flags & RXWBFLAG_TCPON)?"TCP":"");
+ return false;
+}
+
static void
jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
{
skb_put(skb, framesize);
skb->protocol = eth_type_trans(skb, jme->dev);
- if((rxdesc->descwb.flags &
- (RXWBFLAG_TCPON |
- RXWBFLAG_UDPON |
- RXWBFLAG_IPV4)))
+ if(jme_rxsum_ok(jme, rxdesc->descwb.flags))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb->ip_summed = CHECKSUM_NONE;
if(jme->vlgrp) {
vlan_dbg(jme->dev->name,
"VLAN Passed to kernel.\n");
- vlan_hwaccel_rx(skb, jme->vlgrp,
+ jme->jme_vlan_rx(skb, jme->vlgrp,
le32_to_cpu(rxdesc->descwb.vlan));
NET_STAT(jme).rx_bytes += 4;
}
}
else {
- netif_rx(skb);
+ jme->jme_rx(skb);
}
if((le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST) ==
}
-static int
-jme_rxsum_bad(struct jme_adapter *jme, __u16 flags)
-{
- if(unlikely((flags & RXWBFLAG_TCPON) &&
- !(flags & RXWBFLAG_TCPCS))) {
- csum_dbg(jme->dev->name, "TCP Checksum error.\n");
- return 1;
- }
- else if(unlikely((flags & RXWBFLAG_UDPON) &&
- !(flags & RXWBFLAG_UDPCS))) {
- csum_dbg(jme->dev->name, "UDP Checksum error.\n");
- return 1;
- }
- else if(unlikely((flags & RXWBFLAG_IPV4) &&
- !(flags & RXWBFLAG_IPCS))) {
- csum_dbg(jme->dev->name, "IPv4 Checksum error.\n");
- return 1;
- }
- else {
- return 0;
- }
-}
+
static int
jme_process_receive(struct jme_adapter *jme, int limit)
volatile struct rxdesc *rxdesc = rxring->desc;
int i, j, ccnt, desccnt, mask = jme->rx_ring_mask;
- i = rxring->next_to_clean;
+ if(unlikely(!atomic_dec_and_test(&jme->rx_cleaning)))
+ goto out_inc;
+
+ if(unlikely(atomic_read(&jme->link_changing) != 1))
+ goto out_inc;
+
+ if(unlikely(!netif_carrier_ok(jme->dev)))
+ goto out_inc;
+
+ i = atomic_read(&rxring->next_to_clean);
while( limit-- > 0 )
{
rxdesc = rxring->desc;
rx_dbg(jme->dev->name, "RX: Cleaning %d\n", i);
if(unlikely(desccnt > 1 ||
- rxdesc->descwb.errstat & RXWBERR_ALLERR ||
- jme_rxsum_bad(jme, rxdesc->descwb.flags))) {
+ rxdesc->descwb.errstat & RXWBERR_ALLERR)) {
if(rxdesc->descwb.errstat & RXWBERR_CRCERR)
++(NET_STAT(jme).rx_crc_errors);
for(j = i, ccnt = desccnt ; ccnt-- ; ) {
jme_set_clean_rxdesc(jme, j);
-
j = (j + 1) & (mask);
}
i = (i + desccnt) & (mask);
}
+
out:
rx_dbg(jme->dev->name, "RX: Stop at %d\n", i);
rx_dbg(jme->dev->name, "RX: RXNDA offset %d\n",
(jread32(jme, JME_RXNDA) - jread32(jme, JME_RXDBA_LO))
>> 4);
- rxring->next_to_clean = i;
+ atomic_set(&rxring->next_to_clean, i);
+
+out_inc:
+ atomic_inc(&jme->rx_cleaning);
return limit > 0 ? limit : 0;
static void
jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
{
- if(likely(atmp == dpi->cur))
+ if(likely(atmp == dpi->cur)) {
+ dpi->cnt = 0;
return;
+ }
if(dpi->attempt == atmp) {
++(dpi->cnt);
if((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
jme_attempt_pcc(dpi, PCC_P3);
- else if((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P2_THRESHOLD
+ else if((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD
|| dpi->intr_cnt > PCC_INTR_THRESHOLD)
jme_attempt_pcc(dpi, PCC_P2);
else
jme_attempt_pcc(dpi, PCC_P1);
- if(unlikely(dpi->attempt != dpi->cur && dpi->cnt > 20)) {
+ if(unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
jme_set_rx_pcc(jme, dpi->attempt);
dpi->cur = dpi->attempt;
dpi->cnt = 0;
TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT));
}
-static void
+__always_inline static void
jme_stop_pcc_timer(struct jme_adapter *jme)
{
jwrite32(jme, JME_TMCSR, 0);
struct jme_adapter *jme = (struct jme_adapter*)arg;
struct net_device *netdev = jme->dev;
-
if(unlikely(!netif_carrier_ok(netdev) ||
(atomic_read(&jme->link_changing) != 1)
)) {
return;
}
- jme_dynamic_pcc(jme);
+ if(!(jme->flags & JME_FLAG_POLL))
+ jme_dynamic_pcc(jme);
+
jme_start_pcc_timer(jme);
}
+__always_inline static void
+jme_polling_mode(struct jme_adapter *jme)
+{
+ jme_set_rx_pcc(jme, PCC_OFF);
+}
+
+__always_inline static void
+jme_interrupt_mode(struct jme_adapter *jme)
+{
+ jme_set_rx_pcc(jme, PCC_P1);
+}
+
static void
jme_link_change_tasklet(unsigned long arg)
{
jme_reset_mac_processor(jme);
jme_free_rx_resources(jme);
jme_free_tx_resources(jme);
+
+ if(jme->flags & JME_FLAG_POLL)
+ jme_polling_mode(jme);
}
jme_check_link(netdev, 0);
jme_enable_tx_engine(jme);
netif_start_queue(netdev);
+
+ if(jme->flags & JME_FLAG_POLL)
+ jme_interrupt_mode(jme);
+
jme_start_pcc_timer(jme);
}
struct jme_adapter *jme = (struct jme_adapter*)arg;
struct dynpcc_info *dpi = &(jme->dpi);
- if(unlikely(!atomic_dec_and_test(&jme->rx_cleaning)))
- goto out;
+ jme_process_receive(jme, jme->rx_ring_size);
+ ++(dpi->intr_cnt);
- if(unlikely(atomic_read(&jme->link_changing) != 1))
- goto out;
+}
- if(unlikely(!netif_carrier_ok(jme->dev)))
- goto out;
+static int
+jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
+{
+ struct jme_adapter *jme = jme_napi_priv(holder);
+ struct net_device *netdev = jme->dev;
+ int rest;
- jme_process_receive(jme, jme->rx_ring_size);
- ++(dpi->intr_cnt);
+ rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget));
-out:
- atomic_inc(&jme->rx_cleaning);
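+	/*
+	 * Handle any RX-ring-empty events signalled by the interrupt
+	 * handler: count each as a drop and restart the RX engine.
+	 */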
+ while(atomic_read(&jme->rx_empty) > 0) {
+ atomic_dec(&jme->rx_empty);
+ ++(NET_STAT(jme).rx_dropped);
+ jme_restart_rx_engine(jme);
+ }
+ atomic_inc(&jme->rx_empty);
+
+ if(rest) {
+ JME_RX_COMPLETE(netdev, holder);
+ jme_interrupt_mode(jme);
+ }
+
+ JME_NAPI_WEIGHT_SET(budget, rest);
+ return JME_NAPI_WEIGHT_VAL(budget) - rest;
}
static void
queue_dbg(jme->dev->name, "RX Queue Full!\n");
jme_rx_clean_tasklet(arg);
- jme_restart_rx_engine(jme);
+
+ while(atomic_read(&jme->rx_empty) > 0) {
+ atomic_dec(&jme->rx_empty);
+ ++(NET_STAT(jme).rx_dropped);
+ jme_restart_rx_engine(jme);
+ }
+ atomic_inc(&jme->rx_empty);
}
static void
tx_dbg(jme->dev->name, "Tx Tasklet: In\n");
- for(i = txring->next_to_clean ; cnt < max ; ) {
+ for(i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) {
ctxbi = txbi + i;
ctxbi->skb = NULL;
ctxbi->len = 0;
+ ctxbi->start_xmit = 0;
}
else {
if(!ctxbi->skb)
tx_dbg(jme->dev->name,
"Tx Tasklet: Stop %d Jiffies %lu\n",
i, jiffies);
- txring->next_to_clean = i;
+ atomic_set(&txring->next_to_clean, i);
atomic_add(cnt, &txring->nr_free);
jme_wake_queue_if_stopped(jme);
static void
jme_intr_msi(struct jme_adapter *jme, __u32 intrstat)
{
- __u32 handled;
-
/*
* Disable interrupt
*/
jwrite32f(jme, JME_IENC, INTR_ENABLE);
- /*
- * Write 1 clear interrupt status
- */
- jwrite32f(jme, JME_IEVE, intrstat);
-
if(intrstat & (INTR_LINKCH | INTR_SWINTR)) {
tasklet_schedule(&jme->linkch_task);
goto out_reenable;
if(intrstat & INTR_TMINTR)
tasklet_schedule(&jme->pcc_task);
- if(intrstat & INTR_RX0EMP)
- tasklet_schedule(&jme->rxempty_task);
-
- if(intrstat & (INTR_PCCRX0TO | INTR_PCCRX0))
- tasklet_schedule(&jme->rxclean_task);
-
if(intrstat & (INTR_PCCTXTO | INTR_PCCTX))
tasklet_schedule(&jme->txclean_task);
- handled = INTR_ENABLE | INTR_RX0 | INTR_TX0 | INTR_PAUSERCV;
- if((intrstat & ~(handled)) != 0) {
- /*
- * Some interrupt not handled
- * but not enabled also (for debug)
- */
- dprintk(jme->dev->name,
- "UN-handled interrupt.(%08x)\n",
- intrstat & ~(handled));
+ if(jme->flags & JME_FLAG_POLL) {
+ if(intrstat & INTR_RX0EMP)
+ atomic_inc(&jme->rx_empty);
+
+ if((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
+ if(likely(JME_RX_SCHEDULE_PREP(jme))) {
+ jme_polling_mode(jme);
+ JME_RX_SCHEDULE(jme);
+ }
+ }
+ }
+ else {
+ if(intrstat & INTR_RX0EMP) {
+ atomic_inc(&jme->rx_empty);
+ tasklet_schedule(&jme->rxempty_task);
+ }
+ else if(intrstat & (INTR_PCCRX0TO | INTR_PCCRX0))
+ tasklet_schedule(&jme->rxclean_task);
}
out_reenable:
+ /*
+ * Write 1 clear interrupt status
+ */
+ jwrite32f(jme, JME_IEVE, intrstat);
+
/*
* Re-enable interrupt
*/
return IRQ_HANDLED;
}
+static irqreturn_t
+jme_msix_misc(int irq, void *dev_id)
+{
+ struct net_device *netdev = dev_id;
+ struct jme_adapter *jme = netdev_priv(netdev);
+ __u32 intrstat;
+
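+	/*
+	 * The NIC shadows its interrupt status to host memory; sync
+	 * the DMA buffer and read the status from there instead of
+	 * doing an MMIO read.
+	 */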
+ pci_dma_sync_single_for_cpu(jme->pdev,
+ jme->shadow_dma,
+ sizeof(__u32) * SHADOW_REG_NR,
+ PCI_DMA_FROMDEVICE);
+ intrstat = jme->shadow_regs[SHADOW_IEVE];
+ jme->shadow_regs[SHADOW_IEVE] &= ~INTR_EN_MISC;
+
+ /*
+ * Disable interrupt
+ */
+ jwrite32f(jme, JME_IENC, INTR_EN_MISC);
+
+ if(intrstat & (INTR_LINKCH | INTR_SWINTR)) {
+ tasklet_schedule(&jme->linkch_task);
+ goto out_reenable;
+ }
+
+ if(intrstat & INTR_TMINTR)
+ tasklet_schedule(&jme->pcc_task);
+
+out_reenable:
+ /*
+ * Write 1 clear interrupt status
+ */
+ jwrite32f(jme, JME_IEVE, INTR_EN_MISC);
+
+ /*
+ * Re-enable interrupt
+ */
+ jwrite32f(jme, JME_IENS, INTR_EN_MISC);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+jme_msix_tx(int irq, void *dev_id)
+{
+ struct net_device *netdev = dev_id;
+ struct jme_adapter *jme = netdev_priv(netdev);
+
+ /*
+ * Disable interrupt
+ */
+ jwrite32f(jme, JME_IENC, INTR_EN_TX);
+
+ if(unlikely(atomic_read(&jme->link_changing) != 1))
+ goto out_reenable;
+
+ tasklet_schedule(&jme->txclean_task);
+
+out_reenable:
+ /*
+ * Write 1 clear interrupt status
+ */
+ jwrite32f(jme, JME_IEVE, INTR_EN_TX | INTR_TX0);
+
+ /*
+ * Re-enable interrupt
+ */
+ jwrite32f(jme, JME_IENS, INTR_EN_TX);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+jme_msix_rx(int irq, void *dev_id)
+{
+ struct net_device *netdev = dev_id;
+ struct jme_adapter *jme = netdev_priv(netdev);
+ __u32 intrstat;
+
+ pci_dma_sync_single_for_cpu(jme->pdev,
+ jme->shadow_dma,
+ sizeof(__u32) * SHADOW_REG_NR,
+ PCI_DMA_FROMDEVICE);
+ intrstat = jme->shadow_regs[SHADOW_IEVE];
+ jme->shadow_regs[SHADOW_IEVE] &= ~INTR_EN_RX0;
+
+ /*
+ * Disable interrupt
+ */
+ jwrite32f(jme, JME_IENC, INTR_EN_RX0);
+
+ if(unlikely(atomic_read(&jme->link_changing) != 1))
+ goto out_reenable;
+
+ if(jme->flags & JME_FLAG_POLL) {
+ if(intrstat & INTR_RX0EMP)
+ atomic_inc(&jme->rx_empty);
+
+ if(likely(JME_RX_SCHEDULE_PREP(jme))) {
+ jme_polling_mode(jme);
+ JME_RX_SCHEDULE(jme);
+ }
+ }
+ else {
+ if(intrstat & INTR_RX0EMP) {
+ atomic_inc(&jme->rx_empty);
+ tasklet_schedule(&jme->rxempty_task);
+ }
+ else if(intrstat & (INTR_PCCRX0TO | INTR_PCCRX0))
+ tasklet_schedule(&jme->rxclean_task);
+ }
+
+out_reenable:
+ /*
+ * Write 1 clear interrupt status
+ */
+ jwrite32f(jme, JME_IEVE, INTR_EN_RX0 | INTR_RX0);
+
+ /*
+ * Re-enable interrupt
+ */
+ jwrite32f(jme, JME_IENS, INTR_EN_RX0);
+
+ return IRQ_HANDLED;
+}
static void
jme_reset_link(struct jme_adapter *jme)
spin_unlock_irqrestore(&jme->phy_lock, flags);
}
+static void
+jme_setup_msix_info(struct jme_adapter *jme, struct msix_entry *msix_ent)
+{
+ int i;
+
+ for (i = 0; i < JME_MSIX_VEC_NR; i++) {
+ jme->msix[i].requested = false;
+ jme->msix[i].vector = msix_ent[i].vector;
+ strcpy(jme->msix[i].name, jme->dev->name);
+ }
+
+ jme->msix[0].handler = jme_msix_misc;
+ jme->msix[1].handler = jme_msix_tx;
+ jme->msix[2].handler = jme_msix_rx;
+
+ strcat(jme->msix[0].name, "-misc");
+ strcat(jme->msix[1].name, "-tx");
+ strcat(jme->msix[2].name, "-rx");
+}
+
+static void
+jme_fill_msix_regs(struct jme_adapter *jme)
+{
+ __u32 mask = 1, reg_msix = 0;
+ int i, vec;
+
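+	/*
+	 * The MSI-X routing table packs a 4-bit vector number for each
+	 * of the 32 interrupt sources, eight sources per 32-bit
+	 * register: TX sources go to vector 1, RX0 sources to vector 2,
+	 * and everything else to vector 0.
+	 */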
+ for(i = 0 ; i < 32 ; ++i) {
+ if(mask & INTR_EN_TX)
+ vec = 1;
+ else if(mask & INTR_EN_RX0)
+ vec = 2;
+ else
+ vec = 0;
+
+ if(!(i & 7))
+ reg_msix = 0;
+ reg_msix |= (vec & 7) << ((i & 7) << 2);
+ if((i & 7) == 7)
+ jwrite32(jme,
+ JME_MSIX_ENT + ((i >> 3) << 2),
+ reg_msix);
+
+ mask <<= 1;
+ }
+}
+
+static int
+jme_request_msix_irq(struct jme_adapter *jme)
+{
+ int i, rc;
+ struct jme_msix_info *msix_info;
+
+ for (i = 0; i < JME_MSIX_VEC_NR; i++) {
+ msix_info = jme->msix + i;
+ rc = request_irq(msix_info->vector,
+ msix_info->handler,
+ 0,
+ msix_info->name,
+ jme->dev);
+ if(rc)
+ break;
+#if 0
+#ifdef CONFIG_SMP
+ /*
+	 * Try to set a different CPU affinity for each irq,
+	 * ignoring assignment failures since they have no
+	 * critical effect on functionality.
+ */
+ if(irq_can_set_affinity(msix_info->vector))
+ irq_set_affinity(msix_info->vector,
+ cpumask_of_cpu(i % num_online_cpus()));
+#endif
+#endif
+ msix_info->requested = true;
+ }
+
+ return rc;
+}
+
+static void
+jme_free_msix(struct jme_adapter *jme)
+{
+ int i;
+ struct jme_msix_info *msix_info;
+
+ for (i = 0; i < JME_MSIX_VEC_NR; i++) {
+ msix_info = jme->msix + i;
+ if(msix_info->requested)
+ free_irq(msix_info->vector, jme->dev);
+ else
+ break;
+ msix_info->requested = false;
+ }
+ pci_disable_msix(jme->pdev);
+}
+
+static int
+jme_request_msix(struct jme_adapter *jme)
+{
+ int i, rc;
+ struct msix_entry msix_ent[JME_MSIX_VEC_NR];
+
+ for (i = 0; i < JME_MSIX_VEC_NR; i++) {
+ msix_ent[i].entry = i;
+ msix_ent[i].vector = 0;
+ }
+
+ rc = pci_enable_msix(jme->pdev, msix_ent, JME_MSIX_VEC_NR);
+ if (rc)
+ goto out;
+
+ jme_setup_msix_info(jme, msix_ent);
+ jme_fill_msix_regs(jme);
+
+ rc = jme_request_msix_irq(jme);
+ if(rc)
+ goto out_free_msix;
+
+ return 0;
+
+out_free_msix:
+ jme_free_msix(jme);
+out:
+ return rc;
+}
+
static int
jme_request_irq(struct jme_adapter *jme)
{
irq_handler_t handler = jme_intr;
int irq_flags = IRQF_SHARED;
- if (!pci_enable_msi(jme->pdev)) {
+
+ if(!jme_request_msix(jme)) {
+ jme->flags |= JME_FLAG_MSIX;
+ return 0;
+ }
+
+ if(!pci_enable_msi(jme->pdev)) {
jme->flags |= JME_FLAG_MSI;
handler = jme_msi;
irq_flags = 0;
static void
jme_free_irq(struct jme_adapter *jme)
{
- free_irq(jme->pdev->irq, jme->dev);
- if (jme->flags & JME_FLAG_MSI) {
- pci_disable_msi(jme->pdev);
- jme->flags &= ~JME_FLAG_MSI;
- jme->dev->irq = jme->pdev->irq;
- }
+ if(jme->flags & JME_FLAG_MSIX) {
+ jme_free_msix(jme);
+ jme->flags &= ~JME_FLAG_MSIX;
+ }
+ else {
+ free_irq(jme->pdev->irq, jme->dev);
+ if (jme->flags & JME_FLAG_MSI) {
+ pci_disable_msi(jme->pdev);
+ jme->flags &= ~JME_FLAG_MSI;
+ jme->dev->irq = jme->pdev->irq;
+ }
+ }
}
static int
jme_open(struct net_device *netdev)
{
struct jme_adapter *jme = netdev_priv(netdev);
- int rc, timeout = 100;
+ int rc, timeout = 10;
while(
--timeout > 0 &&
atomic_read(&jme->tx_cleaning) != 1
)
)
- msleep(10);
+ msleep(1);
if(!timeout) {
rc = -EBUSY;
jme_clear_pm(jme);
jme_reset_mac_processor(jme);
+ JME_NAPI_ENABLE(jme);
rc = jme_request_irq(jme);
if(rc)
if (bmcr != tmp)
jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp);
- jwrite32(jme, JME_GHC, GHC_SPEED_100M);
+ if(jme->fpgaver)
+ jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL);
+ else
+ jwrite32(jme, JME_GHC, GHC_SPEED_100M);
}
static void
jme_disable_shadow(jme);
jme_free_irq(jme);
+ JME_NAPI_DISABLE(jme);
+
tasklet_kill(&jme->linkch_task);
tasklet_kill(&jme->txclean_task);
tasklet_kill(&jme->rxclean_task);
txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2;
txbi->skb = skb;
txbi->len = skb->len;
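+	/*
+	 * start_xmit == 0 means "no packet pending", so if jiffies
+	 * happens to be zero, store the largest value instead.
+	 */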
+ if(!(txbi->start_xmit = jiffies))
+ txbi->start_xmit = (0UL-1);
return 0;
}
jme_stop_queue_if_full(struct jme_adapter *jme)
{
struct jme_ring *txring = jme->txring;
+ struct jme_buffer_info *txbi = txring->bufinf;
+
+ txbi += atomic_read(&txring->next_to_clean);
smp_wmb();
if(unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
}
}
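+	/*
+	 * If the oldest pending TX buffer has been waiting for
+	 * TX_TIMEOUT or longer, stop the queue so the stack's TX
+	 * watchdog can recover the device.
+	 */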
+	if(unlikely(txbi->start_xmit &&
+ (jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
+ txbi->skb)) {
+ netif_stop_queue(jme->dev);
+ }
}
/*
spin_lock(&jme->macaddr_lock);
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- val = addr->sa_data[3] << 24 |
- addr->sa_data[2] << 16 |
- addr->sa_data[1] << 8 |
- addr->sa_data[0];
+ val = (addr->sa_data[3] & 0xff) << 24 |
+ (addr->sa_data[2] & 0xff) << 16 |
+ (addr->sa_data[1] & 0xff) << 8 |
+ (addr->sa_data[0] & 0xff);
jwrite32(jme, JME_RXUMA_LO, val);
- val = addr->sa_data[5] << 8 |
- addr->sa_data[4];
+ val = (addr->sa_data[5] & 0xff) << 8 |
+ (addr->sa_data[4] & 0xff);
jwrite32(jme, JME_RXUMA_HI, val);
spin_unlock(&jme->macaddr_lock);
{
struct jme_adapter *jme = netdev_priv(netdev);
+ jme->phylink = 0;
+ jme_reset_phy_processor(jme);
+ if(jme->flags & JME_FLAG_SSET)
+ jme_set_settings(netdev, &jme->old_ecmd);
+
/*
- * Reset the link
- * And the link change will reinitialize all RX/TX resources
+	 * Force the link to reset again
*/
- jme->phylink = 0;
jme_reset_link(jme);
}
static int
jme_get_regs_len(struct net_device *netdev)
{
- return 0x400;
+ return JME_REG_LEN;
}
static void
for(i = 0 ; i < len ; i += 4)
p[i >> 2] = jread32(jme, reg + i);
+}
+
+static void
+mdio_memcpy(struct jme_adapter *jme, __u32 *p, int reg_nr)
+{
+ int i;
+ __u16 *p16 = (__u16*)p;
+ for(i = 0 ; i < reg_nr ; ++i)
+ p16[i] = jme_mdio_read(jme->dev, jme->mii_if.phy_id, i);
}
static void
struct jme_adapter *jme = netdev_priv(netdev);
__u32 *p32 = (__u32*)p;
- memset(p, 0, 0x400);
+ memset(p, 0xFF, JME_REG_LEN);
regs->version = 1;
mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);
p32 += 0x100 >> 2;
mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);
+ p32 += 0x100 >> 2;
+ mdio_memcpy(jme, p32, JME_PHY_REG_NR);
}
static int
{
struct jme_adapter *jme = netdev_priv(netdev);
- ecmd->use_adaptive_rx_coalesce = true;
ecmd->tx_coalesce_usecs = PCC_TX_TO;
ecmd->tx_max_coalesced_frames = PCC_TX_CNT;
+ if(jme->flags & JME_FLAG_POLL) {
+ ecmd->use_adaptive_rx_coalesce = false;
+ ecmd->rx_coalesce_usecs = 0;
+ ecmd->rx_max_coalesced_frames = 0;
+ return 0;
+ }
+
+ ecmd->use_adaptive_rx_coalesce = true;
+
switch(jme->dpi.cur) {
case PCC_P1:
ecmd->rx_coalesce_usecs = PCC_P1_TO;
return 0;
}
+static int
+jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ struct dynpcc_info *dpi = &(jme->dpi);
+
+ if(netif_running(netdev))
+ return -EBUSY;
+
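+	/*
+	 * use_adaptive_rx_coalesce selects interrupt-driven mode with
+	 * dynamic PCC tuning; clearing it switches the driver to NAPI
+	 * polling mode.
+	 */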
+ if(ecmd->use_adaptive_rx_coalesce
+ && (jme->flags & JME_FLAG_POLL)) {
+ jme->flags &= ~JME_FLAG_POLL;
+ jme->jme_rx = netif_rx;
+ jme->jme_vlan_rx = vlan_hwaccel_rx;
+ dpi->cur = PCC_P1;
+ dpi->attempt = PCC_P1;
+ dpi->cnt = 0;
+ jme_set_rx_pcc(jme, PCC_P1);
+ jme_interrupt_mode(jme);
+ }
+ else if(!(ecmd->use_adaptive_rx_coalesce)
+ && !(jme->flags & JME_FLAG_POLL)) {
+ jme->flags |= JME_FLAG_POLL;
+ jme->jme_rx = netif_receive_skb;
+ jme->jme_vlan_rx = vlan_hwaccel_receive_skb;
+ jme_interrupt_mode(jme);
+ }
+
+ return 0;
+}
+
static void
jme_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *ecmd)
return 0;
}
+static __u8
+jme_smb_read(struct jme_adapter *jme, unsigned int addr)
+{
+ __u32 val;
+ int to;
+
+ val = jread32(jme, JME_SMBCSR);
+ to = JME_SMB_BUSY_TIMEOUT;
+ while((val & SMBCSR_BUSY) && --to) {
+ msleep(1);
+ val = jread32(jme, JME_SMBCSR);
+ }
+ if(!to) {
+ jeprintk(jme->dev->name, "SMB Bus Busy.\n");
+ return 0xFF;
+ }
+
+ jwrite32(jme, JME_SMBINTF,
+ ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
+ SMBINTF_HWRWN_READ |
+ SMBINTF_HWCMD);
+
+ val = jread32(jme, JME_SMBINTF);
+ to = JME_SMB_BUSY_TIMEOUT;
+ while((val & SMBINTF_HWCMD) && --to) {
+ msleep(1);
+ val = jread32(jme, JME_SMBINTF);
+ }
+ if(!to) {
+ jeprintk(jme->dev->name, "SMB Bus Busy.\n");
+ return 0xFF;
+ }
+
+ return (val & SMBINTF_HWDATR) >> SMBINTF_HWDATR_SHIFT;
+}
+
+static void
+jme_smb_write(struct jme_adapter *jme, unsigned int addr, __u8 data)
+{
+ __u32 val;
+ int to;
+
+ val = jread32(jme, JME_SMBCSR);
+ to = JME_SMB_BUSY_TIMEOUT;
+ while((val & SMBCSR_BUSY) && --to) {
+ msleep(1);
+ val = jread32(jme, JME_SMBCSR);
+ }
+ if(!to) {
+ jeprintk(jme->dev->name, "SMB Bus Busy.\n");
+ return;
+ }
+
+ jwrite32(jme, JME_SMBINTF,
+ ((data << SMBINTF_HWDATW_SHIFT) & SMBINTF_HWDATW) |
+ ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
+ SMBINTF_HWRWN_WRITE |
+ SMBINTF_HWCMD);
+
+ val = jread32(jme, JME_SMBINTF);
+ to = JME_SMB_BUSY_TIMEOUT;
+ while((val & SMBINTF_HWCMD) && --to) {
+ msleep(1);
+ val = jread32(jme, JME_SMBINTF);
+ }
+ if(!to) {
+ jeprintk(jme->dev->name, "SMB Bus Busy.\n");
+ return;
+ }
+
+ mdelay(2);
+}
+
+static int
+jme_get_eeprom_len(struct net_device *netdev)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ __u32 val;
+ val = jread32(jme, JME_SMBCSR);
+ return (val & SMBCSR_EEPROMD)?JME_SMB_LEN:0;
+}
+
+static int
+jme_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ int i, offset = eeprom->offset, len = eeprom->len, idx;
+
+ /*
+ * ethtool will check the boundary for us
+ */
+ memset(data, 0xFF, len);
+ eeprom->magic = JME_EEPROM_MAGIC;
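+	/*
+	 * The EEPROM content appears to be laid out as 3-byte records
+	 * starting at offset 2, with bit 7 of a record's first byte
+	 * marking the last record: clamp the length so reading stops
+	 * just past that record.
+	 */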
+ for(i = 0 ; i < len ; ++i) {
+ idx = i + offset;
+ data[i] = jme_smb_read(jme, idx);
+ if(data[i] == 0xFF)
+ break;
+ if((idx > 1) && !((idx - 2) % 3) && (data[i] & 0x80))
+ len = (len > i + 3)?i + 3:len;
+ }
+
+ return 0;
+}
+
+static int
+jme_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ int i, offset = eeprom->offset, len = eeprom->len;
+
+ if (eeprom->magic != JME_EEPROM_MAGIC)
+ return -EINVAL;
+
+ /*
+ * ethtool will check the boundary for us
+ */
+ for(i = 0 ; i < len ; ++i)
+ jme_smb_write(jme, i + offset, data[i]);
+
+ return 0;
+}
+
static const struct ethtool_ops jme_ethtool_ops = {
.get_drvinfo = jme_get_drvinfo,
.get_regs_len = jme_get_regs_len,
.get_regs = jme_get_regs,
.get_coalesce = jme_get_coalesce,
+ .set_coalesce = jme_set_coalesce,
.get_pauseparam = jme_get_pauseparam,
.set_pauseparam = jme_set_pauseparam,
.get_wol = jme_get_wol,
.set_tso = jme_set_tso,
.set_sg = ethtool_op_set_sg,
.nway_reset = jme_nway_reset,
+ .get_eeprom_len = jme_get_eeprom_len,
+ .get_eeprom = jme_get_eeprom,
+ .set_eeprom = jme_set_eeprom,
};
static int
}
__always_inline static void
-jme_set_phy_ps(struct jme_adapter *jme)
+jme_phy_init(struct jme_adapter *jme)
{
- jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, 0x00001000);
+ __u16 reg26;
+
+ reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26);
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000);
+}
+
+__always_inline static void
+jme_set_gmii(struct jme_adapter *jme)
+{
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);
+}
+
+static void
+jme_check_hw_ver(struct jme_adapter *jme)
+{
+ __u32 chipmode;
+
+ chipmode = jread32(jme, JME_CHIPMODE);
+
+ jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
+ jme->chipver = (chipmode & CM_CHIPVER_MASK) >> CM_CHIPVER_SHIFT;
}
static int __devinit
jme_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- int rc = 0, using_dac;
+ int rc = 0, using_dac, i;
struct net_device *netdev;
struct jme_adapter *jme;
+ __u16 bmcr, bmsr;
/*
* set up PCI device basics
jme = netdev_priv(netdev);
jme->pdev = pdev;
jme->dev = netdev;
+ jme->jme_rx = netif_rx;
+ jme->jme_vlan_rx = vlan_hwaccel_rx;
jme->old_mtu = netdev->mtu = 1500;
jme->phylink = 0;
jme->tx_ring_size = 1 << 10;
goto err_out_unmap;
}
+	NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2);
+
spin_lock_init(&jme->phy_lock);
spin_lock_init(&jme->macaddr_lock);
spin_lock_init(&jme->rxmcs_lock);
atomic_set(&jme->link_changing, 1);
atomic_set(&jme->rx_cleaning, 1);
atomic_set(&jme->tx_cleaning, 1);
+ atomic_set(&jme->rx_empty, 1);
tasklet_init(&jme->pcc_task,
&jme_pcc_tasklet,
tasklet_init(&jme->rxempty_task,
&jme_rx_empty_tasklet,
(unsigned long) jme);
- jme->mii_if.dev = netdev;
- jme->mii_if.phy_id = 1;
- jme->mii_if.supports_gmii = 1;
- jme->mii_if.mdio_read = jme_mdio_read;
- jme->mii_if.mdio_write = jme_mdio_write;
-
jme->dpi.cur = PCC_P1;
- jme->reg_ghc = GHC_DPX | GHC_SPEED_1000M;
+ if(pdev->device == JME_GE_DEVICE)
+ jme->reg_ghc = GHC_DPX | GHC_SPEED_1000M;
+ else
+ jme->reg_ghc = GHC_DPX | GHC_SPEED_100M;
jme->reg_rxcs = RXCS_DEFAULT;
jme->reg_rxmcs = RXMCS_DEFAULT;
jme->reg_txpfc = 0;
jme->reg_pmcs = PMCS_LFEN | PMCS_LREN | PMCS_MFEN;
jme->flags = JME_FLAG_TXCSUM | JME_FLAG_TSO;
+
/*
* Get Max Read Req Size from PCI Config Space
*/
/*
- * Reset MAC processor and reload EEPROM for MAC Address
+	 * Must be checked before calling jme_reset_mac_processor()
*/
+ jme_check_hw_ver(jme);
+ jme->mii_if.dev = netdev;
+ if(jme->fpgaver) {
+ jme->mii_if.phy_id = 0;
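+		/*
+		 * FPGA boards use an external PHY at an unknown address:
+		 * probe addresses 1-31 for one that responds.
+		 */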
+ for(i = 1 ; i < 32 ; ++i) {
+ bmcr = jme_mdio_read(netdev, i, MII_BMCR);
+ bmsr = jme_mdio_read(netdev, i, MII_BMSR);
+ if(bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) {
+ jme->mii_if.phy_id = i;
+ break;
+ }
+ }
+
+ if(!jme->mii_if.phy_id) {
+ rc = -EIO;
+		printk(KERN_ERR PFX "Cannot find phy_id.\n");
+ goto err_out_free_shadow;
+ }
+
+ jme->reg_ghc |= GHC_LINK_POLL;
+ }
+ else {
+ jme->mii_if.phy_id = 1;
+ }
+ if(pdev->device == JME_GE_DEVICE)
+ jme->mii_if.supports_gmii = true;
+ else
+ jme->mii_if.supports_gmii = false;
+ jme->mii_if.mdio_read = jme_mdio_read;
+ jme->mii_if.mdio_write = jme_mdio_write;
+
jme_clear_pm(jme);
- jme_set_phy_ps(jme);
+ if(jme->fpgaver)
+ jme_set_gmii(jme);
+ else
+ jme_phy_init(jme);
jme_phy_off(jme);
+
+ /*
+ * Reset MAC processor and reload EEPROM for MAC Address
+ */
jme_reset_mac_processor(jme);
rc = jme_reload_eeprom(jme);
if(rc) {
}
jprintk(netdev->name,
- "JMC250 gigabit eth %02x:%02x:%02x:%02x:%02x:%02x\n",
+ "JMC250 gigabit%s ver:%u eth %02x:%02x:%02x:%02x:%02x:%02x\n",
+ (jme->fpgaver != 0)?" (FPGA)":"",
+ (jme->fpgaver != 0)?jme->fpgaver:jme->chipver,
netdev->dev_addr[0],
netdev->dev_addr[1],
netdev->dev_addr[2],
jme_free_tx_resources(jme);
netif_carrier_off(netdev);
jme->phylink = 0;
+
+ if(jme->flags & JME_FLAG_POLL)
+ jme_polling_mode(jme);
}
}
static struct pci_device_id jme_pci_tbl[] = {
- { PCI_VDEVICE(JMICRON, 0x250) },
+ { PCI_VDEVICE(JMICRON, JME_GE_DEVICE) },
+ { PCI_VDEVICE(JMICRON, JME_FE_DEVICE) },
{ }
};