*/
/*
- * Note:
- * Backdoor for changing "FIFO Threshold for processing next packet"
- * Using:
- * ethtool -C eth1 adaptive-rx on adaptive-tx on \
- * rx-usecs 250 rx-frames-low N
- * N := 16 | 32 | 64 | 128
- */
-
-/*
- * Timeline before release:
- * Stage 5: Advanced offloading support.
- * 0.8:
- * - Implement VLAN offloading.
- * 0.9:
- * - Implement scatter-gather offloading.
- * Use pci_map_page on scattered sk_buff for HIGHMEM support
- * - Implement TCP Segement offloading.
- * Due to TX FIFO size, we should turn off tso when mtu > 1500.
- *
- * Stage 6: CPU Load balancing.
- * 1.0:
+ * TODO:
* - Implement MSI-X.
* Along with multiple RX queue, for CPU load balancing.
- *
- * Stage 7:
- * - Cleanup/re-orginize code, performence tuneing(alignment etc...).
- * - Test and Release 1.0
- *
- * Non-Critical:
- * - Use NAPI instead of rx_tasklet?
+ * - Decode register dump for ethtool.
+ * - Implement NAPI?
* PCC Support Both Packet Counter and Timeout Interrupt for
* receive and transmit complete, does NAPI really needed?
- * - Decode register dump for ethtool.
*/
#include <linux/version.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
+#include <linux/if_vlan.h>
#include "jme.h"
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
wmb();
for (i = JME_PHY_TIMEOUT ; i > 0 ; --i) {
udelay(1);
- if (((val = jread32(jme, JME_SMI)) & SMI_OP_REQ) == 0)
+ val = jread32(jme, JME_SMI);
+ if ((val & SMI_OP_REQ) == 0)
break;
}
wmb();
for (i = JME_PHY_TIMEOUT ; i > 0 ; --i) {
udelay(1);
- if (((val = jread32(jme, JME_SMI)) & SMI_OP_REQ) == 0)
+ val = jread32(jme, JME_SMI);
+ if ((val & SMI_OP_REQ) == 0)
break;
}
return;
}
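+/*
+ * Program one wake-on-LAN frame pattern: select the CRC slot of frame
+ * @fnr through the WFOI index register, write the CRC to WFODP, then
+ * write each mask dword of the pattern the same way.
+ */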
+static void
+jme_setup_wakeup_frame(struct jme_adapter *jme,
+ __u32 *mask, __u32 crc, int fnr)
+{
+ int i;
+
+ /*
+ * Setup CRC pattern
+ */
+ jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL));
+ wmb();
+ jwrite32(jme, JME_WFODP, crc);
+ wmb();
+
+ /*
+ * Setup Mask
+ */
+ for(i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) {
+ jwrite32(jme, JME_WFOI,
+ ((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) |
+ (fnr & WFOI_FRAME_SEL));
+ wmb();
+ jwrite32(jme, JME_WFODP, mask[i]);
+ wmb();
+ }
+}
__always_inline static void
jme_reset_mac_processor(struct jme_adapter *jme)
{
+ __u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0,0,0,0};
+ __u32 crc = 0xCDCDCDCD;
+ int i;
+
jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
udelay(2);
jwrite32(jme, JME_GHC, jme->reg_ghc);
jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
- jwrite32(jme, JME_WFODP, 0);
- jwrite32(jme, JME_WFOI, 0);
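+	/*
+	 * Initialize every wake-up frame pattern with a dummy CRC and
+	 * all-zero masks instead of clearing only WFODP/WFOI once.
+	 */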
+ for(i = 0 ; i < WAKEUP_FRAME_NR ; ++i)
+ jme_setup_wakeup_frame(jme, mask, crc, i);
jwrite32(jme, JME_GPREG0, GPREG0_DEFAULT);
jwrite32(jme, JME_GPREG1, 0);
}
{
jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs);
pci_set_power_state(jme->pdev, PCI_D0);
- pci_enable_wake(jme->pdev, PCI_D0, 0);
+ pci_enable_wake(jme->pdev, PCI_D0, false);
}
static int
char linkmsg[64];
int rc = 0;
+ linkmsg[0] = '\0';
phylink = jread32(jme, JME_PHY_LINK);
if (phylink & PHY_LINK_UP) {
phylink |= (bmcr & BMCR_FULLDPLX) ?
PHY_LINK_DUPLEX : 0;
- strcpy(linkmsg, "Forced: ");
+ strcat(linkmsg, "Forced: ");
}
else {
/*
jeprintk(netdev->name,
"Waiting speed resolve timeout.\n");
- strcpy(linkmsg, "ANed: ");
+ strcat(linkmsg, "ANed: ");
}
if(jme->phylink == phylink) {
switch(phylink & PHY_LINK_SPEED_MASK) {
case PHY_LINK_SPEED_10M:
ghc = GHC_SPEED_10M;
- strcpy(linkmsg, "10 Mbps, ");
+ strcat(linkmsg, "10 Mbps, ");
break;
case PHY_LINK_SPEED_100M:
ghc = GHC_SPEED_100M;
- strcpy(linkmsg, "100 Mbps, ");
+ strcat(linkmsg, "100 Mbps, ");
break;
case PHY_LINK_SPEED_1000M:
ghc = GHC_SPEED_1000M;
- strcpy(linkmsg, "1000 Mbps, ");
+ strcat(linkmsg, "1000 Mbps, ");
break;
default:
ghc = 0;
return rc;
}
-
-static int
-jme_alloc_txdesc(struct jme_adapter *jme,
- int nr_alloc)
-{
- struct jme_ring *txring = jme->txring;
- int idx;
-
- idx = txring->next_to_use;
-
- if(unlikely(atomic_read(&txring->nr_free) < nr_alloc))
- return -1;
-
- atomic_sub(nr_alloc, &txring->nr_free);
-
- if((txring->next_to_use += nr_alloc) >= RING_DESC_NR)
- txring->next_to_use -= RING_DESC_NR;
-
- return idx;
-}
-
-static void
-jme_tx_csum(struct sk_buff *skb, unsigned mtu, __u8 *flags)
-{
- if(skb->ip_summed == CHECKSUM_PARTIAL) {
- __u8 ip_proto;
-
- switch (skb->protocol) {
- case __constant_htons(ETH_P_IP):
- ip_proto = ip_hdr(skb)->protocol;
- break;
- case __constant_htons(ETH_P_IPV6):
- ip_proto = ipv6_hdr(skb)->nexthdr;
- break;
- default:
- ip_proto = 0;
- break;
- }
-
-
- switch(ip_proto) {
- case IPPROTO_TCP:
- *flags |= TXFLAG_TCPCS;
- break;
- case IPPROTO_UDP:
- *flags |= TXFLAG_UDPCS;
- break;
- default:
- jeprintk("jme", "Error upper layer protocol.\n");
- break;
- }
- }
-}
-
-static int
-jme_set_new_txdesc(struct jme_adapter *jme,
- struct sk_buff *skb)
-{
- struct jme_ring *txring = jme->txring;
- volatile struct txdesc *txdesc = txring->desc, *ctxdesc;
- struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
- dma_addr_t dmaaddr;
- int i, idx, nr_desc;
- __u8 flags;
-
- nr_desc = 2;
- idx = jme_alloc_txdesc(jme, nr_desc);
-
- if(unlikely(idx<0))
- return NETDEV_TX_BUSY;
-
- for(i = 1 ; i < nr_desc ; ++i) {
- ctxdesc = txdesc + ((idx + i) & (RING_DESC_NR-1));
- ctxbi = txbi + ((idx + i) & (RING_DESC_NR-1));
-
- dmaaddr = pci_map_single(jme->pdev,
- skb->data,
- skb->len,
- PCI_DMA_TODEVICE);
-
- pci_dma_sync_single_for_device(jme->pdev,
- dmaaddr,
- skb->len,
- PCI_DMA_TODEVICE);
-
- ctxdesc->dw[0] = 0;
- ctxdesc->dw[1] = 0;
- ctxdesc->desc2.flags = TXFLAG_OWN;
- if(jme->dev->features & NETIF_F_HIGHDMA)
- ctxdesc->desc2.flags |= TXFLAG_64BIT;
- ctxdesc->desc2.datalen = cpu_to_le16(skb->len);
- ctxdesc->desc2.bufaddrh = cpu_to_le32((__u64)dmaaddr >> 32);
- ctxdesc->desc2.bufaddrl = cpu_to_le32(
- (__u64)dmaaddr & 0xFFFFFFFFUL);
-
- ctxbi->mapping = dmaaddr;
- ctxbi->len = skb->len;
- }
-
- ctxdesc = txdesc + idx;
- ctxbi = txbi + idx;
-
- ctxdesc->dw[0] = 0;
- ctxdesc->dw[1] = 0;
- ctxdesc->dw[2] = 0;
- ctxdesc->dw[3] = 0;
- ctxdesc->desc1.pktsize = cpu_to_le16(skb->len);
- /*
- * Set OWN bit at final.
- * When kernel transmit faster than NIC.
- * And NIC trying to send this descriptor before we tell
- * it to start sending this TX queue.
- * Other fields are already filled correctly.
- */
- wmb();
- flags = TXFLAG_OWN | TXFLAG_INT;
- jme_tx_csum(skb, jme->dev->mtu, &flags);
- ctxdesc->desc1.flags = flags;
- /*
- * Set tx buffer info after telling NIC to send
- * For better tx_clean timing
- */
- wmb();
- ctxbi->nr_desc = nr_desc;
- ctxbi->skb = skb;
-
- tx_dbg(jme->dev->name, "Xmit: %d+%d\n", idx, nr_desc);
-
- return 0;
-}
-
-
static int
jme_setup_tx_resources(struct jme_adapter *jme)
{
struct jme_ring *txring = &(jme->txring[0]);
txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
- TX_RING_ALLOC_SIZE,
- &(txring->dmaalloc),
- GFP_ATOMIC);
+ TX_RING_ALLOC_SIZE(jme->tx_ring_size),
+ &(txring->dmaalloc),
+ GFP_ATOMIC);
if(!txring->alloc) {
txring->desc = NULL;
txring->dma = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
txring->next_to_use = 0;
txring->next_to_clean = 0;
- atomic_set(&txring->nr_free, RING_DESC_NR);
+ atomic_set(&txring->nr_free, jme->tx_ring_size);
/*
- * Initiallize Transmit Descriptors
+ * Initialize Transmit Descriptors
*/
- memset(txring->alloc, 0, TX_RING_ALLOC_SIZE);
+ memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size));
memset(txring->bufinf, 0,
- sizeof(struct jme_buffer_info) * RING_DESC_NR);
+ sizeof(struct jme_buffer_info) * jme->tx_ring_size);
return 0;
}
struct jme_buffer_info *txbi = txring->bufinf;
if(txring->alloc) {
- for(i = 0 ; i < RING_DESC_NR ; ++i) {
+ for(i = 0 ; i < jme->tx_ring_size ; ++i) {
txbi = txring->bufinf + i;
if(txbi->skb) {
dev_kfree_skb(txbi->skb);
}
dma_free_coherent(&(jme->pdev->dev),
- TX_RING_ALLOC_SIZE,
+ TX_RING_ALLOC_SIZE(jme->tx_ring_size),
txring->alloc,
txring->dmaalloc);
/*
* Setup TX Descptor Count
*/
- jwrite32(jme, JME_TXQDC, RING_DESC_NR);
+ jwrite32(jme, JME_TXQDC, jme->tx_ring_size);
/*
* Enable TX Engine
jme_make_new_rx_buf(struct jme_adapter *jme, int i)
{
struct jme_ring *rxring = &(jme->rxring[0]);
- struct jme_buffer_info *rxbi = rxring->bufinf;
+ struct jme_buffer_info *rxbi = rxring->bufinf + i;
unsigned long offset;
struct sk_buff* skb;
if(unlikely(!skb))
return -ENOMEM;
- if(unlikely(skb_is_nonlinear(skb))) {
- dprintk(jme->dev->name,
- "Allocated skb fragged(%d).\n",
- skb_shinfo(skb)->nr_frags);
- dev_kfree_skb(skb);
- return -ENOMEM;
- }
-
if(unlikely(offset =
(unsigned long)(skb->data)
& ((unsigned long)RX_BUF_DMA_ALIGN - 1)))
skb_reserve(skb, RX_BUF_DMA_ALIGN - offset);
- rxbi += i;
rxbi->skb = skb;
rxbi->len = skb_tailroom(skb);
- rxbi->mapping = pci_map_single(jme->pdev,
- skb->data,
- rxbi->len,
- PCI_DMA_FROMDEVICE);
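+	/*
+	 * DMA-map the new buffer with pci_map_page() so the device can
+	 * fill it; jme_free_rx_buf() unmaps it with pci_unmap_page().
+	 */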
+ rxbi->mapping = pci_map_page(jme->pdev,
+ virt_to_page(skb->data),
+ offset_in_page(skb->data),
+ rxbi->len,
+ PCI_DMA_FROMDEVICE);
return 0;
}
rxbi += i;
if(rxbi->skb) {
- pci_unmap_single(jme->pdev,
+ pci_unmap_page(jme->pdev,
rxbi->mapping,
rxbi->len,
PCI_DMA_FROMDEVICE);
struct jme_ring *rxring = &(jme->rxring[0]);
if(rxring->alloc) {
- for(i = 0 ; i < RING_DESC_NR ; ++i)
+ for(i = 0 ; i < jme->rx_ring_size ; ++i)
jme_free_rx_buf(jme, i);
dma_free_coherent(&(jme->pdev->dev),
- RX_RING_ALLOC_SIZE,
+ RX_RING_ALLOC_SIZE(jme->rx_ring_size),
rxring->alloc,
rxring->dmaalloc);
rxring->alloc = NULL;
struct jme_ring *rxring = &(jme->rxring[0]);
rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
- RX_RING_ALLOC_SIZE,
- &(rxring->dmaalloc),
- GFP_ATOMIC);
+ RX_RING_ALLOC_SIZE(jme->rx_ring_size),
+ &(rxring->dmaalloc),
+ GFP_ATOMIC);
if(!rxring->alloc) {
rxring->desc = NULL;
rxring->dmaalloc = 0;
/*
* Initiallize Receive Descriptors
*/
- for(i = 0 ; i < RING_DESC_NR ; ++i) {
+ for(i = 0 ; i < jme->rx_ring_size ; ++i) {
if(unlikely(jme_make_new_rx_buf(jme, i))) {
jme_free_rx_resources(jme);
return -ENOMEM;
jwrite32(jme, JME_RXNDA, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);
/*
- * Setup RX Descptor Count
+ * Setup RX Descriptor Count
*/
- jwrite32(jme, JME_RXQDC, RING_DESC_NR);
+ jwrite32(jme, JME_RXQDC, jme->rx_ring_size);
/*
* Setup Unicast Filter
}
static void
-jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx, int summed)
+jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
{
struct jme_ring *rxring = &(jme->rxring[0]);
volatile struct rxdesc *rxdesc = rxring->desc;
skb_put(skb, framesize);
skb->protocol = eth_type_trans(skb, jme->dev);
- if(summed)
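+	/*
+	 * Only mark the checksum as verified when the hardware actually
+	 * inspected a TCP, UDP or IPv4 header for this frame.
+	 */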
+ if((rxdesc->descwb.flags &
+ (RXWBFLAG_TCPON |
+ RXWBFLAG_UDPON |
+ RXWBFLAG_IPV4)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb->ip_summed = CHECKSUM_NONE;
- netif_rx(skb);
- if(le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST_MUL)
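+	/*
+	 * Tagged frames are handed to the VLAN layer when a group is
+	 * registered; the stripped 4-byte tag is added back to rx_bytes.
+	 */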
+ if(rxdesc->descwb.flags & RXWBFLAG_TAGON) {
+ vlan_dbg(jme->dev->name, "VLAN: %04x\n",
+ rxdesc->descwb.vlan);
+ if(jme->vlgrp) {
+ vlan_dbg(jme->dev->name,
+ "VLAN Passed to kernel.\n");
+ vlan_hwaccel_rx(skb, jme->vlgrp,
+ le32_to_cpu(rxdesc->descwb.vlan));
+ NET_STAT(jme).rx_bytes += 4;
+ }
+ }
+ else {
+ netif_rx(skb);
+ }
+
+ if((le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST) ==
+ RXWBFLAG_DEST_MUL)
++(NET_STAT(jme).multicast);
jme->dev->last_rx = jiffies;
}
else if(unlikely((flags & RXWBFLAG_IPV4) &&
!(flags & RXWBFLAG_IPCS))) {
- csum_dbg(jme->dev->name, "IPV4 Checksum error.\n");
+ csum_dbg(jme->dev->name, "IPv4 Checksum error.\n");
return 1;
}
else {
{
struct jme_ring *rxring = &(jme->rxring[0]);
volatile struct rxdesc *rxdesc = rxring->desc;
- int i, j, ccnt, desccnt;
+ int i, j, ccnt, desccnt, mask = jme->rx_ring_mask;
i = rxring->next_to_clean;
while( limit-- > 0 )
for(j = i, ccnt = desccnt ; ccnt-- ; ) {
jme_set_clean_rxdesc(jme, j);
- if(unlikely(++j == RING_DESC_NR))
- j = 0;
+ j = (j + 1) & (mask);
}
}
else {
- jme_alloc_and_feed_skb(jme, i,
- (rxdesc->descwb.flags &
- (RXWBFLAG_TCPON |
- RXWBFLAG_UDPON |
- RXWBFLAG_IPV4)));
+ jme_alloc_and_feed_skb(jme, i);
}
- if((i += desccnt) >= RING_DESC_NR)
- i -= RING_DESC_NR;
+ i = (i + desccnt) & (mask);
}
out:
struct net_device *netdev = jme->dev;
- if(unlikely(netif_queue_stopped(netdev) ||
+ if(unlikely(!netif_carrier_ok(netdev) ||
(atomic_read(&jme->link_changing) != 1)
)) {
jme_stop_pcc_timer(jme);
if(unlikely(!atomic_dec_and_test(&jme->rx_cleaning)))
goto out;
-
+
if(unlikely(atomic_read(&jme->link_changing) != 1))
goto out;
- if(unlikely(netif_queue_stopped(jme->dev)))
+ if(unlikely(!netif_carrier_ok(jme->dev)))
goto out;
- jme_process_receive(jme, RING_DESC_NR);
+ jme_process_receive(jme, jme->rx_ring_size);
++(dpi->intr_cnt);
out:
if(unlikely(atomic_read(&jme->link_changing) != 1))
return;
- if(unlikely(netif_queue_stopped(jme->dev)))
+ if(unlikely(!netif_carrier_ok(jme->dev)))
return;
- queue_dbg(jme->dev->name, "RX Queue empty!\n");
+ queue_dbg(jme->dev->name, "RX Queue Full!\n");
jme_rx_clean_tasklet(arg);
jme_restart_rx_engine(jme);
}
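+/*
+ * Wake the TX queue again once TX cleaning has freed at least
+ * tx_wake_threshold descriptors after jme_stop_queue_if_full()
+ * stopped it.
+ */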
+static void
+jme_wake_queue_if_stopped(struct jme_adapter *jme)
+{
+ struct jme_ring *txring = jme->txring;
+
+ smp_wmb();
+ if(unlikely(netif_queue_stopped(jme->dev) &&
+ atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
+
+		queue_dbg(jme->dev->name, "TX Queue Woken.\n");
+ netif_wake_queue(jme->dev);
+
+ }
+
+}
+
static void
jme_tx_clean_tasklet(unsigned long arg)
{
struct jme_ring *txring = &(jme->txring[0]);
volatile struct txdesc *txdesc = txring->desc;
struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
- int i, j, cnt = 0, max, err;
+ int i, j, cnt = 0, max, err, mask;
if(unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
goto out;
if(unlikely(atomic_read(&jme->link_changing) != 1))
goto out;
- if(unlikely(netif_queue_stopped(jme->dev)))
+ if(unlikely(!netif_carrier_ok(jme->dev)))
goto out;
- max = RING_DESC_NR - atomic_read(&txring->nr_free);
+ max = jme->tx_ring_size - atomic_read(&txring->nr_free);
+ mask = jme->tx_ring_mask;
tx_dbg(jme->dev->name, "Tx Tasklet: In\n");
ctxbi = txbi + i;
- if(ctxbi->skb && !(txdesc[i].descwb.flags & TXWBFLAG_OWN)) {
+ if(likely(ctxbi->skb &&
+ !(txdesc[i].descwb.flags & TXWBFLAG_OWN))) {
err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;
i, ctxbi->nr_desc);
for(j = 1 ; j < ctxbi->nr_desc ; ++j) {
- ttxbi = txbi + ((i + j) & (RING_DESC_NR - 1));
- txdesc[(i+j)&(RING_DESC_NR-1)].dw[0] = 0;
+ ttxbi = txbi + ((i + j) & (mask));
+ txdesc[(i + j) & (mask)].dw[0] = 0;
- pci_unmap_single(jme->pdev,
+ pci_unmap_page(jme->pdev,
ttxbi->mapping,
ttxbi->len,
PCI_DMA_TODEVICE);
- if(likely(!err))
- NET_STAT(jme).tx_bytes += ttxbi->len;
-
ttxbi->mapping = 0;
ttxbi->len = 0;
}
dev_kfree_skb(ctxbi->skb);
- ctxbi->skb = NULL;
cnt += ctxbi->nr_desc;
if(unlikely(err))
++(NET_STAT(jme).tx_carrier_errors);
- else
+ else {
++(NET_STAT(jme).tx_packets);
+ NET_STAT(jme).tx_bytes += ctxbi->len;
+ }
+
+ ctxbi->skb = NULL;
+ ctxbi->len = 0;
}
else {
if(!ctxbi->skb)
tx_dbg(jme->dev->name,
"Tx Tasklet:"
- " Stoped due to no skb.\n");
+ " Stopped due to no skb.\n");
else
tx_dbg(jme->dev->name,
"Tx Tasklet:"
- "Stoped due to not done.\n");
+ "Stopped due to not done.\n");
break;
}
- if(unlikely((i += ctxbi->nr_desc) >= RING_DESC_NR))
- i -= RING_DESC_NR;
+ i = (i + ctxbi->nr_desc) & mask;
ctxbi->nr_desc = 0;
}
atomic_add(cnt, &txring->nr_free);
+ jme_wake_queue_if_stopped(jme);
+
out:
atomic_inc(&jme->tx_cleaning);
}
static void
jme_intr_msi(struct jme_adapter *jme, __u32 intrstat)
{
+ __u32 handled;
+
/*
* Disable interrupt
*/
if(intrstat & (INTR_PCCTXTO | INTR_PCCTX))
tasklet_schedule(&jme->txclean_task);
- if((intrstat & ~INTR_ENABLE) != 0) {
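+	/*
+	 * Interrupt bits that are either serviced above or deliberately
+	 * ignored here; anything outside this set is printed below for
+	 * debugging.
+	 */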
+ handled = INTR_ENABLE | INTR_RX0 | INTR_TX0 | INTR_PAUSERCV;
+ if((intrstat & ~(handled)) != 0) {
/*
* Some interrupt not handled
* but not enabled also (for debug)
*/
+		dprintk(jme->dev->name,
+			"Unhandled interrupt (%08x).\n",
+			intrstat & ~(handled));
}
out_reenable:
static void
jme_reset_link(struct jme_adapter *jme)
{
- jme->phylink = 0;
jwrite32(jme, JME_TMCSR, TMCSR_SWIT);
}
netdev);
if(rc) {
jeprintk(netdev->name,
- "Unable to allocate %s interrupt (return: %d)\n",
+ "Unable to request %s interrupt (return: %d)\n",
jme->flags & JME_FLAG_MSI ? "MSI":"INTx", rc);
if(jme->flags & JME_FLAG_MSI) {
goto err_out;
}
+ jme_clear_pm(jme);
jme_reset_mac_processor(jme);
rc = jme_request_irq(jme);
jme_enable_shadow(jme);
jme_start_irq(jme);
+
+ if(jme->flags & JME_FLAG_SSET)
+ jme_set_settings(netdev, &jme->old_ecmd);
+ else
+ jme_reset_phy_processor(jme);
+
jme_reset_link(jme);
return 0;
return rc;
}
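+/*
+ * Force the PHY to 100M half-duplex; used on suspend while
+ * wake-on-LAN is armed.
+ */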
+static void
+jme_set_100m_half(struct jme_adapter *jme)
+{
+ __u32 bmcr, tmp;
+
+ bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
+ tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
+ BMCR_SPEED1000 | BMCR_FULLDPLX);
+ tmp |= BMCR_SPEED100;
+
+ if (bmcr != tmp)
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp);
+
+ jwrite32(jme, JME_GHC, GHC_SPEED_100M);
+}
+
+static void
+jme_phy_off(struct jme_adapter *jme)
+{
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
+}
+
+
static int
jme_close(struct net_device *netdev)
{
jme_reset_mac_processor(jme);
jme_free_rx_resources(jme);
jme_free_tx_resources(jme);
+ jme->phylink = 0;
+ jme_phy_off(jme);
+
+ return 0;
+}
+
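+/*
+ * Reserve ring space for one packet: a format descriptor, one
+ * descriptor for the linear data and one per page fragment
+ * (nr_frags + 2).  Returns the start index, or -1 if the ring
+ * does not have enough free descriptors.
+ */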
+static int
+jme_alloc_txdesc(struct jme_adapter *jme,
+ struct sk_buff *skb)
+{
+ struct jme_ring *txring = jme->txring;
+ int idx, nr_alloc, mask = jme->tx_ring_mask;
+
+ idx = txring->next_to_use;
+ nr_alloc = skb_shinfo(skb)->nr_frags + 2;
+
+ if(unlikely(atomic_read(&txring->nr_free) < nr_alloc))
+ return -1;
+
+ atomic_sub(nr_alloc, &txring->nr_free);
+
+ txring->next_to_use = (txring->next_to_use + nr_alloc) & mask;
+
+ return idx;
+}
+
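+/*
+ * DMA-map one page-based buffer and fill a data descriptor with its
+ * length and the high/low halves of the bus address, marking it
+ * hardware-owned (TXFLAG_OWN).
+ */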
+static void
+jme_fill_tx_map(struct pci_dev *pdev,
+ volatile struct txdesc *txdesc,
+ struct jme_buffer_info *txbi,
+ struct page *page,
+ __u32 page_offset,
+ __u32 len,
+ __u8 hidma)
+{
+ dma_addr_t dmaaddr;
+
+ dmaaddr = pci_map_page(pdev,
+ page,
+ page_offset,
+ len,
+ PCI_DMA_TODEVICE);
+
+ pci_dma_sync_single_for_device(pdev,
+ dmaaddr,
+ len,
+ PCI_DMA_TODEVICE);
+
+ txdesc->dw[0] = 0;
+ txdesc->dw[1] = 0;
+ txdesc->desc2.flags = TXFLAG_OWN;
+	txdesc->desc2.flags |= (hidma) ? TXFLAG_64BIT : 0;
+ txdesc->desc2.datalen = cpu_to_le16(len);
+ txdesc->desc2.bufaddrh = cpu_to_le32((__u64)dmaaddr >> 32);
+ txdesc->desc2.bufaddrl = cpu_to_le32(
+ (__u64)dmaaddr & 0xFFFFFFFFUL);
+
+ txbi->mapping = dmaaddr;
+ txbi->len = len;
+}
+
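+/*
+ * Fill the data descriptors of a packet: page fragments go into slots
+ * idx + 2 onwards and the linear part into slot idx + 1.  Slot idx is
+ * the format descriptor, filled last by jme_fill_first_tx_desc().
+ */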
+static void
+jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
+{
+ struct jme_ring *txring = jme->txring;
+ volatile struct txdesc *txdesc = txring->desc, *ctxdesc;
+ struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
+ __u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
+ int i, nr_frags = skb_shinfo(skb)->nr_frags;
+ int mask = jme->tx_ring_mask;
+ struct skb_frag_struct *frag;
+ __u32 len;
+
+ for(i = 0 ; i < nr_frags ; ++i) {
+ frag = &skb_shinfo(skb)->frags[i];
+ ctxdesc = txdesc + ((idx + i + 2) & (mask));
+ ctxbi = txbi + ((idx + i + 2) & (mask));
+
+ jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, frag->page,
+ frag->page_offset, frag->size, hidma);
+ }
+
+	len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
+ ctxdesc = txdesc + ((idx + 1) & (mask));
+ ctxbi = txbi + ((idx + 1) & (mask));
+ jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
+ offset_in_page(skb->data), len, hidma);
+
+}
+
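+/*
+ * A GSO skb with a cloned header must be unshared before the TSO path
+ * rewrites its IP/TCP headers in place; drop the packet if that fails.
+ */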
+static int
+jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
+{
+ if(unlikely(skb_shinfo(skb)->gso_size &&
+ skb_header_cloned(skb) &&
+ pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
+ dev_kfree_skb(skb);
+ return -1;
+ }
return 0;
}
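+/*
+ * For GSO skbs, store the MSS in the descriptor, enable large-send
+ * (TXFLAG_LSEN) and seed the TCP pseudo-header checksum.  Returns
+ * non-zero for non-GSO skbs so the caller falls back to plain
+ * checksum offload.
+ */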
+static int
+jme_tx_tso(struct sk_buff *skb,
+ volatile __u16 *mss, __u8 *flags)
+{
+ if((*mss = (skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT))) {
+ *flags |= TXFLAG_LSEN;
+
+ if(skb->protocol == __constant_htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+ }
+ else {
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+ tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr,
+ &ip6h->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+ }
+
+ return 0;
+ }
+
+ return 1;
+}
+
+static void
+jme_tx_csum(struct sk_buff *skb, __u8 *flags)
+{
+ if(skb->ip_summed == CHECKSUM_PARTIAL) {
+ __u8 ip_proto;
+
+ switch (skb->protocol) {
+ case __constant_htons(ETH_P_IP):
+ ip_proto = ip_hdr(skb)->protocol;
+ break;
+ case __constant_htons(ETH_P_IPV6):
+ ip_proto = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ ip_proto = 0;
+ break;
+ }
+
+ switch(ip_proto) {
+ case IPPROTO_TCP:
+ *flags |= TXFLAG_TCPCS;
+ break;
+ case IPPROTO_UDP:
+ *flags |= TXFLAG_UDPCS;
+ break;
+ default:
+			jeprintk("jme", "Unsupported upper layer protocol.\n");
+ break;
+ }
+ }
+}
+
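+/*
+ * Copy a hardware VLAN tag from the skb into the descriptor when one
+ * is present.
+ */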
+__always_inline static void
+jme_tx_vlan(struct sk_buff *skb, volatile __u16 *vlan, __u8 *flags)
+{
+ if(vlan_tx_tag_present(skb)) {
+ vlan_dbg("jme", "Tag found!(%04x)\n", vlan_tx_tag_get(skb));
+ *flags |= TXFLAG_TAGON;
+ *vlan = vlan_tx_tag_get(skb);
+ }
+}
+
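+/*
+ * Fill the per-packet format descriptor: total length, TSO/checksum
+ * and VLAN flags.  Ownership is handed over last so the NIC never
+ * sees a half-built descriptor.
+ */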
+static int
+jme_fill_first_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
+{
+ struct jme_ring *txring = jme->txring;
+ volatile struct txdesc *txdesc;
+ struct jme_buffer_info *txbi;
+ __u8 flags;
+
+ txdesc = (volatile struct txdesc*)txring->desc + idx;
+ txbi = txring->bufinf + idx;
+
+ txdesc->dw[0] = 0;
+ txdesc->dw[1] = 0;
+ txdesc->dw[2] = 0;
+ txdesc->dw[3] = 0;
+ txdesc->desc1.pktsize = cpu_to_le16(skb->len);
+	/*
+	 * Set the OWN bit last.  The kernel may queue packets faster
+	 * than the NIC can send them, and the NIC must not start on
+	 * this descriptor before the rest of it is filled in and the
+	 * TX queue is kicked.
+	 * Other fields are already filled correctly.
+	 */
+ wmb();
+ flags = TXFLAG_OWN | TXFLAG_INT;
+	/* Set checksum offload flags only when not doing TSO */
+ if(jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
+ jme_tx_csum(skb, &flags);
+ jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
+ txdesc->desc1.flags = flags;
+ /*
+ * Set tx buffer info after telling NIC to send
+ * For better tx_clean timing
+ */
+ wmb();
+ txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2;
+ txbi->skb = skb;
+ txbi->len = skb->len;
+
+ return 0;
+}
+
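+/*
+ * Stop the queue when fewer than MAX_SKB_FRAGS + 2 descriptors remain
+ * (the worst case for a single packet), then re-check so a concurrent
+ * TX clean can wake it right back up.
+ */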
+static void
+jme_stop_queue_if_full(struct jme_adapter *jme)
+{
+ struct jme_ring *txring = jme->txring;
+
+ smp_wmb();
+ if(unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
+ netif_stop_queue(jme->dev);
+ queue_dbg(jme->dev->name, "TX Queue Paused.\n");
+ smp_wmb();
+ if (atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold)) {
+ netif_wake_queue(jme->dev);
+			queue_dbg(jme->dev->name, "TX Queue Fast Woken.\n");
+ }
+ }
+
+}
+
/*
* This function is already protected by netif_tx_lock()
*/
jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct jme_adapter *jme = netdev_priv(netdev);
- int rc;
+ int idx;
- if(unlikely(netif_queue_stopped(jme->dev)))
- return NETDEV_TX_BUSY;
-
-#if 0
-/*Testing*/
- ("jme", "Frags: %d Headlen: %d Len: %d Sum:%d\n",
- skb_shinfo(skb)->nr_frags,
- skb_headlen(skb),
- skb->len,
- skb->ip_summed);
-/*********/
-#endif
+ if(skb_shinfo(skb)->nr_frags) {
+		tx_dbg(netdev->name,
+			"Frags: %d Headlen: %d Len: %d MSS: %d Sum: %d\n",
+			skb_shinfo(skb)->nr_frags,
+			skb_headlen(skb),
+			skb->len,
+			skb_shinfo(skb)->gso_size,
+			skb->ip_summed);
+ }
+
+ if(unlikely(jme_expand_header(jme, skb))) {
+ ++(NET_STAT(jme).tx_dropped);
+ return NETDEV_TX_OK;
+ }
+
+ idx = jme_alloc_txdesc(jme, skb);
+
+ if(unlikely(idx<0)) {
+ netif_stop_queue(netdev);
+ jeprintk(netdev->name,
+ "BUG! Tx ring full when queue awake!\n");
- rc = jme_set_new_txdesc(jme, skb);
+ return NETDEV_TX_BUSY;
+ }
+
+ jme_map_tx_skb(jme, skb, idx);
+ jme_fill_first_tx_desc(jme, skb, idx);
- if(unlikely(rc != NETDEV_TX_OK))
- return rc;
+	tx_dbg(jme->dev->name, "Xmit: %d+%d\n",
+		idx, skb_shinfo(skb)->nr_frags + 2);
jwrite32(jme, JME_TXCS, jme->reg_txcs |
TXCS_SELECT_QUEUE0 |
TXCS_ENABLE);
netdev->trans_start = jiffies;
+ jme_stop_queue_if_full(jme);
+
return NETDEV_TX_OK;
}
return 0;
if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
- ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
+ ((new_mtu) < IPV6_MIN_MTU))
return -EINVAL;
if(new_mtu > 4000) {
}
if(new_mtu > 1900) {
- netdev->features &= ~NETIF_F_HW_CSUM;
+ netdev->features &= ~(NETIF_F_HW_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
}
else {
- netdev->features |= NETIF_F_HW_CSUM;
+ if(jme->flags & JME_FLAG_TXCSUM)
+ netdev->features |= NETIF_F_HW_CSUM;
+ if(jme->flags & JME_FLAG_TSO)
+ netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
}
netdev->mtu = new_mtu;
/*
* Reset the link
- * And the link change will reinitiallize all RX/TX resources
+ * And the link change will reinitialize all RX/TX resources
*/
+ jme->phylink = 0;
jme_reset_link(jme);
}
+static void
+jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+
+ jme->vlgrp = grp;
+}
+
static void
jme_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
return 0;
}
-/*
- * It's not actually for coalesce.
- * It changes internell FIFO related setting for testing.
- */
-static int
-jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
-{
- struct jme_adapter *jme = netdev_priv(netdev);
-
- if(ecmd->use_adaptive_rx_coalesce &&
- ecmd->use_adaptive_tx_coalesce &&
- ecmd->rx_coalesce_usecs == 250 &&
- (ecmd->rx_max_coalesced_frames_low == 16 ||
- ecmd->rx_max_coalesced_frames_low == 32 ||
- ecmd->rx_max_coalesced_frames_low == 64 ||
- ecmd->rx_max_coalesced_frames_low == 128)) {
- jme->reg_rxcs &= ~RXCS_FIFOTHNP;
- switch(ecmd->rx_max_coalesced_frames_low) {
- case 16:
- jme->reg_rxcs |= RXCS_FIFOTHNP_16QW;
- break;
- case 32:
- jme->reg_rxcs |= RXCS_FIFOTHNP_32QW;
- break;
- case 64:
- jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
- break;
- case 128:
- default:
- jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
- }
- jme_restart_rx_engine(jme);
- }
- else {
- return -EINVAL;
- }
-
- return 0;
-}
-
static void
jme_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *ecmd)
spin_lock_irqsave(&jme->phy_lock, flags);
val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
spin_unlock_irqrestore(&jme->phy_lock, flags);
- ecmd->autoneg = (val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
+
+ ecmd->autoneg =
+ (val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
}
static int
spin_lock_irqsave(&jme->phy_lock, flags);
val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
- if( ((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) !=
+ if( ((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) !=
(ecmd->autoneg != 0)) {
if(ecmd->autoneg)
else
val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
- jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE, val);
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id,
+ MII_ADVERTISE, val);
}
spin_unlock_irqrestore(&jme->phy_lock, flags);
if(wol->wolopts & WAKE_MAGIC)
jme->reg_pmcs |= PMCS_MFEN;
+
return 0;
}
-
+
static int
jme_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
struct jme_adapter *jme = netdev_priv(netdev);
unsigned long flags;
-
+
spin_lock_irqsave(&jme->rxmcs_lock, flags);
if(on)
jme->reg_rxmcs |= RXMCS_CHECKSUM;
static int
jme_set_tx_csum(struct net_device *netdev, u32 on)
{
- if(on)
- netdev->features |= NETIF_F_HW_CSUM;
- else
+ struct jme_adapter *jme = netdev_priv(netdev);
+
+ if(on) {
+ jme->flags |= JME_FLAG_TXCSUM;
+ if(netdev->mtu <= 1900)
+ netdev->features |= NETIF_F_HW_CSUM;
+ }
+ else {
+ jme->flags &= ~JME_FLAG_TXCSUM;
netdev->features &= ~NETIF_F_HW_CSUM;
+ }
return 0;
}
+static int
+jme_set_tso(struct net_device *netdev, u32 on)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+
+ if (on) {
+ jme->flags |= JME_FLAG_TSO;
+ if(netdev->mtu <= 1900)
+ netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+ }
+ else {
+ jme->flags &= ~JME_FLAG_TSO;
+ netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+ }
+
+ return 0;
+}
+
static int
jme_nway_reset(struct net_device *netdev)
{
.get_regs_len = jme_get_regs_len,
.get_regs = jme_get_regs,
.get_coalesce = jme_get_coalesce,
- .set_coalesce = jme_set_coalesce,
.get_pauseparam = jme_get_pauseparam,
.set_pauseparam = jme_set_pauseparam,
.get_wol = jme_get_wol,
.get_rx_csum = jme_get_rx_csum,
.set_rx_csum = jme_set_rx_csum,
.set_tx_csum = jme_set_tx_csum,
+ .set_tso = jme_set_tso,
+ .set_sg = ethtool_op_set_sg,
.nway_reset = jme_nway_reset,
};
jme_pci_dma64(struct pci_dev *pdev)
{
if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
- if(!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
+ if(!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
+ dprintk("jme", "64Bit DMA Selected.\n");
return 1;
+ }
if (!pci_set_dma_mask(pdev, DMA_40BIT_MASK))
- if(!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK))
+ if(!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK)) {
+ dprintk("jme", "40Bit DMA Selected.\n");
return 1;
+ }
if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
- if(!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
+ if(!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
+ dprintk("jme", "32Bit DMA Selected.\n");
return 0;
+ }
return -1;
}
+__always_inline static void
+jme_set_phy_ps(struct jme_adapter *jme)
+{
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, 0x00001000);
+}
+
static int __devinit
jme_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
*/
netdev = alloc_etherdev(sizeof(*jme));
if(!netdev) {
+ printk(KERN_ERR PFX "Cannot allocate netdev structure.\n");
rc = -ENOMEM;
goto err_out_release_regions;
}
netdev->ethtool_ops = &jme_ethtool_ops;
netdev->tx_timeout = jme_tx_timeout;
netdev->watchdog_timeo = TX_TIMEOUT;
+ netdev->vlan_rx_register = jme_vlan_rx_register;
NETDEV_GET_STATS(netdev, &jme_get_stats);
- netdev->features = NETIF_F_HW_CSUM;
+ netdev->features = NETIF_F_HW_CSUM |
+ NETIF_F_SG |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX;
if(using_dac)
netdev->features |= NETIF_F_HIGHDMA;
jme->dev = netdev;
jme->old_mtu = netdev->mtu = 1500;
jme->phylink = 0;
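+	/*
+	 * Ring sizes are powers of two so (size - 1) works as an index
+	 * mask; wake the TX queue once half of the TX ring is free again.
+	 */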
+ jme->tx_ring_size = 1 << 10;
+ jme->tx_ring_mask = jme->tx_ring_size - 1;
+ jme->tx_wake_threshold = 1 << 9;
+ jme->rx_ring_size = 1 << 9;
+ jme->rx_ring_mask = jme->rx_ring_size - 1;
jme->regs = ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if (!(jme->regs)) {
+		printk(KERN_ERR PFX "Failed to map PCI resource region.\n");
rc = -ENOMEM;
goto err_out_free_netdev;
}
sizeof(__u32) * SHADOW_REG_NR,
&(jme->shadow_dma));
if (!(jme->shadow_regs)) {
+		printk(KERN_ERR PFX "Failed to allocate shadow register mapping.\n");
rc = -ENOMEM;
goto err_out_unmap;
}
jme->reg_rxcs = RXCS_DEFAULT;
jme->reg_rxmcs = RXMCS_DEFAULT;
jme->reg_txpfc = 0;
- jme->reg_pmcs = 0;
+ jme->reg_pmcs = PMCS_LFEN | PMCS_LREN | PMCS_MFEN;
+ jme->flags = JME_FLAG_TXCSUM | JME_FLAG_TSO;
/*
* Get Max Read Req Size from PCI Config Space
*/
* Reset MAC processor and reload EEPROM for MAC Address
*/
jme_clear_pm(jme);
- jme_reset_phy_processor(jme);
+ jme_set_phy_ps(jme);
+ jme_phy_off(jme);
jme_reset_mac_processor(jme);
rc = jme_reload_eeprom(jme);
if(rc) {
printk(KERN_ERR PFX
- "Rload eeprom for reading MAC Address error.\n");
+			"Failed to reload EEPROM for reading the MAC address.\n");
goto err_out_free_shadow;
}
jme_load_macaddr(netdev);
}
-static void
-jme_set_10m_half(struct jme_adapter *jme)
-{
- __u32 bmcr, tmp;
-
- bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
- tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
- BMCR_SPEED1000 | BMCR_FULLDPLX);
-
- if (bmcr != tmp)
- jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp);
-
- jwrite32(jme, JME_GHC, GHC_SPEED_10M);
-}
-
static int
jme_suspend(struct pci_dev *pdev, pm_message_t state)
{
jme->phylink = 0;
}
- jme_set_10m_half(jme);
pci_save_state(pdev);
if(jme->reg_pmcs) {
+ jme_set_100m_half(jme);
jwrite32(jme, JME_PMCS, jme->reg_pmcs);
- pci_enable_wake(pdev, PCI_D3cold, 1);
+ pci_enable_wake(pdev, PCI_D3hot, true);
+ pci_enable_wake(pdev, PCI_D3cold, true);
}
else {
- pci_enable_wake(pdev, PCI_D3cold, 0);
+ jme_phy_off(jme);
+ pci_enable_wake(pdev, PCI_D3hot, false);
+ pci_enable_wake(pdev, PCI_D3cold, false);
}
pci_set_power_state(pdev, pci_choose_state(pdev, state));