]> bbs.cooldavid.org Git - jme.git/blobdiff - jme.c
Import jme 0.5 source
[jme.git] / jme.c
diff --git a/jme.c b/jme.c
index aa43bb02b0ca9c3b466cae07d8d519c2245f5865..aead26ea3a4a4db30d74e4783be52b1f58d728bf 100644 (file)
--- a/jme.c
+++ b/jme.c
  *
  */
 
-/*
- * Note:
- *     Watchdog:
- *             check if rx queue stoped.
- *             And restart it after rx ring cleaned.
- */
-
 /*
  * Timeline before release:
- *     Stage 2: Error handling.
- *     -  Wathch dog
- *     -  Transmit timeout
- *
- *     Stage 3: Basic offloading support.
- *      -  Use pci_map_page on scattered sk_buff for HIGHMEM support
- *      -  Implement scatter-gather offloading.
- *         A system page per RX (buffer|descriptor)?
- *        Handle fraged sk_buff to TX descriptors.
- *     -  Implement tx/rx ipv6/ip/tcp/udp checksum offloading
- *
  *     Stage 4: Basic feature support.
+ *     -  Implement scatter-gather offloading.
+ *        Use pci_map_page on scattered sk_buff for HIGHMEM support
  *     -  Implement Power Managemt related functions.
  *     -  Implement Jumboframe.
  *     -  Implement MSI.
  *        Along with multiple RX queue, for CPU load balancing.
  *
  *     Stage 7:
- *     -  Use NAPI instead of rx_tasklet?
- *             PCC Support Both Packet Counter and Timeout Interrupt for
- *             receive and transmit complete, does NAPI really needed?
  *     -  Cleanup/re-orginize code, performence tuneing(alignment etc...).
  *     -  Test and Release 1.0
+ *
+ *     Non-Critical:
+ *     -  Use NAPI instead of rx_tasklet?
+ *             PCC Support Both Packet Counter and Timeout Interrupt for
+ *             receive and transmit complete, does NAPI really needed?
+ *     -  Decode register dump for ethtool.
  */
 
 #include <linux/version.h>
@@ -72,6 +59,8 @@
 #include <linux/mii.h>
 #include <linux/crc32.h>
 #include <linux/delay.h>
+#include <linux/in.h>
+#include <linux/ip.h>
 #include "jme.h"
 
 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
@@ -135,36 +124,25 @@ jme_mdio_write(struct net_device *netdev,
 __always_inline static void
 jme_reset_phy_processor(struct jme_adapter *jme)
 {
-       int i, val;
-
-       val = jme_mdio_read(jme->dev,
-                               jme->mii_if.phy_id,
-                               MII_BMCR);
+       __u32 val;
 
        jme_mdio_write(jme->dev,
                        jme->mii_if.phy_id,
-                       MII_BMCR, val | BMCR_RESET);
-
-       for(i = JME_PHY_RST_TIMEOUT ; i > 0 ; --i) {
-               udelay(1);
-               val = jme_mdio_read(jme->dev,
-                                       jme->mii_if.phy_id,
-                                       MII_BMCR);
-               if(!(val & BMCR_RESET))
-                       break;
-       }
-
-       if (i == 0)
-               jeprintk(jme->dev->name, "phy reset timeout\n");
+                       MII_ADVERTISE, ADVERTISE_ALL |
+                       ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
 
        jme_mdio_write(jme->dev,
                        jme->mii_if.phy_id,
-                       MII_ADVERTISE, ADVERTISE_ALL);
+                       MII_CTRL1000,
+                       ADVERTISE_1000FULL | ADVERTISE_1000HALF);
+
+       val = jme_mdio_read(jme->dev,
+                               jme->mii_if.phy_id,
+                               MII_BMCR);
 
        jme_mdio_write(jme->dev,
                        jme->mii_if.phy_id,
-                       MII_CTRL1000,
-                       ADVERTISE_1000FULL | ADVERTISE_1000HALF);
+                       MII_BMCR, val | BMCR_RESET);
 
        return;
 }
@@ -232,7 +210,7 @@ jme_load_macaddr(struct net_device *netdev)
        unsigned char macaddr[6];
        __u32 val;
 
-       spin_lock(&jme->phy_lock);
+       spin_lock(&jme->macaddr_lock);
        val = jread32(jme, JME_RXUMA_LO);
        macaddr[0] = (val >>  0) & 0xFF;
        macaddr[1] = (val >>  8) & 0xFF;
@@ -242,10 +220,10 @@ jme_load_macaddr(struct net_device *netdev)
        macaddr[4] = (val >>  0) & 0xFF;
        macaddr[5] = (val >>  8) & 0xFF;
         memcpy(netdev->dev_addr, macaddr, 6);
-       spin_unlock(&jme->phy_lock);
+       spin_unlock(&jme->macaddr_lock);
 }
 
-static void
+__always_inline static void
 jme_set_rx_pcc(struct jme_adapter *jme, int p)
 {
        switch(p) {
@@ -271,7 +249,7 @@ jme_set_rx_pcc(struct jme_adapter *jme, int p)
        dprintk(jme->dev->name, "Switched to PCC_P%d\n", p);
 }
 
-__always_inline static void
+static void
 jme_start_irq(struct jme_adapter *jme)
 {
        register struct dynpcc_info *dpi = &(jme->dpi);
@@ -286,15 +264,14 @@ jme_start_irq(struct jme_adapter *jme)
        dpi->cnt                = 0;
 
        jwrite32(jme, JME_PCCTX,
-                       ((60000 << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
-                       ((8 << PCCTX_SHIFT) & PCCTX_MASK) |
+                       ((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
+                       ((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) |
                        PCCTXQ0_EN
                );
 
        /*
         * Enable Interrupts
         */
-       atomic_set(&jme->intr_sem, 1);
        jwrite32(jme, JME_IENS, INTR_ENABLE);
 }
 
@@ -322,27 +299,63 @@ jme_disable_shadow(struct jme_adapter *jme)
        jwrite32(jme, JME_SHBA_LO, 0x0);
 }
 
-static void
-jme_check_link(struct net_device *netdev)
+static int
+jme_check_link(struct net_device *netdev, int testonly)
 {
        struct jme_adapter *jme = netdev_priv(netdev);
-       __u32 phylink, ghc, cnt = JME_AUTONEG_TIMEOUT;
+       __u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr;
        char linkmsg[32];
+       int rc = 0;
 
-       spin_lock(&jme->phy_lock);
        phylink = jread32(jme, JME_PHY_LINK);
 
         if (phylink & PHY_LINK_UP) {
-               /*
-                * Keep polling for autoneg complete
-                */
-               while(!(phylink & PHY_LINK_AUTONEG_COMPLETE) && --cnt > 0) {
-                       mdelay(1);
-                       phylink = jread32(jme, JME_PHY_LINK);
+               if(!(phylink & PHY_LINK_AUTONEG_COMPLETE)) {
+                       /*
+                        * If we did not enable AN
+                        * Speed/Duplex Info should be obtained from SMI
+                        */
+                       phylink = PHY_LINK_UP;
+
+                       bmcr = jme_mdio_read(jme->dev,
+                                               jme->mii_if.phy_id,
+                                               MII_BMCR);
+
+                       phylink |= ((bmcr & BMCR_SPEED1000) &&
+                                       (bmcr & BMCR_SPEED100) == 0) ?
+                                       PHY_LINK_SPEED_1000M :
+                                       (bmcr & BMCR_SPEED100) ?
+                                       PHY_LINK_SPEED_100M :
+                                       PHY_LINK_SPEED_10M;
+
+                       phylink |= (bmcr & BMCR_FULLDPLX) ?
+                                        PHY_LINK_DUPLEX : 0;
                }
+               else {
+                       /*
+                        * Keep polling for speed/duplex resolve complete
+                        */
+                       while(!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
+                               --cnt) {
+
+                               udelay(1);
+                               phylink = jread32(jme, JME_PHY_LINK);
+
+                       }
+
+                       if(!cnt)
+                               jeprintk(netdev->name,
+                                       "Waiting speed resolve timeout.\n");
+               }
+
+               if(jme->phylink == phylink) {
+                       rc = 1;
+                       goto out;
+               }
+               if(testonly)
+                       goto out;
 
-               if(!cnt)
-                       jeprintk(netdev->name, "Waiting autoneg timeout.\n");
+               jme->phylink = phylink;
 
                switch(phylink & PHY_LINK_SPEED_MASK) {
                        case PHY_LINK_SPEED_10M:
@@ -362,28 +375,46 @@ jme_check_link(struct net_device *netdev)
                                break;
                }
                 ghc |= (phylink & PHY_LINK_DUPLEX) ? GHC_DPX : 0;
-               jme->reg_ghc = ghc;
-               jwrite32(jme, JME_GHC, ghc);
+
                strcat(linkmsg, (phylink &PHY_LINK_DUPLEX) ?
-                                       "Full-Duplex" :
-                                       "Half-Duplex");
+                                       "Full-Duplex, " :
+                                       "Half-Duplex, ");
+
+               if(phylink & PHY_LINK_MDI_STAT)
+                       strcat(linkmsg, "MDI-X");
+               else
+                       strcat(linkmsg, "MDI");
 
                if(phylink & PHY_LINK_DUPLEX)
                        jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
-               else
+               else {
                        jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
                                                TXMCS_BACKOFF |
                                                TXMCS_CARRIERSENSE |
                                                TXMCS_COLLISION);
+                       jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN |
+                               ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
+                               TXTRHD_TXREN |
+                               ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
+               }
+
+               jme->reg_ghc = ghc;
+               jwrite32(jme, JME_GHC, ghc);
 
                jprintk(netdev->name, "Link is up at %s.\n", linkmsg);
                 netif_carrier_on(netdev);
        }
         else {
+               if(testonly)
+                       goto out;
+
                jprintk(netdev->name, "Link is down.\n");
+               jme->phylink = 0;
                 netif_carrier_off(netdev);
        }
-       spin_unlock(&jme->phy_lock);
+
+out:
+       return rc;
 }
 
 
@@ -418,6 +449,7 @@ jme_set_new_txdesc(struct jme_adapter *jme,
        struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
        dma_addr_t dmaaddr;
        int i, idx, nr_desc;
+       __u8 flags;
 
        nr_desc = 2;
        idx = jme_alloc_txdesc(jme, nr_desc);
@@ -446,7 +478,8 @@ jme_set_new_txdesc(struct jme_adapter *jme,
                        ctxdesc->desc2.flags |= TXFLAG_64BIT;
                ctxdesc->desc2.datalen  = cpu_to_le16(skb->len);
                ctxdesc->desc2.bufaddrh = cpu_to_le32((__u64)dmaaddr >> 32);
-               ctxdesc->desc2.bufaddrl = cpu_to_le32(dmaaddr & 0xFFFFFFFF);
+               ctxdesc->desc2.bufaddrl = cpu_to_le32(
+                                               (__u64)dmaaddr & 0xFFFFFFFFUL);
 
                ctxbi->mapping = dmaaddr;
                ctxbi->len = skb->len;
@@ -468,7 +501,22 @@ jme_set_new_txdesc(struct jme_adapter *jme,
         * Other fields are already filled correctly.
         */
        wmb();
-       ctxdesc->desc1.flags = TXFLAG_OWN | TXFLAG_INT;
+       flags = TXFLAG_OWN | TXFLAG_INT; 
+       if(skb->ip_summed == CHECKSUM_PARTIAL) {
+               //flags |= TXFLAG_IPCS;
+
+               switch(ip_hdr(skb)->protocol) {
+               case IPPROTO_TCP:
+                       flags |= TXFLAG_TCPCS;
+                       break;
+               case IPPROTO_UDP:
+                       flags |= TXFLAG_UDPCS;
+                       break;
+               default:
+                       break;
+               }
+       }
+       ctxdesc->desc1.flags = flags;
        /*
         * Set tx buffer info after telling NIC to send
         * For better tx_clean timing
@@ -491,7 +539,8 @@ jme_setup_tx_resources(struct jme_adapter *jme)
        txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
                                           TX_RING_ALLOC_SIZE,
                                           &(txring->dmaalloc),
-                                          GFP_KERNEL);
+                                          GFP_ATOMIC);
+
        if(!txring->alloc) {
                txring->desc = NULL;
                txring->dmaalloc = 0;
@@ -557,8 +606,6 @@ jme_free_tx_resources(struct jme_adapter *jme)
 __always_inline static void
 jme_enable_tx_engine(struct jme_adapter *jme)
 {
-       __u8 mrrs;
-
        /*
         * Select Queue 0
         */
@@ -567,31 +614,15 @@ jme_enable_tx_engine(struct jme_adapter *jme)
        /*
         * Setup TX Queue 0 DMA Bass Address
         */
-       jwrite32(jme, JME_TXDBA_LO, jme->txring[0].dma);
+       jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
        jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
-       jwrite32(jme, JME_TXNDA, jme->txring[0].dma);
+       jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
 
        /*
         * Setup TX Descptor Count
         */
        jwrite32(jme, JME_TXQDC, RING_DESC_NR);
 
-       /*
-        * Get Max Read Req Size from PCI Config Space
-        */
-       pci_read_config_byte(jme->pdev, PCI_CONF_DCSR_MRRS, &mrrs);
-       switch(mrrs) {
-               case MRRS_128B:
-                       jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
-                       break;
-               case MRRS_256B:
-                       jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
-                       break;
-               default:
-                       jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
-                       break;
-       };
-
        /*
         * Enable TX Engine
         */
@@ -611,17 +642,19 @@ jme_disable_tx_engine(struct jme_adapter *jme)
        /*
         * Disable TX Engine
         */
-       jwrite32(jme, JME_TXCS, jme->reg_txcs);
+       jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);
 
        val = jread32(jme, JME_TXCS);
        for(i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i)
        {
-               udelay(1);
+               mdelay(1);
                val = jread32(jme, JME_TXCS);
        }
 
-       if(!i)
+       if(!i) {
                jeprintk(jme->dev->name, "Disable TX engine timeout.\n");
+               jme_reset_mac_processor(jme);
+       }
 
 
 }
@@ -638,7 +671,8 @@ jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
        rxdesc->dw[0] = 0;
        rxdesc->dw[1] = 0;
        rxdesc->desc1.bufaddrh  = cpu_to_le32((__u64)rxbi->mapping >> 32);
-       rxdesc->desc1.bufaddrl  = cpu_to_le32(rxbi->mapping & 0xFFFFFFFF);
+       rxdesc->desc1.bufaddrl  = cpu_to_le32(
+                                       (__u64)rxbi->mapping & 0xFFFFFFFFUL);
        rxdesc->desc1.datalen   = cpu_to_le16(rxbi->len);
        if(jme->dev->features & NETIF_F_HIGHDMA)
                rxdesc->desc1.flags = RXFLAG_64BIT;
@@ -733,7 +767,7 @@ jme_setup_rx_resources(struct jme_adapter *jme)
        rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
                                           RX_RING_ALLOC_SIZE,
                                           &(rxring->dmaalloc),
-                                          GFP_KERNEL);
+                                          GFP_ATOMIC);
        if(!rxring->alloc) {
                rxring->desc = NULL;
                rxring->dmaalloc = 0;
@@ -771,9 +805,9 @@ jme_enable_rx_engine(struct jme_adapter *jme)
        /*
         * Setup RX DMA Bass Address
         */
-       jwrite32(jme, JME_RXDBA_LO, jme->rxring[0].dma);
+       jwrite32(jme, JME_RXDBA_LO, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);
        jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
-       jwrite32(jme, JME_RXNDA, jme->rxring[0].dma);
+       jwrite32(jme, JME_RXNDA, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);
 
        /*
         * Setup RX Descptor Count
@@ -783,7 +817,6 @@ jme_enable_rx_engine(struct jme_adapter *jme)
        /*
         * Setup Unicast Filter
         */
-       jme->reg_rxmcs = RXMCS_VTAGRM | RXMCS_PREPAD;
        jme_set_multi(jme->dev);
 
        /*
@@ -825,7 +858,7 @@ jme_disable_rx_engine(struct jme_adapter *jme)
        val = jread32(jme, JME_RXCS);
        for(i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i)
        {
-               udelay(1);
+               mdelay(100);
                val = jread32(jme, JME_RXCS);
        }
 
@@ -852,19 +885,16 @@ jme_dynamic_pcc(struct jme_adapter *jme)
        register struct dynpcc_info *dpi = &(jme->dpi);
 
        if(jiffies >= dpi->check_point) {
-               if(jiffies > (dpi->check_point + PCC_INTERVAL)) {
+               if(jiffies > (dpi->check_point + PCC_INTERVAL))
+                       jme_attempt_pcc(dpi, PCC_P1);
+               else if((NET_STAT(jme).rx_bytes - dpi->last_bytes) >
+                                                       PCC_P3_THRESHOLD)
+                       jme_attempt_pcc(dpi, PCC_P3);
+               else if((NET_STAT(jme).rx_bytes - dpi->last_bytes) >
+                                                       PCC_P2_THRESHOLD)
+                       jme_attempt_pcc(dpi, PCC_P2);
+               else
                        jme_attempt_pcc(dpi, PCC_P1);
-               }
-               else {
-                       if((NET_STAT(jme).rx_bytes - dpi->last_bytes) >
-                                       PCC_P3_THRESHOLD)
-                               jme_attempt_pcc(dpi, PCC_P3);
-                       else if((NET_STAT(jme).rx_bytes - dpi->last_bytes) >
-                                       PCC_P2_THRESHOLD)
-                               jme_attempt_pcc(dpi, PCC_P2);
-                       else
-                               jme_attempt_pcc(dpi, PCC_P1);
-               }
 
                if(unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
                        jme_set_rx_pcc(jme, dpi->attempt);
@@ -912,6 +942,9 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
                skb_put(skb, framesize);
                skb->protocol = eth_type_trans(skb, jme->dev);
 
+               if(jme->reg_rxmcs & RXMCS_CHECKSUM)
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+
                netif_rx(skb);
 
                if(le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST_MUL)
@@ -924,6 +957,26 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
 
        jme_set_clean_rxdesc(jme, idx);
 
+       jme_dynamic_pcc(jme);
+
+}
+
+static int
+jme_rxsum_bad(struct jme_adapter *jme, __u16 flags)
+{
+       if(jme->reg_rxmcs & RXMCS_CHECKSUM) {
+               return  ((flags & RXWBFLAG_IPV4) && 
+                               !(flags & RXWBFLAG_IPCS)) ||
+                       ((flags & RXWBFLAG_IPV6) && 
+                               !(flags & RXWBFLAG_IPCS)) ||
+                       ((flags & RXWBFLAG_TCPON) && 
+                               !(flags & RXWBFLAG_TCPCS)) ||
+                       ((flags & RXWBFLAG_UDPON) && 
+                               !(flags & RXWBFLAG_UDPCS));
+       }
+       else {
+               return 0;
+       }
 }
 
 static int
@@ -947,8 +1000,9 @@ jme_process_receive(struct jme_adapter *jme, int limit)
 
                rx_dbg(jme->dev->name, "RX: Cleaning %d\n", i);
 
-               if(desccnt > 1 ||
-               rxdesc->descwb.errstat & RXWBERR_ALLERR) {
+               if(unlikely(desccnt > 1 ||
+               rxdesc->descwb.errstat & RXWBERR_ALLERR ||
+               jme_rxsum_bad(jme, rxdesc->descwb.flags))) {
 
                        if(rxdesc->descwb.errstat & RXWBERR_CRCERR)
                                ++(NET_STAT(jme).rx_crc_errors);
@@ -983,7 +1037,6 @@ out:
                (jread32(jme, JME_RXNDA) - jread32(jme, JME_RXDBA_LO))
                        >> 4);
 
-       jme_dynamic_pcc(jme);
        rxring->next_to_clean = i;
 
        return limit > 0 ? limit : 0;
@@ -994,7 +1047,64 @@ static void
 jme_link_change_tasklet(unsigned long arg)
 {
        struct jme_adapter *jme = (struct jme_adapter*)arg;
-       jme_check_link(jme->dev);
+       struct net_device *netdev = jme->dev;
+       int timeout = WAIT_TASKLET_TIMEOUT;
+       int rc;
+
+       if(!atomic_dec_and_test(&jme->link_changing))
+               goto out;
+
+       if(jme_check_link(netdev, 1))
+               goto out;
+
+       netif_stop_queue(netdev);
+
+       while(--timeout > 0 &&
+               (
+               atomic_read(&jme->rx_cleaning) != 1 ||
+               atomic_read(&jme->tx_cleaning) != 1
+               )) {
+
+               mdelay(1);
+       }
+
+       if(netif_carrier_ok(netdev)) {
+               jme_reset_mac_processor(jme);
+               jme_free_rx_resources(jme);
+               jme_free_tx_resources(jme);
+       }
+
+       jme_check_link(netdev, 0);
+       if(netif_carrier_ok(netdev)) {
+               rc = jme_setup_rx_resources(jme);
+               if(rc) {
+                       jeprintk(netdev->name,
+                               "Allocating resources for RX error"
+                               ", Device STOPPED!\n");
+                       goto out;
+               }
+
+
+               rc = jme_setup_tx_resources(jme);
+               if(rc) {
+                       jeprintk(netdev->name,
+                               "Allocating resources for TX error"
+                               ", Device STOPPED!\n");
+                       goto err_out_free_rx_resources;
+               }
+
+               jme_enable_rx_engine(jme);
+               jme_enable_tx_engine(jme);
+
+               netif_start_queue(netdev);
+       }
+
+       goto out;
+
+err_out_free_rx_resources:
+       jme_free_rx_resources(jme);
+out:
+       atomic_inc(&jme->link_changing);
 }
 
 static void
@@ -1002,13 +1112,34 @@ jme_rx_clean_tasklet(unsigned long arg)
 {
        struct jme_adapter *jme = (struct jme_adapter*)arg;
 
-       spin_lock(&jme->rx_lock);
+       if(!atomic_dec_and_test(&jme->rx_cleaning))
+               goto out;
+       
+       if(atomic_read(&jme->link_changing) != 1)
+               goto out;
+
+       if(unlikely(netif_queue_stopped(jme->dev)))
+               goto out;
+
        jme_process_receive(jme, RING_DESC_NR);
-       spin_unlock(&jme->rx_lock);
-       if(jme->flags & JME_FLAG_RXQ0_EMPTY) {
-               jme->flags &= ~JME_FLAG_RXQ0_EMPTY;
-               jme_restart_rx_engine(jme);
-       }
+
+out:
+       atomic_inc(&jme->rx_cleaning);
+}
+
+static void
+jme_rx_empty_tasklet(unsigned long arg)
+{
+       struct jme_adapter *jme = (struct jme_adapter*)arg;
+
+       if(atomic_read(&jme->link_changing) != 1)
+               return;
+
+       if(unlikely(netif_queue_stopped(jme->dev)))
+               return;
+
+       jme_rx_clean_tasklet(arg);
+       jme_restart_rx_engine(jme);
 }
 
 static void
@@ -1018,7 +1149,16 @@ jme_tx_clean_tasklet(unsigned long arg)
        struct jme_ring *txring = &(jme->txring[0]);
        volatile struct txdesc *txdesc = txring->desc;
        struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
-       int i, j, cnt = 0, max;
+       int i, j, cnt = 0, max, err;
+
+       if(!atomic_dec_and_test(&jme->tx_cleaning))
+               goto out;
+
+       if(atomic_read(&jme->link_changing) != 1)
+               goto out;
+
+       if(unlikely(netif_queue_stopped(jme->dev)))
+               goto out;
 
        spin_lock(&jme->tx_lock);
        max = RING_DESC_NR - txring->nr_free;
@@ -1030,7 +1170,9 @@ jme_tx_clean_tasklet(unsigned long arg)
 
                ctxbi = txbi + i;
 
-               if(ctxbi->skb && !(txdesc[i].desc1.flags & TXFLAG_OWN)) {
+               if(ctxbi->skb && !(txdesc[i].descwb.flags & TXWBFLAG_OWN)) {
+
+                       err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;
 
                        tx_dbg(jme->dev->name,
                                "Tx Tasklet: Clean %d+%d\n",
@@ -1045,7 +1187,9 @@ jme_tx_clean_tasklet(unsigned long arg)
                                                 ttxbi->len,
                                                 PCI_DMA_TODEVICE);
 
-                               NET_STAT(jme).tx_bytes += ttxbi->len;
+                               if(likely(!err))
+                                       NET_STAT(jme).tx_bytes += ttxbi->len;
+
                                ttxbi->mapping = 0;
                                ttxbi->len = 0;
                        }
@@ -1055,7 +1199,10 @@ jme_tx_clean_tasklet(unsigned long arg)
 
                        cnt += ctxbi->nr_desc;
 
-                       ++(NET_STAT(jme).tx_packets);
+                       if(unlikely(err))
+                               ++(NET_STAT(jme).tx_carrier_errors);
+                       else
+                               ++(NET_STAT(jme).tx_packets);
                }
                else {
                        if(!ctxbi->skb)
@@ -1084,6 +1231,8 @@ jme_tx_clean_tasklet(unsigned long arg)
        txring->nr_free += cnt;
        spin_unlock(&jme->tx_lock);
 
+out:
+       atomic_inc(&jme->tx_cleaning);
 }
 
 static irqreturn_t
@@ -1125,24 +1274,23 @@ jme_intr(int irq, void *dev_id)
         * Allow one interrupt handling at a time
         */
         if(unlikely(!atomic_dec_and_test(&jme->intr_sem)))
-               goto out;
+               goto out_inc;
 
        /*
         * Disable interrupt
         */
        jwrite32f(jme, JME_IENC, INTR_ENABLE);
 
-       if(intrstat & INTR_LINKCH)
+       if(intrstat & INTR_LINKCH) {
                tasklet_schedule(&jme->linkch_task);
+               goto out_deassert;
+       }
 
-       if(intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP)) {
-               if(intrstat & INTR_RX0EMP) {
-                       jme->flags |= JME_FLAG_RXQ0_EMPTY;
-                       jeprintk(netdev->name, "Ranout of Receive Queue 0.\n");
-               }
+       if(intrstat & INTR_RX0EMP)
+               tasklet_schedule(&jme->rxempty_task);
 
+       if(intrstat & (INTR_PCCRX0TO | INTR_PCCRX0))
                tasklet_schedule(&jme->rxclean_task);
-       }
 
        if(intrstat & (INTR_PCCTXTO | INTR_PCCTX))
                tasklet_schedule(&jme->txclean_task);
@@ -1154,30 +1302,54 @@ jme_intr(int irq, void *dev_id)
                 */
        }
 
+out_deassert:
        /*
         * Deassert interrupts
         */
        jwrite32f(jme, JME_IEVE, intrstat);
 
        /*
-        * Enable next interrupt handling
+        * Re-enable interrupt
         */
-       atomic_set(&jme->intr_sem, 1);
+       jwrite32f(jme, JME_IENS, INTR_ENABLE);
 
+out_inc:
        /*
-        * Re-enable interrupt
+        * Enable next interrupt handling
         */
-       jwrite32f(jme, JME_IENS, INTR_ENABLE);
+       atomic_inc(&jme->intr_sem);
 
 out:
         return rc;
 }
 
+static void
+jme_restart_an(struct jme_adapter *jme)
+{
+       __u32 bmcr;
+
+       bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
+       bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+       jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
+}
+
 static int
 jme_open(struct net_device *netdev)
 {
        struct jme_adapter *jme = netdev_priv(netdev);
-       int rc;
+       int rc, timeout = 100;
+
+       while(
+               --timeout > 0 &&
+               (
+               atomic_read(&jme->link_changing) != 1 ||
+               atomic_read(&jme->rx_cleaning) != 1 ||
+               atomic_read(&jme->tx_cleaning) != 1
+               )
+       )
+               msleep(10);
+
+       jme_reset_mac_processor(jme);
 
        rc = request_irq(jme->pdev->irq, jme_intr,
                         IRQF_SHARED, netdev->name, netdev);
@@ -1185,34 +1357,12 @@ jme_open(struct net_device *netdev)
                printk(KERN_ERR PFX "Requesting IRQ error.\n");
                goto err_out;
        }
-
-       rc = jme_setup_rx_resources(jme);
-       if(rc) {
-               printk(KERN_ERR PFX "Allocating resources for RX error.\n");
-               goto err_out_free_irq;
-       }
-
-
-       rc = jme_setup_tx_resources(jme);
-       if(rc) {
-               printk(KERN_ERR PFX "Allocating resources for TX error.\n");
-               goto err_out_free_rx_resources;
-       }
-
-       jme_reset_mac_processor(jme);
-       jme_check_link(netdev);
        jme_enable_shadow(jme);
        jme_start_irq(jme);
-       jme_enable_rx_engine(jme);
-       jme_enable_tx_engine(jme);
-       netif_start_queue(netdev);
+       jme_restart_an(jme);
 
        return 0;
 
-err_out_free_rx_resources:
-       jme_free_rx_resources(jme);
-err_out_free_irq:
-       free_irq(jme->pdev->irq, jme->dev);
 err_out:
        netif_stop_queue(netdev);
        netif_carrier_off(netdev);
@@ -1234,8 +1384,9 @@ jme_close(struct net_device *netdev)
        tasklet_kill(&jme->linkch_task);
        tasklet_kill(&jme->txclean_task);
        tasklet_kill(&jme->rxclean_task);
-       jme_disable_rx_engine(jme);
-       jme_disable_tx_engine(jme);
+       tasklet_kill(&jme->rxempty_task);
+
+       jme_reset_mac_processor(jme);
        jme_free_rx_resources(jme);
        jme_free_tx_resources(jme);
 
@@ -1251,6 +1402,9 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
         struct jme_adapter *jme = netdev_priv(netdev);
        int rc;
 
+       if(unlikely(netif_queue_stopped(jme->dev)))
+               return NETDEV_TX_BUSY;
+
        rc = jme_set_new_txdesc(jme, skb);
 
        if(unlikely(rc != NETDEV_TX_OK))
@@ -1275,7 +1429,7 @@ jme_set_macaddr(struct net_device *netdev, void *p)
        if(netif_running(netdev))
                return -EBUSY;
 
-       spin_lock(&jme->phy_lock);
+       spin_lock(&jme->macaddr_lock);
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
 
        val = addr->sa_data[3] << 24 |
@@ -1286,7 +1440,7 @@ jme_set_macaddr(struct net_device *netdev, void *p)
        val = addr->sa_data[5] << 8 |
              addr->sa_data[4];
        jwrite32(jme, JME_RXUMA_HI, val);
-       spin_unlock(&jme->phy_lock);
+       spin_unlock(&jme->macaddr_lock);
 
        return 0;
 }
@@ -1296,23 +1450,24 @@ jme_set_multi(struct net_device *netdev)
 {
        struct jme_adapter *jme = netdev_priv(netdev);
        u32 mc_hash[2] = {};
-       __u32 val;
        int i;
+       unsigned long flags;
 
-       spin_lock(&jme->phy_lock);
-       val = jme->reg_rxmcs | RXMCS_BRDFRAME | RXMCS_UNIFRAME;
+       spin_lock_irqsave(&jme->rxmcs_lock, flags);
+
+       jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME;
 
         if (netdev->flags & IFF_PROMISC) {
-               val |= RXMCS_ALLFRAME;
+               jme->reg_rxmcs |= RXMCS_ALLFRAME;
        }
         else if (netdev->flags & IFF_ALLMULTI) {
-               val |= RXMCS_ALLMULFRAME;
+               jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
        }
        else if(netdev->flags & IFF_MULTICAST) {
                struct dev_mc_list *mclist;
                int bit_nr;
 
-               val |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
+               jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
                for (i = 0, mclist = netdev->mc_list;
                        mclist && i < netdev->mc_count;
                        ++i, mclist = mclist->next) {
@@ -1325,14 +1480,14 @@ jme_set_multi(struct net_device *netdev)
                jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
        }
 
-
        wmb();
-       jwrite32(jme, JME_RXMCS, val);
-       spin_unlock(&jme->phy_lock);
+       jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
+
+       spin_unlock_irqrestore(&jme->rxmcs_lock, flags);
 }
 
 static int
-jme_change_mtu(struct net_device *dev, int new_mtu)
+jme_change_mtu(struct net_device *netdev, int new_mtu)
 {
        /*
         * Not supporting MTU change for now.
@@ -1340,6 +1495,18 @@ jme_change_mtu(struct net_device *dev, int new_mtu)
        return -EINVAL;
 }
 
+static void
+jme_tx_timeout(struct net_device *netdev)
+{
+        struct jme_adapter *jme = netdev_priv(netdev);
+
+       /*
+        * Reset the link
+        * And the link change will reinitiallize all RX/TX resources
+        */
+       jme_restart_an(jme);
+}
+
 static void
 jme_get_drvinfo(struct net_device *netdev,
                     struct ethtool_drvinfo *info)
@@ -1351,12 +1518,146 @@ jme_get_drvinfo(struct net_device *netdev,
         strcpy(info->bus_info, pci_name(jme->pdev));
 }
 
+/*
+ * ethtool get_regs_len: size in bytes of the register dump produced by
+ * jme_get_regs() — four regions (MAC, PHY, MISC, RSS), each at a
+ * 0x100-byte offset, 0x400 bytes total.
+ */
+static int
+jme_get_regs_len(struct net_device *netdev)
+{
+       return 0x400;
+}
+
+/*
+ * Copy 'len' bytes of memory-mapped register space, starting at register
+ * offset 'reg', into buffer 'p' using 32-bit MMIO reads (jread32).
+ * Assumes 'len' is a multiple of 4 — callers pass fixed region lengths.
+ */
+static void
+mmapio_memcpy(struct jme_adapter *jme, __u32 *p, __u32 reg, int len)
+{
+       int i;
+
+       for(i = 0 ; i < len ; i += 4)
+               p[i>>2] = jread32(jme, reg + i);
+
+}
+
+/*
+ * ethtool get_regs: dump the four hardware register regions (MAC, PHY,
+ * MISC, RSS) into the user-supplied buffer 'p', each region starting at
+ * the next 0x100-byte offset.  Buffer size (0x400) is reported by
+ * jme_get_regs_len() and zeroed first so gaps read back as 0.
+ */
+static void
+jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
+{
+        struct jme_adapter *jme = netdev_priv(netdev);
+       __u32 *p32 = (__u32*)p;
+
+       memset(p, 0, 0x400);
+
+       regs->version = 1;
+       mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);
+
+       p32 += 0x100 >> 2;
+       mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN);
+
+       p32 += 0x100 >> 2;
+       mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN);
+
+       p32 += 0x100 >> 2;
+       mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);
+
+}
+
+/*
+ * ethtool get_coalesce: report interrupt-coalescing parameters.
+ *
+ * TX values are fixed (PCC_TX_TO / PCC_TX_CNT).  RX coalescing is
+ * adaptive: the values returned depend on the currently selected dynamic
+ * PCC profile (jme->dpi.cur), hence use_adaptive_rx_coalesce is always
+ * reported as true.  Unknown profiles leave the RX fields untouched.
+ */
+static int
+jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
+{
+       struct jme_adapter *jme = netdev_priv(netdev);
+
+       ecmd->use_adaptive_rx_coalesce = true;
+       ecmd->tx_coalesce_usecs = PCC_TX_TO;
+       ecmd->tx_max_coalesced_frames = PCC_TX_CNT;
+
+       switch(jme->dpi.cur) {
+       case PCC_P1:
+               ecmd->rx_coalesce_usecs = PCC_P1_TO;
+               ecmd->rx_max_coalesced_frames = PCC_P1_CNT;
+               break;
+       case PCC_P2:
+               ecmd->rx_coalesce_usecs = PCC_P2_TO;
+               ecmd->rx_max_coalesced_frames = PCC_P2_CNT;
+               break;
+       case PCC_P3:
+               ecmd->rx_coalesce_usecs = PCC_P3_TO;
+               ecmd->rx_max_coalesced_frames = PCC_P3_CNT;
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+/*
+ * ethtool get_pauseparam: report flow-control settings.
+ *
+ * tx_pause and rx_pause come from the driver's register shadows
+ * (reg_txpfc / reg_rxmcs); autoneg is derived from the pause-capability
+ * bits in the PHY's MII advertisement register, read under phy_lock.
+ */
+static void
+jme_get_pauseparam(struct net_device *netdev,
+                       struct ethtool_pauseparam *ecmd)
+{
+       struct jme_adapter *jme = netdev_priv(netdev);
+       unsigned long flags;
+       __u32 val;
+
+       ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
+       ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;
+
+       spin_lock_irqsave(&jme->phy_lock, flags);
+        val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
+       spin_unlock_irqrestore(&jme->phy_lock, flags);
+       ecmd->autoneg = (val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
+}
+
+/*
+ * ethtool set_pauseparam: apply flow-control settings.
+ *
+ * Each of the three settings (TX pause via TXPFC, RX flow control via
+ * RXMCS, pause advertisement via the PHY's MII_ADVERTISE register) is
+ * only written to hardware when it differs from the requested state.
+ * RXMCS is updated under rxmcs_lock, PHY access under phy_lock.
+ *
+ * NOTE(review): the TXPFC read-modify-write below is done without any
+ * lock, unlike the RXMCS and PHY paths — confirm whether reg_txpfc can
+ * be raced by another writer.
+ */
+static int
+jme_set_pauseparam(struct net_device *netdev,
+                       struct ethtool_pauseparam *ecmd)
+{
+       struct jme_adapter *jme = netdev_priv(netdev);
+       unsigned long flags;
+       __u32 val;
+
+       if( ((jme->reg_txpfc & TXPFC_PF_EN) != 0) !=
+               (ecmd->tx_pause != 0)) {
+
+               if(ecmd->tx_pause)
+                       jme->reg_txpfc |= TXPFC_PF_EN;
+               else
+                       jme->reg_txpfc &= ~TXPFC_PF_EN;
+
+               jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
+       }
+
+       spin_lock_irqsave(&jme->rxmcs_lock, flags);
+       if( ((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) !=
+               (ecmd->rx_pause != 0)) {
+
+               if(ecmd->rx_pause)
+                       jme->reg_rxmcs |= RXMCS_FLOWCTRL;
+               else
+                       jme->reg_rxmcs &= ~RXMCS_FLOWCTRL;
+
+               jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
+       }
+       spin_unlock_irqrestore(&jme->rxmcs_lock, flags);
+
+       spin_lock_irqsave(&jme->phy_lock, flags);
+        val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
+       if( ((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) != 
+               (ecmd->autoneg != 0)) {
+
+               if(ecmd->autoneg)
+                       val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+               else
+                       val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+
+               jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE, val);
+       }
+       spin_unlock_irqrestore(&jme->phy_lock, flags);
+
+       return 0;
+}
+
 static int
 jme_get_settings(struct net_device *netdev,
                     struct ethtool_cmd *ecmd)
 {
        struct jme_adapter *jme = netdev_priv(netdev);
        int rc;
+
        spin_lock(&jme->phy_lock);
        rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
        spin_unlock(&jme->phy_lock);
@@ -1369,9 +1670,15 @@ jme_set_settings(struct net_device *netdev,
 {
        struct jme_adapter *jme = netdev_priv(netdev);
        int rc;
-       spin_lock(&jme->phy_lock);
+       unsigned long flags;
+
+       if(ecmd->speed == SPEED_1000 && ecmd->autoneg != AUTONEG_ENABLE)
+               return -EINVAL;
+
+       spin_lock_irqsave(&jme->phy_lock, flags);
        rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
-       spin_unlock(&jme->phy_lock);
+       spin_unlock_irqrestore(&jme->phy_lock, flags);
+
        return rc;
 }
 
@@ -1382,11 +1689,64 @@ jme_get_link(struct net_device *netdev)
        return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
 }
 
+/*
+ * ethtool get_rx_csum: nonzero when RX checksum offload is enabled,
+ * taken from the RXMCS_CHECKSUM bit of the driver's register shadow.
+ */
+static u32
+jme_get_rx_csum(struct net_device *netdev)
+{
+        struct jme_adapter *jme = netdev_priv(netdev);
+
+       return jme->reg_rxmcs & RXMCS_CHECKSUM;
+}
+
+/*
+ * ethtool set_rx_csum: enable/disable RX checksum offload by toggling
+ * RXMCS_CHECKSUM in the register shadow and writing it to hardware,
+ * all under rxmcs_lock.  Always succeeds.
+ */
+static int
+jme_set_rx_csum(struct net_device *netdev, u32 on)
+{
+        struct jme_adapter *jme = netdev_priv(netdev);
+       unsigned long flags;
+       
+       spin_lock_irqsave(&jme->rxmcs_lock, flags);
+       if(on)
+               jme->reg_rxmcs |= RXMCS_CHECKSUM;
+       else
+               jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
+       jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
+       spin_unlock_irqrestore(&jme->rxmcs_lock, flags);
+
+       return 0;
+}
+
+/*
+ * ethtool set_tx_csum: enable/disable TX checksum offload by toggling
+ * the NETIF_F_HW_CSUM feature flag.  No hardware register is touched;
+ * the stack consults netdev->features per packet.  Always succeeds.
+ */
+static int
+jme_set_tx_csum(struct net_device *netdev, u32 on)
+{
+       if(on)
+               netdev->features |= NETIF_F_HW_CSUM;
+       else
+               netdev->features &= ~NETIF_F_HW_CSUM;
+
+       return 0;
+}
+
+/*
+ * ethtool nway_reset: restart autonegotiation on the link.
+ * Delegates to jme_restart_an(); always reports success.
+ */
+static int
+jme_nway_reset(struct net_device *netdev)
+{
+        struct jme_adapter *jme = netdev_priv(netdev);
+       jme_restart_an(jme);
+       return 0;
+}
+
+/* Ethtool callbacks exported by this driver via netdev->ethtool_ops. */
 static const struct ethtool_ops jme_ethtool_ops = {
         .get_drvinfo            = jme_get_drvinfo,
+       .get_regs_len           = jme_get_regs_len,
+       .get_regs               = jme_get_regs,
+       .get_coalesce           = jme_get_coalesce,
+        .get_pauseparam                = jme_get_pauseparam,
+        .set_pauseparam                = jme_set_pauseparam,
       .get_settings           = jme_get_settings,
       .set_settings           = jme_set_settings,
       .get_link               = jme_get_link,
+       .get_rx_csum            = jme_get_rx_csum,
+       .set_rx_csum            = jme_set_rx_csum,
+       .set_tx_csum            = jme_set_tx_csum,
+       .nway_reset             = jme_nway_reset,
 };
 
 static int
@@ -1396,6 +1756,10 @@ jme_pci_dma64(struct pci_dev *pdev)
                 if(!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
                        return 1;
 
+        if (!pci_set_dma_mask(pdev, DMA_40BIT_MASK))
+                if(!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK))
+                       return 1;
+
         if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
                 if(!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
                        return 0;
@@ -1457,10 +1821,12 @@ jme_init_one(struct pci_dev *pdev,
        netdev->set_multicast_list      = jme_set_multi;
        netdev->change_mtu              = jme_change_mtu;
        netdev->ethtool_ops             = &jme_ethtool_ops;
+       netdev->tx_timeout              = jme_tx_timeout;
+       netdev->watchdog_timeo          = TX_TIMEOUT;
        NETDEV_GET_STATS(netdev, &jme_get_stats);
-
+       netdev->features                =       NETIF_F_HW_CSUM;
        if(using_dac)
-               netdev->features        = NETIF_F_HIGHDMA;
+               netdev->features        |=      NETIF_F_HIGHDMA;
 
        SET_NETDEV_DEV(netdev, &pdev->dev);
        pci_set_drvdata(pdev, netdev);
@@ -1471,7 +1837,7 @@ jme_init_one(struct pci_dev *pdev,
        jme = netdev_priv(netdev);
        jme->pdev = pdev;
        jme->dev = netdev;
-       jme->reg_ghc = GHC_DPX | GHC_SPEED_1000M;
+       jme->phylink = 0;
        jme->regs = ioremap(pci_resource_start(pdev, 0),
                             pci_resource_len(pdev, 0));
        if (!(jme->regs)) {
@@ -1486,9 +1852,16 @@ jme_init_one(struct pci_dev *pdev,
                goto err_out_unmap;
        }
 
-       spin_lock_init(&jme->rx_lock);
        spin_lock_init(&jme->tx_lock);
        spin_lock_init(&jme->phy_lock);
+       spin_lock_init(&jme->macaddr_lock);
+       spin_lock_init(&jme->rxmcs_lock);
+
+       atomic_set(&jme->intr_sem, 1);
+       atomic_set(&jme->link_changing, 1);
+       atomic_set(&jme->rx_cleaning, 1);
+       atomic_set(&jme->tx_cleaning, 1);
+
        tasklet_init(&jme->linkch_task,
                     &jme_link_change_tasklet,
                     (unsigned long) jme);
@@ -1498,12 +1871,37 @@ jme_init_one(struct pci_dev *pdev,
        tasklet_init(&jme->rxclean_task,
                     &jme_rx_clean_tasklet,
                     (unsigned long) jme);
+       tasklet_init(&jme->rxempty_task,
+                    &jme_rx_empty_tasklet,
+                    (unsigned long) jme);
        jme->mii_if.dev = netdev;
        jme->mii_if.phy_id = 1;
        jme->mii_if.supports_gmii = 1;
        jme->mii_if.mdio_read = jme_mdio_read;
        jme->mii_if.mdio_write = jme_mdio_write;
 
+       jme->dpi.cur = PCC_P1;
+
+       jme->reg_ghc = GHC_DPX | GHC_SPEED_1000M;
+       jme->reg_rxmcs = RXMCS_DEFAULT;
+       jme->reg_txpfc = 0;
+       /*
+        * Get Max Read Req Size from PCI Config Space
+        */
+       pci_read_config_byte(pdev, PCI_CONF_DCSR_MRRS, &jme->mrrs);
+       switch(jme->mrrs) {
+               case MRRS_128B:
+                       jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
+                       break;
+               case MRRS_256B:
+                       jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
+                       break;
+               default:
+                       jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
+                       break;
+       };
+
+
        /*
         * Reset MAC processor and reload EEPROM for MAC Address
         */
@@ -1535,16 +1933,13 @@ jme_init_one(struct pci_dev *pdev,
        }
 
        jprintk(netdev->name,
-               "JMC250 gigabit eth at %llx, "
-               "%02x:%02x:%02x:%02x:%02x:%02x, IRQ %d\n",
-               (unsigned long long) pci_resource_start(pdev, 0),
+               "JMC250 gigabit eth %02x:%02x:%02x:%02x:%02x:%02x\n",
                netdev->dev_addr[0],
                netdev->dev_addr[1],
                netdev->dev_addr[2],
                netdev->dev_addr[3],
                netdev->dev_addr[4],
-               netdev->dev_addr[5],
-               pdev->irq);
+               netdev->dev_addr[5]);
 
        return 0;