Import jme 0.8 source
diff --git a/jme.c b/jme.c
index aead26ea3a4a4db30d74e4783be52b1f58d728bf..986db28b1cba3d5de79c558fe1c8b5ed97b78ba2 100644
--- a/jme.c
+++ b/jme.c
  *
  */
 
+/*
+ * Note:
+ *     Backdoor for changing "FIFO Threshold for processing next packet"
+ *        Usage:
+ *             ethtool -C eth1 adaptive-rx on adaptive-tx on \
+ *             rx-usecs 250 rx-frames-low N
+ *        N := 16 | 32 | 64 | 128
+ */
+
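+/*
+ * Editor's note: a minimal userspace sketch of the backdoor above, driving
+ * the same knobs through the raw SIOCETHTOOL ioctl instead of the ethtool
+ * binary.  The interface name "eth1" and the chosen N (64) are illustrative
+ * assumptions, not requirements of the driver.
+ */
+#include <stdio.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <net/if.h>
+#include <linux/types.h>
+#include <linux/ethtool.h>
+#include <linux/sockios.h>
+
+int main(void)
+{
+        struct ethtool_coalesce ec;
+        struct ifreq ifr;
+        int fd = socket(AF_INET, SOCK_DGRAM, 0);
+
+        if (fd < 0) {
+                perror("socket");
+                return 1;
+        }
+
+        memset(&ec, 0, sizeof(ec));
+        ec.cmd                          = ETHTOOL_SCOALESCE;
+        ec.use_adaptive_rx_coalesce     = 1;    /* adaptive-rx on */
+        ec.use_adaptive_tx_coalesce     = 1;    /* adaptive-tx on */
+        ec.rx_coalesce_usecs            = 250;  /* rx-usecs 250: the magic key */
+        ec.rx_max_coalesced_frames_low  = 64;   /* rx-frames-low N, N = 64 */
+
+        memset(&ifr, 0, sizeof(ifr));
+        strncpy(ifr.ifr_name, "eth1", IFNAMSIZ - 1);
+        ifr.ifr_data = (void *)&ec;
+
+        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
+                perror("SIOCETHTOOL");
+                return 1;
+        }
+        return 0;
+}
+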
 /*
  * Timeline before release:
- *     Stage 4: Basic feature support.
+ *     Stage 5: Advanced offloading support.
+ *     0.9:
  *     -  Implement scatter-gather offloading.
  *        Use pci_map_page on scattered sk_buff for HIGHMEM support
- *     -  Implement Power Managemt related functions.
- *     -  Implement Jumboframe.
- *     -  Implement MSI.
- *
- *     Stage 5: Advanced offloading support.
- *     -  Implement VLAN offloading.
 *     -  Implement TCP Segment offloading.
+ *             Due to the TX FIFO size, TSO should be turned off when MTU > 1500.
  *
  *     Stage 6: CPU Load balancing.
+ *     1.0:
  *     -  Implement MSI-X.
  *        Along with multiple RX queue, for CPU load balancing.
  *
 #include <linux/mii.h>
 #include <linux/crc32.h>
 #include <linux/delay.h>
+#include <linux/spinlock.h>
 #include <linux/in.h>
 #include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if_vlan.h>
 #include "jme.h"
 
 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
@@ -83,7 +94,7 @@ jme_mdio_read(struct net_device *netdev, int phy, int reg)
                                smi_reg_addr(reg));
 
        wmb();
-        for (i = JME_PHY_TIMEOUT; i > 0; --i) {
+        for (i = JME_PHY_TIMEOUT ; i > 0 ; --i) {
                udelay(1);
                if (((val = jread32(jme, JME_SMI)) & SMI_OP_REQ) == 0)
                        break;
@@ -165,8 +176,9 @@ jme_reset_mac_processor(struct jme_adapter *jme)
 __always_inline static void
 jme_clear_pm(struct jme_adapter *jme)
 {
-       jwrite32(jme, JME_PMCS, 0xFFFF0000);
+       jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs);
        pci_set_power_state(jme->pdev, PCI_D0);
+       pci_enable_wake(jme->pdev, PCI_D0, false);
 }
 
 static int
@@ -255,10 +267,6 @@ jme_start_irq(struct jme_adapter *jme)
        register struct dynpcc_info *dpi = &(jme->dpi);
 
        jme_set_rx_pcc(jme, PCC_P1);
-
-       dpi->check_point        = jiffies + PCC_INTERVAL;
-       dpi->last_bytes         = NET_STAT(jme).rx_bytes;
-       dpi->last_pkts          = NET_STAT(jme).rx_packets;
        dpi->cur                = PCC_P1;
        dpi->attempt            = PCC_P1;
        dpi->cnt                = 0;
@@ -304,7 +312,7 @@ jme_check_link(struct net_device *netdev, int testonly)
 {
        struct jme_adapter *jme = netdev_priv(netdev);
        __u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr;
-       char linkmsg[32];
+       char linkmsg[64];
        int rc = 0;
 
        phylink = jread32(jme, JME_PHY_LINK);
@@ -321,6 +329,7 @@ jme_check_link(struct net_device *netdev, int testonly)
                                                jme->mii_if.phy_id,
                                                MII_BMCR);
 
                        phylink |= ((bmcr & BMCR_SPEED1000) &&
                                        (bmcr & BMCR_SPEED100) == 0) ?
                                        PHY_LINK_SPEED_1000M :
@@ -330,6 +339,8 @@ jme_check_link(struct net_device *netdev, int testonly)
 
                        phylink |= (bmcr & BMCR_FULLDPLX) ?
                                         PHY_LINK_DUPLEX : 0;
+
+                       strcpy(linkmsg, "Forced: ");
                }
                else {
                        /*
@@ -346,6 +357,8 @@ jme_check_link(struct net_device *netdev, int testonly)
                        if(!cnt)
                                jeprintk(netdev->name,
                                        "Waiting speed resolve timeout.\n");
+
+                       strcpy(linkmsg, "ANed: ");
                }
 
                if(jme->phylink == phylink) {
@@ -427,19 +440,59 @@ jme_alloc_txdesc(struct jme_adapter *jme,
 
        idx = txring->next_to_use;
 
-       if(unlikely(txring->nr_free < nr_alloc))
+       if(unlikely(atomic_read(&txring->nr_free) < nr_alloc))
                return -1;
 
-       spin_lock(&jme->tx_lock);
-       txring->nr_free -= nr_alloc;
+       atomic_sub(nr_alloc, &txring->nr_free);
 
        if((txring->next_to_use += nr_alloc) >= RING_DESC_NR)
                txring->next_to_use -= RING_DESC_NR;
-       spin_unlock(&jme->tx_lock);
 
        return idx;
 }
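+
+/*
+ * Editor's note: a hedged sketch of the lock-free pattern above.  It assumes
+ * a single producer (hard_start_xmit is serialized per device), so only
+ * nr_free needs to be atomic: the TX-clean tasklet returns descriptors with
+ * atomic_add() while the producer claims them with atomic_sub().  The names
+ * demo_ring/DEMO_RING_NR are illustrative, not part of the driver.
+ */
+#define DEMO_RING_NR 512
+
+struct demo_ring {
+        atomic_t        nr_free;        /* shared with the cleanup path */
+        int             next_to_use;    /* producer-private, no lock needed */
+};
+
+static int demo_alloc(struct demo_ring *ring, int nr_alloc)
+{
+        int idx = ring->next_to_use;
+
+        if (atomic_read(&ring->nr_free) < nr_alloc)
+                return -1;                      /* ring full, caller backs off */
+
+        atomic_sub(nr_alloc, &ring->nr_free);   /* claim the descriptors */
+
+        if ((ring->next_to_use += nr_alloc) >= DEMO_RING_NR)
+                ring->next_to_use -= DEMO_RING_NR;      /* wrap around */
+
+        return idx;
+}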
 
+static void
+jme_tx_csum(struct sk_buff *skb, unsigned mtu, __u8 *flags)
+{
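+       /*
+        * Editor's note: the mtu argument is currently unused here;
+        * presumably reserved for the TSO/MTU interaction noted in the
+        * roadmap at the top of this file.
+        */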
+       if(skb->ip_summed == CHECKSUM_PARTIAL) {
+               __u8 ip_proto;
+
+               switch (skb->protocol) {
+               case __constant_htons(ETH_P_IP):
+                       ip_proto = ip_hdr(skb)->protocol;
+                       break;
+               case __constant_htons(ETH_P_IPV6):
+                       ip_proto = ipv6_hdr(skb)->nexthdr;
+                       break;
+               default:
+                       ip_proto = 0;
+                       break;
+               }
+
+               switch(ip_proto) {
+               case IPPROTO_TCP:
+                       *flags |= TXFLAG_TCPCS;
+                       break;
+               case IPPROTO_UDP:
+                       *flags |= TXFLAG_UDPCS;
+                       break;
+               default:
+                       jeprintk("jme", "Unsupported upper layer protocol.\n");
+                       break;
+               }
+       }
+}
+
+__always_inline static void
+jme_tx_vlan(struct sk_buff *skb, volatile __u16 *vlan, __u8 *flags)
+{
+       if(vlan_tx_tag_present(skb)) {
+               *flags |= TXFLAG_TAGON;
+               *vlan = vlan_tx_tag_get(skb);
+       }
+}
+
 static int
 jme_set_new_txdesc(struct jme_adapter *jme,
                        struct sk_buff *skb)
@@ -502,20 +555,8 @@ jme_set_new_txdesc(struct jme_adapter *jme,
         */
        wmb();
        flags = TXFLAG_OWN | TXFLAG_INT; 
-       if(skb->ip_summed == CHECKSUM_PARTIAL) {
-               //flags |= TXFLAG_IPCS;
-
-               switch(ip_hdr(skb)->protocol) {
-               case IPPROTO_TCP:
-                       flags |= TXFLAG_TCPCS;
-                       break;
-               case IPPROTO_UDP:
-                       flags |= TXFLAG_UDPCS;
-                       break;
-               default:
-                       break;
-               }
-       }
+       jme_tx_csum(skb, jme->dev->mtu, &flags);
+       jme_tx_vlan(skb, &(ctxdesc->desc1.vlan), &flags);
        ctxdesc->desc1.flags = flags;
        /*
         * Set tx buffer info after telling NIC to send
@@ -556,7 +597,7 @@ jme_setup_tx_resources(struct jme_adapter *jme)
        txring->dma             = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
        txring->next_to_use     = 0;
        txring->next_to_clean   = 0;
-       txring->nr_free         = RING_DESC_NR;
+       atomic_set(&txring->nr_free, RING_DESC_NR);
 
        /*
         * Initialize Transmit Descriptors
@@ -599,7 +640,7 @@ jme_free_tx_resources(struct jme_adapter *jme)
        }
        txring->next_to_use     = 0;
        txring->next_to_clean   = 0;
-       txring->nr_free         = 0;
+       atomic_set(&txring->nr_free, 0);
 
 }
 
@@ -633,6 +674,17 @@ jme_enable_tx_engine(struct jme_adapter *jme)
 
 }
 
+__always_inline static void
+jme_restart_tx_engine(struct jme_adapter *jme)
+{
+       /*
+        * Restart TX Engine
+        */
+       jwrite32(jme, JME_TXCS, jme->reg_txcs |
+                               TXCS_SELECT_QUEUE0 |
+                               TXCS_ENABLE);
+}
+
 __always_inline static void
 jme_disable_tx_engine(struct jme_adapter *jme)
 {
@@ -688,7 +740,8 @@ jme_make_new_rx_buf(struct jme_adapter *jme, int i)
        unsigned long offset;
        struct sk_buff* skb;
 
-       skb = netdev_alloc_skb(jme->dev, RX_BUF_ALLOC_SIZE);
+       skb = netdev_alloc_skb(jme->dev,
+               jme->dev->mtu + RX_EXTRA_LEN);
        if(unlikely(!skb))
                return -ENOMEM;
 
@@ -702,7 +755,7 @@ jme_make_new_rx_buf(struct jme_adapter *jme, int i)
 
        if(unlikely(offset =
                        (unsigned long)(skb->data)
-                       & (unsigned long)(RX_BUF_DMA_ALIGN - 1)))
+                       & ((unsigned long)RX_BUF_DMA_ALIGN - 1)))
                skb_reserve(skb, RX_BUF_DMA_ALIGN - offset);
 
        rxbi += i;
@@ -823,7 +876,7 @@ jme_enable_rx_engine(struct jme_adapter *jme)
         * Enable RX Engine
         */
        wmb();
-       jwrite32(jme, JME_RXCS, RXCS_DEFAULT |
+       jwrite32(jme, JME_RXCS, jme->reg_rxcs |
                                RXCS_QUEUESEL_Q0 |
                                RXCS_ENABLE |
                                RXCS_QST);
@@ -835,7 +888,7 @@ jme_restart_rx_engine(struct jme_adapter *jme)
        /*
         * Start RX Engine
         */
-       jwrite32(jme, JME_RXCS, RXCS_DEFAULT |
+       jwrite32(jme, JME_RXCS, jme->reg_rxcs |
                                RXCS_QUEUESEL_Q0 |
                                RXCS_ENABLE |
                                RXCS_QST);
@@ -851,14 +904,12 @@ jme_disable_rx_engine(struct jme_adapter *jme)
        /*
         * Disable RX Engine
         */
-       val = jread32(jme, JME_RXCS);
-       val &= ~RXCS_ENABLE;
-       jwrite32(jme, JME_RXCS, val);
+       jwrite32(jme, JME_RXCS, jme->reg_rxcs);
 
        val = jread32(jme, JME_RXCS);
        for(i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i)
        {
-               mdelay(100);
+               mdelay(1);
                val = jread32(jme, JME_RXCS);
        }
 
@@ -867,47 +918,6 @@ jme_disable_rx_engine(struct jme_adapter *jme)
 
 }
 
-static void
-jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
-{
-       if(dpi->attempt == atmp) {
-               ++(dpi->cnt);
-       }
-       else {
-               dpi->attempt = atmp;
-               dpi->cnt = 0;
-       }
-}
-
-static void
-jme_dynamic_pcc(struct jme_adapter *jme)
-{
-       register struct dynpcc_info *dpi = &(jme->dpi);
-
-       if(jiffies >= dpi->check_point) {
-               if(jiffies > (dpi->check_point + PCC_INTERVAL))
-                       jme_attempt_pcc(dpi, PCC_P1);
-               else if((NET_STAT(jme).rx_bytes - dpi->last_bytes) >
-                                                       PCC_P3_THRESHOLD)
-                       jme_attempt_pcc(dpi, PCC_P3);
-               else if((NET_STAT(jme).rx_bytes - dpi->last_bytes) >
-                                                       PCC_P2_THRESHOLD)
-                       jme_attempt_pcc(dpi, PCC_P2);
-               else
-                       jme_attempt_pcc(dpi, PCC_P1);
-
-               if(unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
-                       jme_set_rx_pcc(jme, dpi->attempt);
-                       dpi->cur = dpi->attempt;
-                       dpi->cnt = 0;
-               }
-
-               dpi->last_bytes = NET_STAT(jme).rx_bytes;
-               dpi->last_pkts  = NET_STAT(jme).rx_packets;
-               dpi->check_point = jiffies + PCC_INTERVAL;
-       }
-}
-
 static void
 jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
 {
@@ -942,10 +952,19 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
                skb_put(skb, framesize);
                skb->protocol = eth_type_trans(skb, jme->dev);
 
-               if(jme->reg_rxmcs & RXMCS_CHECKSUM)
+               if((rxdesc->descwb.flags &
+                                       (RXWBFLAG_TCPON |
+                                       RXWBFLAG_UDPON |
+                                       RXWBFLAG_IPV4)))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
+               else
+                       skb->ip_summed = CHECKSUM_NONE;
 
-               netif_rx(skb);
+               if(jme->vlgrp && (rxdesc->descwb.flags & RXWBFLAG_TAGON))
+                       vlan_hwaccel_rx(skb, jme->vlgrp,
+                                       le32_to_cpu(rxdesc->descwb.vlan));
+               else
+                       netif_rx(skb);
 
                if(le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST_MUL)
                        ++(NET_STAT(jme).multicast);
@@ -957,22 +976,25 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
 
        jme_set_clean_rxdesc(jme, idx);
 
-       jme_dynamic_pcc(jme);
-
 }
 
 static int
 jme_rxsum_bad(struct jme_adapter *jme, __u16 flags)
 {
-       if(jme->reg_rxmcs & RXMCS_CHECKSUM) {
-               return  ((flags & RXWBFLAG_IPV4) && 
-                               !(flags & RXWBFLAG_IPCS)) ||
-                       ((flags & RXWBFLAG_IPV6) && 
-                               !(flags & RXWBFLAG_IPCS)) ||
-                       ((flags & RXWBFLAG_TCPON) && 
-                               !(flags & RXWBFLAG_TCPCS)) ||
-                       ((flags & RXWBFLAG_UDPON) && 
-                               !(flags & RXWBFLAG_UDPCS));
+       if(unlikely((flags & RXWBFLAG_TCPON) &&
+       !(flags & RXWBFLAG_TCPCS))) {
+               csum_dbg(jme->dev->name, "TCP Checksum error.\n");
+               return 1;
+       }
+       else if(unlikely((flags & RXWBFLAG_UDPON) &&
+       !(flags & RXWBFLAG_UDPCS))) {
+               csum_dbg(jme->dev->name, "UDP Checksum error.\n");
+               return 1;
+       }
+       else if(unlikely((flags & RXWBFLAG_IPV4) &&
+       !(flags & RXWBFLAG_IPCS))) {
+               csum_dbg(jme->dev->name, "IPV4 Checksum error.\n");
+               return 1;
        }
        else {
                return 0;
@@ -1011,8 +1033,13 @@ jme_process_receive(struct jme_adapter *jme, int limit)
                        else
                                ++(NET_STAT(jme).rx_errors);
 
-                       if(desccnt > 1)
+                       if(desccnt > 1) {
+                               rx_dbg(jme->dev->name,
+                                       "RX: More than one (%d) descriptor, "
+                                       "framelen=%d\n",
+                                       desccnt, le16_to_cpu(rxdesc->descwb.framesize));
                                limit -= desccnt - 1;
+                       }
 
                        for(j = i, ccnt = desccnt ; ccnt-- ; ) {
                                jme_set_clean_rxdesc(jme, j);
@@ -1026,7 +1053,6 @@ jme_process_receive(struct jme_adapter *jme, int limit)
                        jme_alloc_and_feed_skb(jme, i);
                }
 
-
                if((i += desccnt) >= RING_DESC_NR)
                        i -= RING_DESC_NR;
        }
@@ -1043,6 +1069,77 @@ out:
 
 }
 
+static void
+jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
+{
+       if(likely(atmp == dpi->cur))
+               return;
+
+       if(dpi->attempt == atmp) {
+               ++(dpi->cnt);
+       }
+       else {
+               dpi->attempt = atmp;
+               dpi->cnt = 0;
+       }
+
+}
+
+static void
+jme_dynamic_pcc(struct jme_adapter *jme)
+{
+       register struct dynpcc_info *dpi = &(jme->dpi);
+
+       if((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
+               jme_attempt_pcc(dpi, PCC_P3);
+       else if((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P2_THRESHOLD
+       || dpi->intr_cnt > PCC_INTR_THRESHOLD)
+               jme_attempt_pcc(dpi, PCC_P2);
+       else
+               jme_attempt_pcc(dpi, PCC_P1);
+
+       if(unlikely(dpi->attempt != dpi->cur && dpi->cnt > 20)) {
+               jme_set_rx_pcc(jme, dpi->attempt);
+               dpi->cur = dpi->attempt;
+               dpi->cnt = 0;
+       }
+}
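+
+/*
+ * Editor's note: a runnable userspace sketch of the hysteresis above.  The
+ * coalescing level only switches after the same candidate keeps winning
+ * (cnt > 20), which damps oscillation between PCC levels on bursty traffic.
+ * All names here (demo_*) are illustrative, not the driver's own.
+ */
+#include <stdio.h>
+
+enum { DEMO_P1, DEMO_P2, DEMO_P3 };
+
+struct demo_dpi { int cur, attempt, cnt; };
+
+static void demo_attempt(struct demo_dpi *dpi, int atmp)
+{
+        if (atmp == dpi->cur)
+                return;         /* already at this level: nothing to do */
+        if (dpi->attempt == atmp)
+                ++dpi->cnt;     /* same candidate again: build confidence */
+        else {
+                dpi->attempt = atmp;
+                dpi->cnt = 0;   /* candidate changed: start over */
+        }
+}
+
+int main(void)
+{
+        struct demo_dpi dpi = { DEMO_P1, DEMO_P1, 0 };
+        int i;
+
+        for (i = 1; i <= 25; ++i) {     /* 25 straight "high load" samples */
+                demo_attempt(&dpi, DEMO_P3);
+                if (dpi.attempt != dpi.cur && dpi.cnt > 20) {
+                        dpi.cur = dpi.attempt;  /* switch at sample 22 */
+                        dpi.cnt = 0;
+                        printf("switched to P3 at sample %d\n", i);
+                }
+        }
+        return 0;
+}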
+
+static void
+jme_start_pcc_timer(struct jme_adapter *jme)
+{
+       struct dynpcc_info *dpi = &(jme->dpi);
+       dpi->last_bytes         = NET_STAT(jme).rx_bytes;
+       dpi->last_pkts          = NET_STAT(jme).rx_packets;
+       dpi->intr_cnt           = 0;
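+       /*
+        * Editor's note: the 24-bit countdown apparently runs up toward
+        * 0xFFFFFF, so pre-loading (0xFFFFFF - PCC_INTERVAL_US) makes the
+        * timer expire after roughly PCC_INTERVAL_US; expiry raises
+        * INTR_TMINTR, which schedules pcc_task (see jme_intr_msi).
+        */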
+       jwrite32(jme, JME_TMCSR,
+               TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT));
+}
+
+static void
+jme_stop_pcc_timer(struct jme_adapter *jme)
+{
+       jwrite32(jme, JME_TMCSR, 0);
+}
+
+static void
+jme_pcc_tasklet(unsigned long arg)
+{
+       struct jme_adapter *jme = (struct jme_adapter*)arg;
+       struct net_device *netdev = jme->dev;
+
+       if(unlikely(netif_queue_stopped(netdev) ||
+               (atomic_read(&jme->link_changing) != 1)
+       )) {
+               jme_stop_pcc_timer(jme);
+               return;
+       }
+
+       jme_dynamic_pcc(jme);
+       jme_start_pcc_timer(jme);
+}
+
 static void
 jme_link_change_tasklet(unsigned long arg)
 {
@@ -1054,9 +1151,10 @@ jme_link_change_tasklet(unsigned long arg)
        if(!atomic_dec_and_test(&jme->link_changing))
                goto out;
 
-       if(jme_check_link(netdev, 1))
+       if(jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
                goto out;
 
+       jme->old_mtu = netdev->mtu;
        netif_stop_queue(netdev);
 
        while(--timeout > 0 &&
@@ -1069,6 +1167,7 @@ jme_link_change_tasklet(unsigned long arg)
        }
 
        if(netif_carrier_ok(netdev)) {
+               jme_stop_pcc_timer(jme);
                jme_reset_mac_processor(jme);
                jme_free_rx_resources(jme);
                jme_free_tx_resources(jme);
@@ -1097,6 +1196,7 @@ jme_link_change_tasklet(unsigned long arg)
                jme_enable_tx_engine(jme);
 
                netif_start_queue(netdev);
+               jme_start_pcc_timer(jme);
        }
 
        goto out;
@@ -1111,17 +1211,19 @@ static void
 jme_rx_clean_tasklet(unsigned long arg)
 {
        struct jme_adapter *jme = (struct jme_adapter*)arg;
+       struct dynpcc_info *dpi = &(jme->dpi);
 
-       if(!atomic_dec_and_test(&jme->rx_cleaning))
+       if(unlikely(!atomic_dec_and_test(&jme->rx_cleaning)))
                goto out;
-       
-       if(atomic_read(&jme->link_changing) != 1)
+
+       if(unlikely(atomic_read(&jme->link_changing) != 1))
                goto out;
 
        if(unlikely(netif_queue_stopped(jme->dev)))
                goto out;
 
        jme_process_receive(jme, RING_DESC_NR);
+       ++(dpi->intr_cnt);
 
 out:
        atomic_inc(&jme->rx_cleaning);
@@ -1132,12 +1234,14 @@ jme_rx_empty_tasklet(unsigned long arg)
 {
        struct jme_adapter *jme = (struct jme_adapter*)arg;
 
-       if(atomic_read(&jme->link_changing) != 1)
+       if(unlikely(atomic_read(&jme->link_changing) != 1))
                return;
 
        if(unlikely(netif_queue_stopped(jme->dev)))
                return;
 
+       queue_dbg(jme->dev->name, "RX Queue empty!\n");
+
        jme_rx_clean_tasklet(arg);
        jme_restart_rx_engine(jme);
 }
@@ -1151,18 +1255,16 @@ jme_tx_clean_tasklet(unsigned long arg)
        struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
        int i, j, cnt = 0, max, err;
 
-       if(!atomic_dec_and_test(&jme->tx_cleaning))
+       if(unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
                goto out;
 
-       if(atomic_read(&jme->link_changing) != 1)
+       if(unlikely(atomic_read(&jme->link_changing) != 1))
                goto out;
 
        if(unlikely(netif_queue_stopped(jme->dev)))
                goto out;
 
-       spin_lock(&jme->tx_lock);
-       max = RING_DESC_NR - txring->nr_free;
-       spin_unlock(&jme->tx_lock);
+       max = RING_DESC_NR - atomic_read(&txring->nr_free);
 
        tx_dbg(jme->dev->name, "Tx Tasklet: In\n");
 
@@ -1227,65 +1329,33 @@ jme_tx_clean_tasklet(unsigned long arg)
                i, jiffies);
        txring->next_to_clean = i;
 
-       spin_lock(&jme->tx_lock);
-       txring->nr_free += cnt;
-       spin_unlock(&jme->tx_lock);
+       atomic_add(cnt, &txring->nr_free);
 
 out:
        atomic_inc(&jme->tx_cleaning);
 }
 
-static irqreturn_t
-jme_intr(int irq, void *dev_id)
+static void
+jme_intr_msi(struct jme_adapter *jme, __u32 intrstat)
 {
-        struct net_device *netdev = dev_id;
-        struct jme_adapter *jme = netdev_priv(netdev);
-       irqreturn_t rc = IRQ_HANDLED;
-       __u32 intrstat;
-
-#if USE_IEVE_SHADOW
-       pci_dma_sync_single_for_cpu(jme->pdev,
-                                   jme->shadow_dma,
-                                   sizeof(__u32) * SHADOW_REG_NR,
-                                   PCI_DMA_FROMDEVICE);
-       intrstat = jme->shadow_regs[SHADOW_IEVE];
-       jme->shadow_regs[SHADOW_IEVE] = 0;
-#else
-       intrstat = jread32(jme, JME_IEVE);
-#endif
-
-       /*
-        * Check if it's really an interrupt for us
-        */
-        if(intrstat == 0) {
-               rc = IRQ_NONE;
-               goto out;
-       }
-
-       /*
-        * Check if the device still exist
-        */
-       if(unlikely(intrstat == ~((typeof(intrstat))0))) {
-                rc = IRQ_NONE;
-               goto out;
-       }
-
        /*
-        * Allow one interrupt handling at a time
+        * Disable interrupt
         */
-        if(unlikely(!atomic_dec_and_test(&jme->intr_sem)))
-               goto out_inc;
+       jwrite32f(jme, JME_IENC, INTR_ENABLE);
 
        /*
-        * Disable interrupt
+        * Write 1 clear interrupt status
         */
-       jwrite32f(jme, JME_IENC, INTR_ENABLE);
+       jwrite32f(jme, JME_IEVE, intrstat);
 
-       if(intrstat & INTR_LINKCH) {
+       if(intrstat & (INTR_LINKCH | INTR_SWINTR)) {
                tasklet_schedule(&jme->linkch_task);
-               goto out_deassert;
+               goto out_reenable;
        }
 
+       if(intrstat & INTR_TMINTR)
+               tasklet_schedule(&jme->pcc_task);
+
        if(intrstat & INTR_RX0EMP)
                tasklet_schedule(&jme->rxempty_task);
 
@@ -1302,35 +1372,122 @@ jme_intr(int irq, void *dev_id)
                 */
        }
 
-out_deassert:
+out_reenable:
        /*
-        * Deassert interrupts
+        * Re-enable interrupt
         */
-       jwrite32f(jme, JME_IEVE, intrstat);
+       jwrite32f(jme, JME_IENS, INTR_ENABLE);
+}
+
+static irqreturn_t
+jme_intr(int irq, void *dev_id)
+{
+        struct net_device *netdev = dev_id;
+        struct jme_adapter *jme = netdev_priv(netdev);
+       __u32 intrstat;
+
+       intrstat = jread32(jme, JME_IEVE);
 
        /*
-        * Re-enable interrupt
+        * Check if it's really an interrupt for us
         */
-       jwrite32f(jme, JME_IENS, INTR_ENABLE);
+        if(unlikely(intrstat == 0))
+               return IRQ_NONE;
 
-out_inc:
        /*
-        * Enable next interrupt handling
+        * Check if the device still exist
         */
-       atomic_inc(&jme->intr_sem);
+       if(unlikely(intrstat == ~((typeof(intrstat))0)))
+                return IRQ_NONE;
 
-out:
-        return rc;
+       jme_intr_msi(jme, intrstat);
+
+        return IRQ_HANDLED;
+}
+
+static irqreturn_t
+jme_msi(int irq, void *dev_id)
+{
+        struct net_device *netdev = dev_id;
+        struct jme_adapter *jme = netdev_priv(netdev);
+       __u32 intrstat;
+
+       pci_dma_sync_single_for_cpu(jme->pdev,
+                                   jme->shadow_dma,
+                                   sizeof(__u32) * SHADOW_REG_NR,
+                                   PCI_DMA_FROMDEVICE);
+       intrstat = jme->shadow_regs[SHADOW_IEVE];
+       jme->shadow_regs[SHADOW_IEVE] = 0;
+
+       jme_intr_msi(jme, intrstat);
+
+        return IRQ_HANDLED;
+}
+
+static void
+jme_reset_link(struct jme_adapter *jme)
+{
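+       /*
+        * Editor's note: raising the software interrupt (TMCSR_SWIT) makes
+        * the ISR schedule linkch_task (INTR_SWINTR is handled alongside
+        * INTR_LINKCH), so the link is re-examined through the same path
+        * as a genuine link-change interrupt.
+        */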
+       jwrite32(jme, JME_TMCSR, TMCSR_SWIT);
 }
 
 static void
 jme_restart_an(struct jme_adapter *jme)
 {
        __u32 bmcr;
+       unsigned long flags;
 
+       spin_lock_irqsave(&jme->phy_lock, flags);
        bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
        bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
        jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
+       spin_unlock_irqrestore(&jme->phy_lock, flags);
+}
+
+static int
+jme_request_irq(struct jme_adapter *jme)
+{
+       int rc;
+        struct net_device *netdev = jme->dev;
+        irq_handler_t handler = jme_intr;
+        int irq_flags = IRQF_SHARED;
+
+        if (!pci_enable_msi(jme->pdev)) {
+                jme->flags |= JME_FLAG_MSI;
+                handler = jme_msi;
+                irq_flags = 0;
+        }
+
+        rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
+                          netdev);
+        if(rc) {
+                jeprintk(netdev->name,
+                       "Unable to allocate %s interrupt (return: %d)\n",
+                       jme->flags & JME_FLAG_MSI ? "MSI":"INTx", rc);
+
+                if(jme->flags & JME_FLAG_MSI) {
+                        pci_disable_msi(jme->pdev);
+                       jme->flags &= ~JME_FLAG_MSI;
+               }
+        }
+       else {
+               netdev->irq = jme->pdev->irq;
+       }
+
+        return rc;
+}
+
+static void
+jme_free_irq(struct jme_adapter *jme)
+{
+        free_irq(jme->pdev->irq, jme->dev);
+        if (jme->flags & JME_FLAG_MSI) {
+                pci_disable_msi(jme->pdev);
+               jme->flags &= ~JME_FLAG_MSI;
+               jme->dev->irq = jme->pdev->irq;
+        }
 }
 
 static int
@@ -1349,17 +1506,27 @@ jme_open(struct net_device *netdev)
        )
                msleep(10);
 
+       if(!timeout) {
+               rc = -EBUSY;
+               goto err_out;
+       }
+
+       jme_clear_pm(jme);
        jme_reset_mac_processor(jme);
 
-       rc = request_irq(jme->pdev->irq, jme_intr,
-                        IRQF_SHARED, netdev->name, netdev);
-       if(rc) {
-               printk(KERN_ERR PFX "Requesting IRQ error.\n");
+       rc = jme_request_irq(jme);
+       if(rc)
                goto err_out;
-       }
+
        jme_enable_shadow(jme);
        jme_start_irq(jme);
-       jme_restart_an(jme);
+
+       if(jme->flags & JME_FLAG_SSET)
+               jme_set_settings(netdev, &jme->old_ecmd);
+       else
+               jme_reset_phy_processor(jme);
+
+       jme_reset_link(jme);
 
        return 0;
 
@@ -1369,6 +1536,29 @@ err_out:
        return rc;
 }
 
+static void
+jme_set_100m_half(struct jme_adapter *jme)
+{
+       __u32 bmcr, tmp;
+
+       bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
+       tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
+                      BMCR_SPEED1000 | BMCR_FULLDPLX);
+       tmp |= BMCR_SPEED100;
+
+       if (bmcr != tmp)
+               jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp);
+
+       jwrite32(jme, JME_GHC, GHC_SPEED_100M);
+}
+
+static void
+jme_phy_off(struct jme_adapter *jme)
+{
+       jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
+}
+
+
 static int
 jme_close(struct net_device *netdev)
 {
@@ -1379,7 +1569,7 @@ jme_close(struct net_device *netdev)
 
        jme_stop_irq(jme);
        jme_disable_shadow(jme);
-       free_irq(jme->pdev->irq, jme->dev);
+       jme_free_irq(jme);
 
        tasklet_kill(&jme->linkch_task);
        tasklet_kill(&jme->txclean_task);
@@ -1389,6 +1579,18 @@ jme_close(struct net_device *netdev)
        jme_reset_mac_processor(jme);
        jme_free_rx_resources(jme);
        jme_free_tx_resources(jme);
+       jme->phylink = 0;
+
+       if(jme->reg_pmcs) {
+               jme_set_100m_half(jme);
+               pci_enable_wake(jme->pdev, PCI_D0, true);
+               pci_enable_wake(jme->pdev, PCI_D3hot, true);
+               pci_enable_wake(jme->pdev, PCI_D3cold, true);
+               jwrite32(jme, JME_PMCS, jme->reg_pmcs);
+       }
+       else {
+               jme_phy_off(jme);
+       }
 
        return 0;
 }
@@ -1405,6 +1607,16 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
        if(unlikely(netif_queue_stopped(jme->dev)))
                return NETDEV_TX_BUSY;
 
+#if 0
+/*Testing*/
+       ("jme", "Frags: %d Headlen: %d Len: %d Sum:%d\n", 
+               skb_shinfo(skb)->nr_frags,
+               skb_headlen(skb),
+               skb->len,
+               skb->ip_summed);
+/*********/
+#endif
+
        rc = jme_set_new_txdesc(jme, skb);
 
        if(unlikely(rc != NETDEV_TX_OK))
@@ -1489,10 +1701,37 @@ jme_set_multi(struct net_device *netdev)
 static int
 jme_change_mtu(struct net_device *netdev, int new_mtu)
 {
-       /*
-        * Not supporting MTU change for now.
-        */
-       return -EINVAL;
+        struct jme_adapter *jme = netdev_priv(netdev);
+
+       if(new_mtu == jme->old_mtu)
+               return 0;
+
+        if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
+                ((new_mtu) < IPV6_MIN_MTU))
+                return -EINVAL;
+
+       if(new_mtu > 4000) {
+               jme->reg_rxcs &= ~RXCS_FIFOTHNP;
+               jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
+               jme_restart_rx_engine(jme);
+       }
+       else {
+               jme->reg_rxcs &= ~RXCS_FIFOTHNP;
+               jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
+               jme_restart_rx_engine(jme);
+       }
+
+       if(new_mtu > 1900) {
+               netdev->features &= ~NETIF_F_HW_CSUM;
+       }
+       else {
+               netdev->features |= NETIF_F_HW_CSUM;
+       }
+
+        netdev->mtu = new_mtu;
+        jme_reset_link(jme);
+
+       return 0;
 }
 
 static void
@@ -1504,7 +1743,16 @@ jme_tx_timeout(struct net_device *netdev)
         * Reset the link
         * And the link change will reinitialize all RX/TX resources
         */
-       jme_restart_an(jme);
+       jme->phylink = 0;
+       jme_reset_link(jme);
+}
+
+static void
+jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
+{
+       struct jme_adapter *jme = netdev_priv(netdev);
+
+       jme->vlgrp = grp;
 }
 
 static void
@@ -1530,7 +1778,7 @@ mmapio_memcpy(struct jme_adapter *jme, __u32 *p, __u32 reg, int len)
        int i;
 
        for(i = 0 ; i < len ; i += 4)
-               p[i>>2] = jread32(jme, reg + i);
+               p[i >> 2] = jread32(jme, reg + i);
 
 }
 
@@ -1585,6 +1833,46 @@ jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
        return 0;
 }
 
+/*
+ * This is not really for coalescing.
+ * It changes the internal FIFO-related settings for testing
+ * (see the "backdoor" note at the top of this file).
+ */
+static int
+jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
+{
+       struct jme_adapter *jme = netdev_priv(netdev);
+
+       if(ecmd->use_adaptive_rx_coalesce &&
+       ecmd->use_adaptive_tx_coalesce &&
+       ecmd->rx_coalesce_usecs == 250 &&
+       (ecmd->rx_max_coalesced_frames_low == 16 ||
+       ecmd->rx_max_coalesced_frames_low == 32 ||
+       ecmd->rx_max_coalesced_frames_low == 64 ||
+       ecmd->rx_max_coalesced_frames_low == 128)) {
+               jme->reg_rxcs &= ~RXCS_FIFOTHNP;
+               switch(ecmd->rx_max_coalesced_frames_low) {
+               case 16:
+                       jme->reg_rxcs |= RXCS_FIFOTHNP_16QW;
+                       break;
+               case 32:
+                       jme->reg_rxcs |= RXCS_FIFOTHNP_32QW;
+                       break;
+               case 64:
+                       jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
+                       break;
+               case 128:
+               default:
+                       jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
+               }
+               jme_restart_rx_engine(jme);
+       }
+       else {
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static void
 jme_get_pauseparam(struct net_device *netdev,
                        struct ethtool_pauseparam *ecmd)
@@ -1651,16 +1939,60 @@ jme_set_pauseparam(struct net_device *netdev,
        return 0;
 }
 
+static void
+jme_get_wol(struct net_device *netdev,
+               struct ethtool_wolinfo *wol)
+{
+       struct jme_adapter *jme = netdev_priv(netdev);
+
+       wol->supported = WAKE_MAGIC | WAKE_PHY;
+
+       wol->wolopts = 0;
+
+       if(jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
+               wol->wolopts |= WAKE_PHY;
+
+       if(jme->reg_pmcs & PMCS_MFEN)
+               wol->wolopts |= WAKE_MAGIC;
+
+}
+
+static int
+jme_set_wol(struct net_device *netdev,
+               struct ethtool_wolinfo *wol)
+{
+       struct jme_adapter *jme = netdev_priv(netdev);
+
+       if(wol->wolopts & (WAKE_MAGICSECURE |
+                               WAKE_UCAST |
+                               WAKE_MCAST |
+                               WAKE_BCAST |
+                               WAKE_ARP))
+               return -EOPNOTSUPP;
+
+       jme->reg_pmcs = 0;
+
+       if(wol->wolopts & WAKE_PHY)
+               jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN;
+
+       if(wol->wolopts & WAKE_MAGIC)
+               jme->reg_pmcs |= PMCS_MFEN;
+
+       return 0;
+}
+
 static int
 jme_get_settings(struct net_device *netdev,
                     struct ethtool_cmd *ecmd)
 {
        struct jme_adapter *jme = netdev_priv(netdev);
        int rc;
+       unsigned long flags;
 
-       spin_lock(&jme->phy_lock);
+       spin_lock_irqsave(&jme->phy_lock, flags);
        rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
-       spin_unlock(&jme->phy_lock);
+       spin_unlock_irqrestore(&jme->phy_lock, flags);
        return rc;
 }
 
@@ -1669,16 +2001,29 @@ jme_set_settings(struct net_device *netdev,
                     struct ethtool_cmd *ecmd)
 {
        struct jme_adapter *jme = netdev_priv(netdev);
-       int rc;
+       int rc, fdc=0;
        unsigned long flags;
 
        if(ecmd->speed == SPEED_1000 && ecmd->autoneg != AUTONEG_ENABLE)
                return -EINVAL;
 
+       if(jme->mii_if.force_media &&
+       ecmd->autoneg != AUTONEG_ENABLE &&
+       (jme->mii_if.full_duplex != ecmd->duplex))
+               fdc = 1;
+
        spin_lock_irqsave(&jme->phy_lock, flags);
        rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
        spin_unlock_irqrestore(&jme->phy_lock, flags);
 
+       if(!rc && fdc)
+               jme_reset_link(jme);
+
+       if(!rc) {
+               jme->flags |= JME_FLAG_SSET;
+               jme->old_ecmd = *ecmd;
+       }
+
        return rc;
 }
 
@@ -1717,7 +2062,7 @@ jme_set_rx_csum(struct net_device *netdev, u32 on)
 static int
 jme_set_tx_csum(struct net_device *netdev, u32 on)
 {
-       if(on)
+       if(on && netdev->mtu <= 1900)
                netdev->features |= NETIF_F_HW_CSUM;
        else
                netdev->features &= ~NETIF_F_HW_CSUM;
@@ -1738,8 +2083,11 @@ static const struct ethtool_ops jme_ethtool_ops = {
        .get_regs_len           = jme_get_regs_len,
        .get_regs               = jme_get_regs,
        .get_coalesce           = jme_get_coalesce,
+       .set_coalesce           = jme_set_coalesce,
         .get_pauseparam                = jme_get_pauseparam,
         .set_pauseparam                = jme_set_pauseparam,
+       .get_wol                = jme_get_wol,
+       .set_wol                = jme_set_wol,
        .get_settings           = jme_get_settings,
        .set_settings           = jme_set_settings,
        .get_link               = jme_get_link,
@@ -1753,20 +2101,32 @@ static int
 jme_pci_dma64(struct pci_dev *pdev)
 {
         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
-                if(!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
+                if(!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
+                       dprintk("jme", "64Bit DMA Selected.\n");
                        return 1;
+               }
 
         if (!pci_set_dma_mask(pdev, DMA_40BIT_MASK))
-                if(!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK))
+                if(!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK)) {
+                       dprintk("jme", "40Bit DMA Selected.\n");
                        return 1;
+               }
 
         if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
-                if(!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
+                if(!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
+                       dprintk("jme", "32Bit DMA Selected.\n");
                        return 0;
+               }
 
        return -1;
 }
 
+__always_inline static void
+jme_set_phy_ps(struct jme_adapter *jme)
+{
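+       /*
+        * Editor's note: register 26 is outside the standard MII register
+        * set, so this looks like a vendor-specific PHY power-saving knob;
+        * the magic value 0x00001000 is kept verbatim from the source.
+        */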
+       jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, 0x00001000);
+}
+
 static int __devinit
 jme_init_one(struct pci_dev *pdev,
             const struct pci_device_id *ent)
@@ -1810,21 +2170,24 @@ jme_init_one(struct pci_dev *pdev,
         */
        netdev = alloc_etherdev(sizeof(*jme));
        if(!netdev) {
+               printk(KERN_ERR PFX "Cannot allocate netdev structure.\n");
                rc = -ENOMEM;
                goto err_out_release_regions;
        }
        netdev->open                    = jme_open;
        netdev->stop                    = jme_close;
        netdev->hard_start_xmit         = jme_start_xmit;
-       netdev->irq                     = pdev->irq;
        netdev->set_mac_address         = jme_set_macaddr;
        netdev->set_multicast_list      = jme_set_multi;
        netdev->change_mtu              = jme_change_mtu;
        netdev->ethtool_ops             = &jme_ethtool_ops;
        netdev->tx_timeout              = jme_tx_timeout;
        netdev->watchdog_timeo          = TX_TIMEOUT;
+       netdev->vlan_rx_register        = jme_vlan_rx_register;
        NETDEV_GET_STATS(netdev, &jme_get_stats);
-       netdev->features                =       NETIF_F_HW_CSUM;
+       netdev->features                =       NETIF_F_HW_CSUM |
+                                               NETIF_F_HW_VLAN_TX |
+                                               NETIF_F_HW_VLAN_RX;
        if(using_dac)
                netdev->features        |=      NETIF_F_HIGHDMA;
 
@@ -1837,10 +2200,12 @@ jme_init_one(struct pci_dev *pdev,
        jme = netdev_priv(netdev);
        jme->pdev = pdev;
        jme->dev = netdev;
+       jme->old_mtu = netdev->mtu = 1500;
        jme->phylink = 0;
        jme->regs = ioremap(pci_resource_start(pdev, 0),
                             pci_resource_len(pdev, 0));
        if (!(jme->regs)) {
+               printk(KERN_ERR PFX "Error mapping PCI resource region.\n");
                rc = -ENOMEM;
                goto err_out_free_netdev;
        }
@@ -1848,20 +2213,22 @@ jme_init_one(struct pci_dev *pdev,
                                                sizeof(__u32) * SHADOW_REG_NR,
                                                &(jme->shadow_dma));
        if (!(jme->shadow_regs)) {
+               printk(KERN_ERR PFX "Error allocating shadow register mapping.\n");
                rc = -ENOMEM;
                goto err_out_unmap;
        }
 
-       spin_lock_init(&jme->tx_lock);
        spin_lock_init(&jme->phy_lock);
        spin_lock_init(&jme->macaddr_lock);
        spin_lock_init(&jme->rxmcs_lock);
 
-       atomic_set(&jme->intr_sem, 1);
        atomic_set(&jme->link_changing, 1);
        atomic_set(&jme->rx_cleaning, 1);
        atomic_set(&jme->tx_cleaning, 1);
 
+       tasklet_init(&jme->pcc_task,
+                    &jme_pcc_tasklet,
+                    (unsigned long) jme);
        tasklet_init(&jme->linkch_task,
                     &jme_link_change_tasklet,
                     (unsigned long) jme);
@@ -1883,8 +2250,10 @@ jme_init_one(struct pci_dev *pdev,
        jme->dpi.cur = PCC_P1;
 
        jme->reg_ghc = GHC_DPX | GHC_SPEED_1000M;
+       jme->reg_rxcs = RXCS_DEFAULT;
        jme->reg_rxmcs = RXMCS_DEFAULT;
        jme->reg_txpfc = 0;
+       jme->reg_pmcs = 0;
        /*
         * Get Max Read Req Size from PCI Config Space
         */
@@ -1906,7 +2275,8 @@ jme_init_one(struct pci_dev *pdev,
         * Reset MAC processor and reload EEPROM for MAC Address
         */
        jme_clear_pm(jme);
-       jme_reset_phy_processor(jme);
+       jme_set_phy_ps(jme);
+       jme_phy_off(jme);
        jme_reset_mac_processor(jme);
        rc = jme_reload_eeprom(jme);
        if(rc) {
@@ -1980,6 +2350,87 @@ jme_remove_one(struct pci_dev *pdev)
 
 }
 
+static int
+jme_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct jme_adapter *jme = netdev_priv(netdev);
+       int timeout = 100;
+
+       atomic_dec(&jme->link_changing);
+
+       netif_device_detach(netdev);
+       netif_stop_queue(netdev);
+       jme_stop_irq(jme);
+       jme_free_irq(jme);
+
+       while(--timeout > 0 &&
+       (
+               atomic_read(&jme->rx_cleaning) != 1 ||
+               atomic_read(&jme->tx_cleaning) != 1
+       )) {
+               mdelay(1);
+       }
+       if(!timeout) {
+               jeprintk(netdev->name, "Timed out waiting for tasklets.\n");
+               return -EBUSY;
+       }
+       jme_disable_shadow(jme);
+
+       if(netif_carrier_ok(netdev)) {
+               jme_stop_pcc_timer(jme);
+               jme_reset_mac_processor(jme);
+               jme_free_rx_resources(jme);
+               jme_free_tx_resources(jme);
+               netif_carrier_off(netdev);
+               jme->phylink = 0;
+       }
+
+       pci_save_state(pdev);
+       if(jme->reg_pmcs) {
+               jme_set_100m_half(jme);
+               jwrite32(jme, JME_PMCS, jme->reg_pmcs);
+               pci_enable_wake(pdev, PCI_D3hot, true);
+               pci_enable_wake(pdev, PCI_D3cold, true);
+       }
+       else {
+               jme_phy_off(jme);
+               pci_enable_wake(pdev, PCI_D3hot, false);
+               pci_enable_wake(pdev, PCI_D3cold, false);
+       }
+       pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+       return 0;
+}
+
+static int
+jme_resume(struct pci_dev *pdev)
+{
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct jme_adapter *jme = netdev_priv(netdev);
+
+       jme_clear_pm(jme);
+       pci_restore_state(pdev);
+
+       if(jme->flags & JME_FLAG_SSET)
+               jme_set_settings(netdev, &jme->old_ecmd);
+       else
+               jme_reset_phy_processor(jme);
+
+       jme_reset_mac_processor(jme);
+       jme_enable_shadow(jme);
+       jme_request_irq(jme);
+       jme_start_irq(jme);
+       netif_device_attach(netdev);
+
+       atomic_inc(&jme->link_changing);
+
+       jme_reset_link(jme);
+
+       return 0;
+}
+
 static struct pci_device_id jme_pci_tbl[] = {
        { PCI_VDEVICE(JMICRON, 0x250) },
        { }
@@ -1990,12 +2441,10 @@ static struct pci_driver jme_driver = {
         .id_table       = jme_pci_tbl,
         .probe          = jme_init_one,
         .remove         = __devexit_p(jme_remove_one),
-#if 0
 #ifdef CONFIG_PM
         .suspend        = jme_suspend,
         .resume         = jme_resume,
 #endif /* CONFIG_PM */
-#endif
 };
 
 static int __init
@@ -2021,4 +2470,3 @@ MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
 
-