MODULE_PARM_DESC(no_extplug,
"Do not use external plug signal for pseudo hot-plug.");
+#ifndef JME_NEW_PM_API
+/*
+ * jme_pci_wakeup_enable - arm or disarm PCI wake-up (PME) for the NIC.
+ * @jme: adapter private state (jme->pdev is the underlying PCI device)
+ * @enable: nonzero to enable wake-up, zero to disable
+ *
+ * Kernels up to 2.6.27 have no pci_pme_active(), so wake-up is armed
+ * per low-power state with pci_enable_wake() instead.
+ * Only built when the legacy (pre-dev_pm_ops) PM API is in use.
+ */
+static void
+jme_pci_wakeup_enable(struct jme_adapter *jme, int enable)
+{
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27)
+ pci_enable_wake(jme->pdev, PCI_D1, enable);
+ pci_enable_wake(jme->pdev, PCI_D2, enable);
+ pci_enable_wake(jme->pdev, PCI_D3hot, enable);
+ pci_enable_wake(jme->pdev, PCI_D3cold, enable);
+#else
+ pci_pme_active(jme->pdev, enable);
+#endif
+}
+#endif
+
static int
jme_mdio_read(struct net_device *netdev, int phy, int reg)
{
static inline void
jme_clear_pm(struct jme_adapter *jme)
{
- jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs);
- pci_set_power_state(jme->pdev, PCI_D0);
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
- pci_enable_wake(jme->pdev, PCI_D0, false);
-#else
- device_set_wakeup_enable(&jme->pdev->dev, false);
-#endif
+ /*
+  * Only clear the PM event status bits (PMCS_STMASK names the old
+  * 0xFFFF0000 magic mask) while preserving reg_pmcs settings.
+  * Power-state transitions and wake-up arming are moved out of this
+  * helper into the probe/suspend/resume paths by this same change.
+  */
+ jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs);
}
static int
struct jme_ring *rxring = &(jme->rxring[0]);
struct jme_buffer_info *rxbi = rxring->bufinf + i;
struct sk_buff *skb;
+ dma_addr_t mapping;
skb = netdev_alloc_skb(jme->dev,
jme->dev->mtu + RX_EXTRA_LEN);
skb->dev = jme->dev;
#endif
+ mapping = pci_map_page(jme->pdev, virt_to_page(skb->data),
+ offset_in_page(skb->data), skb_tailroom(skb),
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(pci_dma_mapping_error(jme->pdev, mapping))) {
+ dev_kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ if (likely(rxbi->mapping))
+ pci_unmap_page(jme->pdev, rxbi->mapping,
+ rxbi->len, PCI_DMA_FROMDEVICE);
+
rxbi->skb = skb;
rxbi->len = skb_tailroom(skb);
- rxbi->mapping = pci_map_page(jme->pdev,
- virt_to_page(skb->data),
- offset_in_page(skb->data),
- rxbi->len,
- PCI_DMA_FROMDEVICE);
-
+ rxbi->mapping = mapping;
return 0;
}
jme_udpsum(struct sk_buff *skb)
{
u16 csum = 0xFFFFu;
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
+ struct iphdr *iph;
+ int iphlen;
+ struct udphdr *udph;
+#endif
if (skb->len < (ETH_HLEN + sizeof(struct iphdr)))
return csum;
if (skb->protocol != htons(ETH_P_IP))
return csum;
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
+ iph = (struct iphdr *)skb_pull(skb, ETH_HLEN);
+ iphlen = (iph->ihl << 2);
+ if ((iph->protocol != IPPROTO_UDP) ||
+ (skb->len < (iphlen + sizeof(struct udphdr)))) {
+ skb_push(skb, ETH_HLEN);
+ return csum;
+ }
+ udph = (struct udphdr *)skb_pull(skb, iphlen);
+ csum = udph->check;
+ skb_push(skb, iphlen);
+ skb_push(skb, ETH_HLEN);
+#else
skb_set_network_header(skb, ETH_HLEN);
if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
(skb->len < (ETH_HLEN +
csum = udp_hdr(skb)->check;
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
+#endif
return csum;
}
if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,35)
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,36)
skb->ip_summed = CHECKSUM_NONE;
#else
skb_checksum_none_assert(skb);
{
if (jme->reg_pmcs) {
jme_set_100m_half(jme);
-
if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
jme_wait_link(jme);
-
- jwrite32(jme, JME_PMCS, jme->reg_pmcs);
+ jme_clear_pm(jme);
} else {
jme_phy_off(jme);
}
(jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
txbi->skb)) {
netif_stop_queue(jme->dev);
- netif_info(jme, tx_queued, jme->dev, "TX Queue Stopped %d@%lu\n", idx, jiffies);
+ netif_info(jme, tx_queued, jme->dev,
+ "TX Queue Stopped %d@%lu\n", idx, jiffies);
}
}
jme_restart_rx_engine(jme);
}
+#ifndef __USE_NDO_FIX_FEATURES__
if (new_mtu > 1900) {
netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6);
if (test_bit(JME_FLAG_TSO, &jme->flags))
netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
}
+#endif
netdev->mtu = new_mtu;
+#ifdef __USE_NDO_FIX_FEATURES__
+ netdev_update_features(netdev);
+#endif
jme_reset_link(jme);
return 0;
jme->reg_pmcs |= PMCS_MFEN;
jwrite32(jme, JME_PMCS, jme->reg_pmcs);
-
+#ifndef JME_NEW_PM_API
+ jme_pci_wakeup_enable(jme, !!(jme->reg_pmcs));
+#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
- device_set_wakeup_enable(&jme->pdev->dev, jme->reg_pmcs);
+ device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs));
#endif
return 0;
struct jme_adapter *jme = netdev_priv(netdev);
int rc, fdc = 0;
- if (ecmd->speed == SPEED_1000 && ecmd->autoneg != AUTONEG_ENABLE)
+ if (ethtool_cmd_speed(ecmd) == SPEED_1000
+ && ecmd->autoneg != AUTONEG_ENABLE)
return -EINVAL;
/*
jme->msg_enable = value;
}
+#ifndef __USE_NDO_FIX_FEATURES__
static u32
jme_get_rx_csum(struct net_device *netdev)
{
return 0;
}
+#else
+/*
+ * ndo_fix_features hook: with MTU above 1900 this chip cannot do
+ * TSO / hardware checksum offload, so mask those feature bits before
+ * the core commits the requested feature set (mirrors the manual
+ * feature juggling done in jme_change_mtu on pre-fix_features kernels).
+ */
+static u32
+jme_fix_features(struct net_device *netdev, u32 features)
+{
+ if (netdev->mtu > 1900)
+ features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM);
+ return features;
+}
+
+/*
+ * ndo_set_features hook: apply the RX checksum offload toggle by
+ * updating the shadow copy (reg_rxmcs) and writing it to the RXMCS
+ * register.  rxmcs_lock serializes against other writers of
+ * reg_rxmcs.  Always succeeds (returns 0).
+ */
+static int
+jme_set_features(struct net_device *netdev, u32 features)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+
+ spin_lock_bh(&jme->rxmcs_lock);
+ if (features & NETIF_F_RXCSUM)
+ jme->reg_rxmcs |= RXMCS_CHECKSUM;
+ else
+ jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
+ jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
+ spin_unlock_bh(&jme->rxmcs_lock);
+
+ return 0;
+}
+#endif
static int
jme_nway_reset(struct net_device *netdev)
.get_link = jme_get_link,
.get_msglevel = jme_get_msglevel,
.set_msglevel = jme_set_msglevel,
+#ifndef __USE_NDO_FIX_FEATURES__
.get_rx_csum = jme_get_rx_csum,
.set_rx_csum = jme_set_rx_csum,
.set_tx_csum = jme_set_tx_csum,
.set_tso = jme_set_tso,
.set_sg = ethtool_op_set_sg,
+#endif
.nway_reset = jme_nway_reset,
.get_eeprom_len = jme_get_eeprom_len,
.get_eeprom = jme_get_eeprom,
.ndo_change_mtu = jme_change_mtu,
.ndo_tx_timeout = jme_tx_timeout,
.ndo_vlan_rx_register = jme_vlan_rx_register,
+#ifdef __USE_NDO_FIX_FEATURES__
+ .ndo_fix_features = jme_fix_features,
+ .ndo_set_features = jme_set_features,
+#endif
};
#endif
#endif
netdev->ethtool_ops = &jme_ethtool_ops;
netdev->watchdog_timeo = TX_TIMEOUT;
+#ifdef __USE_NDO_FIX_FEATURES__
+ netdev->hw_features = NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_SG |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_RXCSUM;
+#endif
netdev->features = NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM |
NETIF_F_SG |
jme->reg_txpfc = 0;
jme->reg_pmcs = PMCS_MFEN;
jme->reg_gpreg1 = GPREG1_DEFAULT;
+#ifndef __USE_NDO_FIX_FEATURES__
set_bit(JME_FLAG_TXCSUM, &jme->flags);
set_bit(JME_FLAG_TSO, &jme->flags);
+#else
+
+ if (jme->reg_rxmcs & RXMCS_CHECKSUM)
+ netdev->features |= NETIF_F_RXCSUM;
+#endif
/*
* Get Max Read Req Size from PCI Config Space
jme->mii_if.mdio_write = jme_mdio_write;
jme_clear_pm(jme);
+ pci_set_power_state(jme->pdev, PCI_D0);
+#ifndef JME_NEW_PM_API
+ jme_pci_wakeup_enable(jme, true);
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ device_set_wakeup_enable(&pdev->dev, true);
+#endif
+
jme_set_phyfifo_5level(jme);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22)
pci_read_config_byte(pdev, PCI_REVISION_ID, &jme->pcirev);
struct jme_adapter *jme = netdev_priv(netdev);
jme_powersave_phy(jme);
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27)
- pci_enable_wake(pdev, PCI_D3hot, true);
-#else
- pci_pme_active(pdev, true);
+#ifndef JME_NEW_PM_API
+ jme_pci_wakeup_enable(jme, !!(jme->reg_pmcs));
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs));
#endif
}
-#ifdef CONFIG_PM
-static int
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,38)
-jme_suspend(struct pci_dev *pdev, pm_message_t state)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
+ #ifdef CONFIG_PM
+ #define JME_HAVE_PM
+ #endif
#else
+ #ifdef CONFIG_PM_SLEEP
+ #define JME_HAVE_PM
+ #endif
+#endif
+
+#ifdef JME_HAVE_PM
+static int
+#ifdef JME_NEW_PM_API
jme_suspend(struct device *dev)
+#else
+jme_suspend(struct pci_dev *pdev, pm_message_t state)
#endif
{
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,38)
+#ifdef JME_NEW_PM_API
struct pci_dev *pdev = to_pci_dev(dev);
#endif
struct net_device *netdev = pci_get_drvdata(pdev);
tasklet_hi_enable(&jme->rxempty_task);
jme_powersave_phy(jme);
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,38)
+#ifndef JME_NEW_PM_API
pci_save_state(pdev);
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27)
- pci_enable_wake(pdev, PCI_D3hot, true);
-#else
- pci_pme_active(pdev, true);
-#endif
+ jme_pci_wakeup_enable(jme, !!(jme->reg_pmcs));
pci_set_power_state(pdev, PCI_D3hot);
#endif
}
static int
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,38)
-jme_resume(struct pci_dev *pdev)
-#else
+#ifdef JME_NEW_PM_API
jme_resume(struct device *dev)
+#else
+jme_resume(struct pci_dev *pdev)
#endif
{
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,38)
+#ifdef JME_NEW_PM_API
struct pci_dev *pdev = to_pci_dev(dev);
#endif
struct net_device *netdev = pci_get_drvdata(pdev);
struct jme_adapter *jme = netdev_priv(netdev);
jme_clear_pm(jme);
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,38)
+#ifndef JME_NEW_PM_API
+ pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
#endif
return 0;
}
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,38)
+#ifdef JME_NEW_PM_API
static SIMPLE_DEV_PM_OPS(jme_pm_ops, jme_suspend, jme_resume);
#define JME_PM_OPS (&jme_pm_ops)
#endif
#else
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,38)
+#ifdef JME_NEW_PM_API
#define JME_PM_OPS NULL
#endif
#endif
.probe = jme_init_one,
.remove = __devexit_p(jme_remove_one),
.shutdown = jme_shutdown,
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,38)
+#ifndef JME_NEW_PM_API
.suspend = jme_suspend,
.resume = jme_resume
#else