X-Git-Url: https://bbs.cooldavid.org/git/?p=jme.git;a=blobdiff_plain;f=jme.c;h=6ab8a2e30838ed0a9ff61ad6de9b2bf2ce5a031c;hp=ed35e17f43dc14f0920608177c700149d885ce9e;hb=HEAD;hpb=bb4c5c8c65e5edb34a77571a5543f73c19667c00

diff --git a/jme.c b/jme.c
index ed35e17..6ab8a2e 100644
--- a/jme.c
+++ b/jme.c
@@ -271,9 +271,7 @@ jme_reset_mac_processor(struct jme_adapter *jme)
 static inline void
 jme_clear_pm(struct jme_adapter *jme)
 {
-	jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs);
-	pci_set_power_state(jme->pdev, PCI_D0);
-	pci_enable_wake(jme->pdev, PCI_D0, false);
+	jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs);
 }
 
 static int
@@ -753,20 +751,28 @@ jme_make_new_rx_buf(struct jme_adapter *jme, int i)
 	struct jme_ring *rxring = &(jme->rxring[0]);
 	struct jme_buffer_info *rxbi = rxring->bufinf + i;
 	struct sk_buff *skb;
+	dma_addr_t mapping;
 
 	skb = netdev_alloc_skb(jme->dev,
 		jme->dev->mtu + RX_EXTRA_LEN);
 	if (unlikely(!skb))
 		return -ENOMEM;
 
+	mapping = pci_map_page(jme->pdev, virt_to_page(skb->data),
+			       offset_in_page(skb->data), skb_tailroom(skb),
+			       PCI_DMA_FROMDEVICE);
+	if (unlikely(pci_dma_mapping_error(jme->pdev, mapping))) {
+		dev_kfree_skb(skb);
+		return -ENOMEM;
+	}
+
+	if (likely(rxbi->mapping))
+		pci_unmap_page(jme->pdev, rxbi->mapping,
+			       rxbi->len, PCI_DMA_FROMDEVICE);
+
 	rxbi->skb = skb;
 	rxbi->len = skb_tailroom(skb);
-	rxbi->mapping = pci_map_page(jme->pdev,
-				     virt_to_page(skb->data),
-				     offset_in_page(skb->data),
-				     rxbi->len,
-				     PCI_DMA_FROMDEVICE);
-
+	rxbi->mapping = mapping;
 	return 0;
 }
 
@@ -956,8 +962,34 @@ jme_disable_rx_engine(struct jme_adapter *jme)
 	jme_mac_rxclk_off(jme);
 }
 
+static u16
+jme_udpsum(struct sk_buff *skb)
+{
+	u16 csum = 0xFFFFu;
+
+	if (skb->len < (ETH_HLEN + sizeof(struct iphdr)))
+		return csum;
+	if (skb->protocol != htons(ETH_P_IP))
+		return csum;
+	skb_set_network_header(skb, ETH_HLEN);
+	if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
+	    (skb->len < (ETH_HLEN +
+			(ip_hdr(skb)->ihl << 2) +
+			sizeof(struct udphdr)))) {
+		skb_reset_network_header(skb);
+		return csum;
+	}
+	skb_set_transport_header(skb,
+			ETH_HLEN + (ip_hdr(skb)->ihl << 2));
+	csum = udp_hdr(skb)->check;
+	skb_reset_transport_header(skb);
+	skb_reset_network_header(skb);
+
+	return csum;
+}
+
 static int
-jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
+jme_rxsum_ok(struct jme_adapter *jme, u16 flags, struct sk_buff *skb)
 {
 	if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
 		return false;
@@ -970,7 +1002,7 @@ jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
 	}
 
 	if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
-			== RXWBFLAG_UDPON)) {
+			== RXWBFLAG_UDPON) && jme_udpsum(skb)) {
 		if (flags & RXWBFLAG_IPV4)
 			netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n");
 		return false;
@@ -1018,22 +1050,18 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
 		skb_put(skb, framesize);
 		skb->protocol = eth_type_trans(skb, jme->dev);
 
-		if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags)))
+		if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb))
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 		else
 			skb_checksum_none_assert(skb);
 
 		if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
-			if (jme->vlgrp) {
-				jme->jme_vlan_rx(skb, jme->vlgrp,
-					le16_to_cpu(rxdesc->descwb.vlan));
-				NET_STAT(jme).rx_bytes += 4;
-			} else {
-				dev_kfree_skb(skb);
-			}
-		} else {
-			jme->jme_rx(skb);
+			u16 vid = le16_to_cpu(rxdesc->descwb.vlan);
+
+			__vlan_hwaccel_put_tag(skb, vid);
+			NET_STAT(jme).rx_bytes += 4;
 		}
+		jme->jme_rx(skb);
 
 		if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) ==
 			cpu_to_le16(RXWBFLAG_DEST_MUL))
@@ -1716,6 +1744,112 @@ jme_phy_off(struct jme_adapter *jme)
 	jme_new_phy_off(jme);
 }
 
+static int
+jme_phy_specreg_read(struct jme_adapter *jme, u32 specreg)
+{
+	u32 phy_addr;
+
+	phy_addr = JM_PHY_SPEC_REG_READ | specreg;
+	jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
+			phy_addr);
+	return jme_mdio_read(jme->dev, jme->mii_if.phy_id,
+			JM_PHY_SPEC_DATA_REG);
+}
+
+static void
+jme_phy_specreg_write(struct jme_adapter *jme, u32 ext_reg, u32 phy_data)
+{
+	u32 phy_addr;
+
+	phy_addr = JM_PHY_SPEC_REG_WRITE | ext_reg;
+	jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_DATA_REG,
+			phy_data);
+	jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
+			phy_addr);
+}
+
+static int
+jme_phy_calibration(struct jme_adapter *jme)
+{
+	u32 ctrl1000, phy_data;
+
+	jme_phy_off(jme);
+	jme_phy_on(jme);
+	/* Enable PHY test mode 1 */
+	ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
+	ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
+	ctrl1000 |= PHY_GAD_TEST_MODE_1;
+	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
+
+	phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
+	phy_data &= ~JM_PHY_EXT_COMM_2_CALI_MODE_0;
+	phy_data |= JM_PHY_EXT_COMM_2_CALI_LATCH |
+		JM_PHY_EXT_COMM_2_CALI_ENABLE;
+	jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
+	msleep(20);
+	phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
+	phy_data &= ~(JM_PHY_EXT_COMM_2_CALI_ENABLE |
+		JM_PHY_EXT_COMM_2_CALI_MODE_0 |
+		JM_PHY_EXT_COMM_2_CALI_LATCH);
+	jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
+
+	/* Disable PHY test mode */
+	ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
+	ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
+	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
+	return 0;
+}
+
+static int
+jme_phy_setEA(struct jme_adapter *jme)
+{
+	u32 phy_comm0 = 0, phy_comm1 = 0;
+	u8 nic_ctrl;
+
+	pci_read_config_byte(jme->pdev, PCI_PRIV_SHARE_NICCTRL, &nic_ctrl);
+	if ((nic_ctrl & 0x3) == JME_FLAG_PHYEA_ENABLE)
+		return 0;
+
+	switch (jme->pdev->device) {
+	case PCI_DEVICE_ID_JMICRON_JMC250:
+		if (((jme->chip_main_rev == 5) &&
+			((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
+			(jme->chip_sub_rev == 3))) ||
+			(jme->chip_main_rev >= 6)) {
+			phy_comm0 = 0x008A;
+			phy_comm1 = 0x4109;
+		}
+		if ((jme->chip_main_rev == 3) &&
+			((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
+			phy_comm0 = 0xE088;
+		break;
+	case PCI_DEVICE_ID_JMICRON_JMC260:
+		if (((jme->chip_main_rev == 5) &&
+			((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
+			(jme->chip_sub_rev == 3))) ||
+			(jme->chip_main_rev >= 6)) {
+			phy_comm0 = 0x008A;
+			phy_comm1 = 0x4109;
+		}
+		if ((jme->chip_main_rev == 3) &&
+			((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
+			phy_comm0 = 0xE088;
+		if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 0))
+			phy_comm0 = 0x608A;
+		if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 2))
+			phy_comm0 = 0x408A;
+		break;
+	default:
+		return -ENODEV;
+	}
+	if (phy_comm0)
+		jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_0_REG, phy_comm0);
+	if (phy_comm1)
+		jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_1_REG, phy_comm1);
+
+	return 0;
+}
+
 static int
 jme_open(struct net_device *netdev)
 {
@@ -1741,7 +1875,8 @@ jme_open(struct net_device *netdev)
 		jme_set_settings(netdev, &jme->old_ecmd);
 	else
 		jme_reset_phy_processor(jme);
-
+	jme_phy_calibration(jme);
+	jme_phy_setEA(jme);
 	jme_reset_link(jme);
 
 	return 0;
@@ -1791,11 +1926,9 @@ jme_powersave_phy(struct jme_adapter *jme)
 {
 	if (jme->reg_pmcs) {
 		jme_set_100m_half(jme);
-
 		if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
 			jme_wait_link(jme);
-
-		jwrite32(jme, JME_PMCS, jme->reg_pmcs);
+		jme_clear_pm(jme);
 	} else {
 		jme_phy_off(jme);
 	}
@@ -1857,7 +1990,7 @@ jme_fill_tx_map(struct pci_dev *pdev,
 		struct page *page,
 		u32 page_offset,
 		u32 len,
-		u8 hidma)
+		bool hidma)
 {
 	dma_addr_t dmaaddr;
 
@@ -1891,10 +2024,10 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 	struct jme_ring *txring = &(jme->txring[0]);
 	struct txdesc *txdesc = txring->desc, *ctxdesc;
 	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
-	u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
+	bool hidma = jme->dev->features & NETIF_F_HIGHDMA;
 	int i, nr_frags = skb_shinfo(skb)->nr_frags;
 	int mask = jme->tx_ring_mask;
-	struct skb_frag_struct *frag;
+	const struct skb_frag_struct *frag;
 	u32 len;
 
 	for (i = 0 ; i < nr_frags ; ++i) {
@@ -1902,8 +2035,9 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 		ctxdesc = txdesc + ((idx + i + 2) & (mask));
 		ctxbi = txbi + ((idx + i + 2) & (mask));
 
-		jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, frag->page,
-				frag->page_offset, frag->size, hidma);
+		jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
+				skb_frag_page(frag),
+				frag->page_offset, skb_frag_size(frag), hidma);
 	}
 
 	len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
@@ -2194,27 +2328,11 @@ jme_change_mtu(struct net_device *netdev, int new_mtu)
 		((new_mtu) < IPV6_MIN_MTU))
 		return -EINVAL;
 
-	if (new_mtu > 4000) {
-		jme->reg_rxcs &= ~RXCS_FIFOTHNP;
-		jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
-		jme_restart_rx_engine(jme);
-	} else {
-		jme->reg_rxcs &= ~RXCS_FIFOTHNP;
-		jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
-		jme_restart_rx_engine(jme);
-	}
-
-	if (new_mtu > 1900) {
-		netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-				NETIF_F_TSO | NETIF_F_TSO6);
-	} else {
-		if (test_bit(JME_FLAG_TXCSUM, &jme->flags))
-			netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-		if (test_bit(JME_FLAG_TSO, &jme->flags))
-			netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
-	}
 
 	netdev->mtu = new_mtu;
+	netdev_update_features(netdev);
+
+	jme_restart_rx_engine(jme);
 	jme_reset_link(jme);
 
 	return 0;
@@ -2267,25 +2385,15 @@ static inline void jme_resume_rx(struct jme_adapter *jme)
 	atomic_inc(&jme->link_changing);
 }
 
-static void
-jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
-{
-	struct jme_adapter *jme = netdev_priv(netdev);
-
-	jme_pause_rx(jme);
-	jme->vlgrp = grp;
-	jme_resume_rx(jme);
-}
-
 static void
 jme_get_drvinfo(struct net_device *netdev,
		     struct ethtool_drvinfo *info)
 {
 	struct jme_adapter *jme = netdev_priv(netdev);
 
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
-	strcpy(info->bus_info, pci_name(jme->pdev));
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info));
 }
 
 static int
@@ -2387,7 +2495,6 @@ jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
 	    test_bit(JME_FLAG_POLL, &jme->flags)) {
 		clear_bit(JME_FLAG_POLL, &jme->flags);
 		jme->jme_rx = netif_rx;
-		jme->jme_vlan_rx = vlan_hwaccel_rx;
 		dpi->cur		= PCC_P1;
 		dpi->attempt		= PCC_P1;
 		dpi->cnt		= 0;
@@ -2397,7 +2504,6 @@ jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
 	    !(test_bit(JME_FLAG_POLL, &jme->flags))) {
 		set_bit(JME_FLAG_POLL, &jme->flags);
 		jme->jme_rx = netif_receive_skb;
-		jme->jme_vlan_rx = vlan_hwaccel_receive_skb;
 		jme_interrupt_mode(jme);
 	}
 
@@ -2511,6 +2617,7 @@ jme_set_wol(struct net_device *netdev,
 		jme->reg_pmcs |= PMCS_MFEN;
 
 	jwrite32(jme, JME_PMCS, jme->reg_pmcs);
+	device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs));
 
 	return 0;
 }
@@ -2535,7 +2642,8 @@ jme_set_settings(struct net_device *netdev,
 	struct jme_adapter *jme = netdev_priv(netdev);
 	int rc, fdc = 0;
 
-	if (ecmd->speed == SPEED_1000 && ecmd->autoneg != AUTONEG_ENABLE)
+	if (ethtool_cmd_speed(ecmd) == SPEED_1000
+	    && ecmd->autoneg != AUTONEG_ENABLE)
 		return -EINVAL;
 
 	/*
@@ -2611,20 +2719,21 @@ jme_set_msglevel(struct net_device *netdev, u32 value)
 	jme->msg_enable = value;
 }
 
-static u32
-jme_get_rx_csum(struct net_device *netdev)
+static netdev_features_t
+jme_fix_features(struct net_device *netdev, netdev_features_t features)
 {
-	struct jme_adapter *jme = netdev_priv(netdev);
-	return jme->reg_rxmcs & RXMCS_CHECKSUM;
+	if (netdev->mtu > 1900)
+		features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM);
+	return features;
 }
 
 static int
-jme_set_rx_csum(struct net_device *netdev, u32 on)
+jme_set_features(struct net_device *netdev, netdev_features_t features)
 {
 	struct jme_adapter *jme = netdev_priv(netdev);
 
 	spin_lock_bh(&jme->rxmcs_lock);
-	if (on)
+	if (features & NETIF_F_RXCSUM)
 		jme->reg_rxmcs |= RXMCS_CHECKSUM;
 	else
 		jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
@@ -2634,42 +2743,6 @@ jme_set_rx_csum(struct net_device *netdev, u32 on)
 	return 0;
 }
 
-static int
-jme_set_tx_csum(struct net_device *netdev, u32 on)
-{
-	struct jme_adapter *jme = netdev_priv(netdev);
-
-	if (on) {
-		set_bit(JME_FLAG_TXCSUM, &jme->flags);
-		if (netdev->mtu <= 1900)
-			netdev->features |=
-				NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-	} else {
-		clear_bit(JME_FLAG_TXCSUM, &jme->flags);
-		netdev->features &=
-				~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
-	}
-
-	return 0;
-}
-
-static int
-jme_set_tso(struct net_device *netdev, u32 on)
-{
-	struct jme_adapter *jme = netdev_priv(netdev);
-
-	if (on) {
-		set_bit(JME_FLAG_TSO, &jme->flags);
-		if (netdev->mtu <= 1900)
-			netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
-	} else {
-		clear_bit(JME_FLAG_TSO, &jme->flags);
-		netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
-	}
-
-	return 0;
-}
-
 static int
 jme_nway_reset(struct net_device *netdev)
 {
@@ -2811,11 +2884,6 @@ static const struct ethtool_ops jme_ethtool_ops = {
	.get_link		= jme_get_link,
	.get_msglevel		= jme_get_msglevel,
	.set_msglevel		= jme_set_msglevel,
-	.get_rx_csum		= jme_get_rx_csum,
-	.set_rx_csum		= jme_set_rx_csum,
-	.set_tx_csum		= jme_set_tx_csum,
-	.set_tso		= jme_set_tso,
-	.set_sg			= ethtool_op_set_sg,
	.nway_reset		= jme_nway_reset,
	.get_eeprom_len		= jme_get_eeprom_len,
	.get_eeprom		= jme_get_eeprom,
@@ -2871,10 +2939,11 @@ static const struct net_device_ops jme_netdev_ops = {
	.ndo_do_ioctl		= jme_ioctl,
	.ndo_start_xmit		= jme_start_xmit,
	.ndo_set_mac_address	= jme_set_macaddr,
-	.ndo_set_multicast_list	= jme_set_multi,
+	.ndo_set_rx_mode	= jme_set_multi,
	.ndo_change_mtu		= jme_change_mtu,
	.ndo_tx_timeout		= jme_tx_timeout,
-	.ndo_vlan_rx_register	= jme_vlan_rx_register,
+	.ndo_fix_features	= jme_fix_features,
+	.ndo_set_features	= jme_set_features,
 };
 
 static int __devinit
@@ -2922,13 +2991,18 @@ jme_init_one(struct pci_dev *pdev,
 	 */
 	netdev = alloc_etherdev(sizeof(*jme));
 	if (!netdev) {
-		pr_err("Cannot allocate netdev structure\n");
 		rc = -ENOMEM;
 		goto err_out_release_regions;
 	}
 	netdev->netdev_ops = &jme_netdev_ops;
 	netdev->ethtool_ops = &jme_ethtool_ops;
 	netdev->watchdog_timeo = TX_TIMEOUT;
+	netdev->hw_features	=	NETIF_F_IP_CSUM |
+					NETIF_F_IPV6_CSUM |
+					NETIF_F_SG |
+					NETIF_F_TSO |
+					NETIF_F_TSO6 |
+					NETIF_F_RXCSUM;
 	netdev->features	=	NETIF_F_IP_CSUM |
					NETIF_F_IPV6_CSUM |
					NETIF_F_SG |
@@ -2949,7 +3023,6 @@ jme_init_one(struct pci_dev *pdev,
 	jme->pdev = pdev;
 	jme->dev = netdev;
 	jme->jme_rx = netif_rx;
-	jme->jme_vlan_rx = vlan_hwaccel_rx;
 	jme->old_mtu = netdev->mtu = 1500;
 	jme->phylink = 0;
 	jme->tx_ring_size = 1 << 10;
@@ -3012,8 +3085,9 @@ jme_init_one(struct pci_dev *pdev,
 	jme->reg_txpfc = 0;
 	jme->reg_pmcs = PMCS_MFEN;
 	jme->reg_gpreg1 = GPREG1_DEFAULT;
-	set_bit(JME_FLAG_TXCSUM, &jme->flags);
-	set_bit(JME_FLAG_TSO, &jme->flags);
+
+	if (jme->reg_rxmcs & RXMCS_CHECKSUM)
+		netdev->features |= NETIF_F_RXCSUM;
 
 	/*
 	 * Get Max Read Req Size from PCI Config Space
@@ -3068,8 +3142,11 @@ jme_init_one(struct pci_dev *pdev,
 	jme->mii_if.mdio_write = jme_mdio_write;
 
 	jme_clear_pm(jme);
+	pci_set_power_state(jme->pdev, PCI_D0);
+	device_set_wakeup_enable(&pdev->dev, true);
+
 	jme_set_phyfifo_5level(jme);
-	pci_read_config_byte(pdev, PCI_REVISION_ID, &jme->pcirev);
+	jme->pcirev = pdev->revision;
 	if (!jme->fpgaver)
 		jme_phy_init(jme);
 	jme_phy_off(jme);
@@ -3145,10 +3222,11 @@ jme_shutdown(struct pci_dev *pdev)
 	pci_pme_active(pdev, true);
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int
-jme_suspend(struct pci_dev *pdev, pm_message_t state)
+jme_suspend(struct device *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev);
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct jme_adapter *jme = netdev_priv(netdev);
 
@@ -3180,29 +3258,26 @@ jme_suspend(struct pci_dev *pdev, pm_message_t state)
 	tasklet_hi_enable(&jme->rxclean_task);
 	tasklet_hi_enable(&jme->rxempty_task);
 
-	pci_save_state(pdev);
 	jme_powersave_phy(jme);
-	pci_enable_wake(jme->pdev, PCI_D3hot, true);
-	pci_set_power_state(pdev, PCI_D3hot);
 
 	return 0;
 }
 
 static int
-jme_resume(struct pci_dev *pdev)
+jme_resume(struct device *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev);
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct jme_adapter *jme = netdev_priv(netdev);
 
 	jme_clear_pm(jme);
-	pci_restore_state(pdev);
-
 	jme_phy_on(jme);
 
 	if (test_bit(JME_FLAG_SSET, &jme->flags))
 		jme_set_settings(netdev, &jme->old_ecmd);
 	else
 		jme_reset_phy_processor(jme);
-
+	jme_phy_calibration(jme);
+	jme_phy_setEA(jme);
 	jme_start_irq(jme);
 	netif_device_attach(netdev);
@@ -3212,6 +3287,13 @@ jme_resume(struct pci_dev *pdev)
 
 	return 0;
 }
+
+static SIMPLE_DEV_PM_OPS(jme_pm_ops, jme_suspend, jme_resume);
+#define JME_PM_OPS (&jme_pm_ops)
+
+#else
+
+#define JME_PM_OPS NULL
 #endif
 
 static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = {
@@ -3225,11 +3307,8 @@ static struct pci_driver jme_driver = {
	.id_table	= jme_pci_tbl,
	.probe		= jme_init_one,
	.remove		= __devexit_p(jme_remove_one),
-#ifdef CONFIG_PM
-	.suspend	= jme_suspend,
-	.resume		= jme_resume,
-#endif /* CONFIG_PM */
	.shutdown	= jme_shutdown,
+	.driver.pm	= JME_PM_OPS,
 };
 
 static int __init
@@ -3253,4 +3332,3 @@ MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
-
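
Note on the jme_make_new_rx_buf() hunk above (illustrative, not part of the patch): the rewritten refill path maps the new receive page first, checks the mapping with pci_dma_mapping_error(), and only then unmaps the old buffer and updates the ring slot, so a mapping failure leaves the previously posted buffer intact. A minimal standalone sketch of that map-before-commit pattern follows, using the same legacy PCI DMA helpers; struct rx_slot and refill_rx_slot() are hypothetical names for illustration, not the driver's own types.

#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* Hypothetical per-slot bookkeeping, loosely modeled on jme_buffer_info. */
struct rx_slot {
	struct sk_buff *skb;
	dma_addr_t mapping;
	u32 len;
};

/* Map the replacement skb before touching the slot: on any failure the
 * old skb and its DMA mapping stay in place and the caller may retry. */
static int refill_rx_slot(struct pci_dev *pdev, struct net_device *dev,
			  struct rx_slot *slot, unsigned int buf_len)
{
	struct sk_buff *skb;
	dma_addr_t mapping;

	skb = netdev_alloc_skb(dev, buf_len);
	if (unlikely(!skb))
		return -ENOMEM;

	mapping = pci_map_page(pdev, virt_to_page(skb->data),
			       offset_in_page(skb->data), skb_tailroom(skb),
			       PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(pdev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;		/* slot still holds the old buffer */
	}

	if (slot->mapping)
		pci_unmap_page(pdev, slot->mapping, slot->len,
			       PCI_DMA_FROMDEVICE);

	slot->skb = skb;
	slot->len = skb_tailroom(skb);
	slot->mapping = mapping;
	return 0;
}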