X-Git-Url: http://bbs.cooldavid.org/git/?a=blobdiff_plain;f=jme.c;h=66865b9903751358e5c07dc8baaa532d965aacaa;hb=c0e7819373b51a44fbfa26e55ee9085d73580a4f;hp=ab6e154f466d636700443f26c7176a2fc8e94f05;hpb=e3b96dc93cf64b2ad559c7682f60a7596f655dbf;p=jme.git

diff --git a/jme.c b/jme.c
index ab6e154..66865b9 100644
--- a/jme.c
+++ b/jme.c
@@ -59,6 +59,7 @@ module_param(no_extplug, int, 0);
 MODULE_PARM_DESC(no_extplug,
 	"Do not use external plug signal for pseudo hot-plug.");
 
+#ifndef JME_NEW_PM_API
 static void
 jme_pci_wakeup_enable(struct jme_adapter *jme, int enable)
 {
@@ -71,6 +72,7 @@ jme_pci_wakeup_enable(struct jme_adapter *jme, int enable)
 	pci_pme_active(jme->pdev, enable);
 #endif
 }
+#endif
 
 static int
 jme_mdio_read(struct net_device *netdev, int phy, int reg)
@@ -767,6 +769,7 @@ jme_make_new_rx_buf(struct jme_adapter *jme, int i)
 	struct jme_ring *rxring = &(jme->rxring[0]);
 	struct jme_buffer_info *rxbi = rxring->bufinf + i;
 	struct sk_buff *skb;
+	dma_addr_t mapping;
 
 	skb = netdev_alloc_skb(jme->dev,
 		jme->dev->mtu + RX_EXTRA_LEN);
@@ -776,14 +779,21 @@ jme_make_new_rx_buf(struct jme_adapter *jme, int i)
 	skb->dev = jme->dev;
 #endif
 
+	mapping = pci_map_page(jme->pdev, virt_to_page(skb->data),
+			       offset_in_page(skb->data), skb_tailroom(skb),
+			       PCI_DMA_FROMDEVICE);
+	if (unlikely(pci_dma_mapping_error(jme->pdev, mapping))) {
+		dev_kfree_skb(skb);
+		return -ENOMEM;
+	}
+
+	if (likely(rxbi->mapping))
+		pci_unmap_page(jme->pdev, rxbi->mapping,
+			       rxbi->len, PCI_DMA_FROMDEVICE);
+
 	rxbi->skb = skb;
 	rxbi->len = skb_tailroom(skb);
-	rxbi->mapping = pci_map_page(jme->pdev,
-				     virt_to_page(skb->data),
-				     offset_in_page(skb->data),
-				     rxbi->len,
-				     PCI_DMA_FROMDEVICE);
-
+	rxbi->mapping = mapping;
 	return 0;
 }
 
@@ -977,11 +987,29 @@ static u16
 jme_udpsum(struct sk_buff *skb)
 {
 	u16 csum = 0xFFFFu;
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
+	struct iphdr *iph;
+	int iphlen;
+	struct udphdr *udph;
+#endif
 
 	if (skb->len < (ETH_HLEN + sizeof(struct iphdr)))
 		return csum;
 	if (skb->protocol != htons(ETH_P_IP))
 		return csum;
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
+	iph = (struct iphdr *)skb_pull(skb, ETH_HLEN);
+	iphlen = (iph->ihl << 2);
+	if ((iph->protocol != IPPROTO_UDP) ||
+	    (skb->len < (iphlen + sizeof(struct udphdr)))) {
+		skb_push(skb, ETH_HLEN);
+		return csum;
+	}
+	udph = (struct udphdr *)skb_pull(skb, iphlen);
+	csum = udph->check;
+	skb_push(skb, iphlen);
+	skb_push(skb, ETH_HLEN);
+#else
 	skb_set_network_header(skb, ETH_HLEN);
 	if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
 	    (skb->len < (ETH_HLEN +
@@ -995,6 +1023,7 @@ jme_udpsum(struct sk_buff *skb)
 	csum = udp_hdr(skb)->check;
 	skb_reset_transport_header(skb);
 	skb_reset_network_header(skb);
+#endif
 
 	return csum;
 }
@@ -1070,6 +1099,7 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
 		skb_checksum_none_assert(skb);
 #endif
 
+#ifndef __UNIFY_VLAN_RX_PATH__
 		if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
 			if (jme->vlgrp) {
 				jme->jme_vlan_rx(skb, jme->vlgrp,
@@ -1081,6 +1111,15 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
 		} else {
 			jme->jme_rx(skb);
 		}
+#else
+		if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
+			u16 vid = le16_to_cpu(rxdesc->descwb.vlan);
+
+			__vlan_hwaccel_put_tag(skb, vid);
+			NET_STAT(jme).rx_bytes += 4;
+		}
+		jme->jme_rx(skb);
+#endif
 
 		if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) ==
 		    cpu_to_le16(RXWBFLAG_DEST_MUL))
@@ -1955,7 +1994,7 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 	u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
 	int i, nr_frags = skb_shinfo(skb)->nr_frags;
 	int mask = jme->tx_ring_mask;
-	struct skb_frag_struct *frag;
+	const struct skb_frag_struct *frag;
 	u32 len;
 
 	for (i = 0 ; i < nr_frags ; ++i) {
@@ -1963,8 +2002,14 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 		ctxdesc = txdesc + ((idx + i + 2) & (mask));
 		ctxbi = txbi + ((idx + i + 2) & (mask));
 
+#ifndef __USE_SKB_FRAG_API__
 		jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, frag->page,
 				frag->page_offset, frag->size, hidma);
+#else
+		jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
+				skb_frag_page(frag),
+				frag->page_offset, skb_frag_size(frag), hidma);
+#endif
 	}
 
 	len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
@@ -2314,6 +2359,7 @@ jme_change_mtu(struct net_device *netdev, int new_mtu)
 		jme_restart_rx_engine(jme);
 	}
 
+#ifndef __USE_NDO_FIX_FEATURES__
 	if (new_mtu > 1900) {
 		netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 				NETIF_F_TSO | NETIF_F_TSO6);
@@ -2323,8 +2369,12 @@ jme_change_mtu(struct net_device *netdev, int new_mtu)
 		if (test_bit(JME_FLAG_TSO, &jme->flags))
 			netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
 	}
+#endif
 
 	netdev->mtu = new_mtu;
+#ifdef __USE_NDO_FIX_FEATURES__
+	netdev_update_features(netdev);
+#endif
 	jme_reset_link(jme);
 
 	return 0;
@@ -2377,6 +2427,7 @@ static inline void jme_resume_rx(struct jme_adapter *jme)
 	atomic_inc(&jme->link_changing);
 }
 
+#ifndef __UNIFY_VLAN_RX_PATH__
 static void
 jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 {
@@ -2386,6 +2437,7 @@ jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 	jme->vlgrp = grp;
 	jme_resume_rx(jme);
 }
+#endif
 
 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
 static void
@@ -2411,9 +2463,9 @@ jme_get_drvinfo(struct net_device *netdev,
 {
 	struct jme_adapter *jme = netdev_priv(netdev);
 
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
-	strcpy(info->bus_info, pci_name(jme->pdev));
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info));
 }
 
 static int
@@ -2515,7 +2567,9 @@ jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
 		   test_bit(JME_FLAG_POLL, &jme->flags)) {
 		clear_bit(JME_FLAG_POLL, &jme->flags);
 		jme->jme_rx = netif_rx;
+#ifndef __UNIFY_VLAN_RX_PATH__
 		jme->jme_vlan_rx = vlan_hwaccel_rx;
+#endif
 		dpi->cur		= PCC_P1;
 		dpi->attempt		= PCC_P1;
 		dpi->cnt		= 0;
@@ -2525,7 +2579,9 @@ jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
 		   !(test_bit(JME_FLAG_POLL, &jme->flags))) {
 		set_bit(JME_FLAG_POLL, &jme->flags);
 		jme->jme_rx = netif_receive_skb;
+#ifndef __UNIFY_VLAN_RX_PATH__
 		jme->jme_vlan_rx = vlan_hwaccel_receive_skb;
+#endif
 		jme_interrupt_mode(jme);
 	}
 
@@ -2746,6 +2802,7 @@ jme_set_msglevel(struct net_device *netdev, u32 value)
 	jme->msg_enable = value;
 }
 
+#ifndef __USE_NDO_FIX_FEATURES__
 static u32
 jme_get_rx_csum(struct net_device *netdev)
 {
@@ -2804,6 +2861,31 @@ jme_set_tso(struct net_device *netdev, u32 on)
 
 	return 0;
 }
+#else
+static u32
+jme_fix_features(struct net_device *netdev, u32 features)
+{
+	if (netdev->mtu > 1900)
+		features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM);
+	return features;
+}
+
+static int
+jme_set_features(struct net_device *netdev, u32 features)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+
+	spin_lock_bh(&jme->rxmcs_lock);
+	if (features & NETIF_F_RXCSUM)
+		jme->reg_rxmcs |= RXMCS_CHECKSUM;
+	else
+		jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
+	jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
+	spin_unlock_bh(&jme->rxmcs_lock);
+
+	return 0;
+}
+#endif
 
 static int
 jme_nway_reset(struct net_device *netdev)
@@ -2950,11 +3032,13 @@ static const struct ethtool_ops jme_ethtool_ops = {
 	.get_link		= jme_get_link,
 	.get_msglevel		= jme_get_msglevel,
 	.set_msglevel		= jme_set_msglevel,
+#ifndef __USE_NDO_FIX_FEATURES__
 	.get_rx_csum		= jme_get_rx_csum,
 	.set_rx_csum		= jme_set_rx_csum,
 	.set_tx_csum		= jme_set_tx_csum,
 	.set_tso		= jme_set_tso,
 	.set_sg			= ethtool_op_set_sg,
+#endif
 	.nway_reset		= jme_nway_reset,
 	.get_eeprom_len		= jme_get_eeprom_len,
 	.get_eeprom		= jme_get_eeprom,
@@ -3034,10 +3118,20 @@ static const struct net_device_ops jme_netdev_ops = {
 	.ndo_do_ioctl		= jme_ioctl,
 	.ndo_start_xmit		= jme_start_xmit,
 	.ndo_set_mac_address	= jme_set_macaddr,
+#ifndef __USE_NDO_SET_RX_MODE__
 	.ndo_set_multicast_list	= jme_set_multi,
+#else
+	.ndo_set_rx_mode	= jme_set_multi,
+#endif
 	.ndo_change_mtu		= jme_change_mtu,
 	.ndo_tx_timeout		= jme_tx_timeout,
+#ifndef __UNIFY_VLAN_RX_PATH__
 	.ndo_vlan_rx_register	= jme_vlan_rx_register,
+#endif
+#ifdef __USE_NDO_FIX_FEATURES__
+	.ndo_fix_features	= jme_fix_features,
+	.ndo_set_features	= jme_set_features,
+#endif
 };
 #endif
 
@@ -3109,6 +3203,14 @@ jme_init_one(struct pci_dev *pdev,
 #endif
 	netdev->ethtool_ops		= &jme_ethtool_ops;
 	netdev->watchdog_timeo		= TX_TIMEOUT;
+#ifdef __USE_NDO_FIX_FEATURES__
+	netdev->hw_features		=	NETIF_F_IP_CSUM |
+						NETIF_F_IPV6_CSUM |
+						NETIF_F_SG |
+						NETIF_F_TSO |
+						NETIF_F_TSO6 |
+						NETIF_F_RXCSUM;
+#endif
 	netdev->features		=	NETIF_F_IP_CSUM |
 						NETIF_F_IPV6_CSUM |
 						NETIF_F_SG |
@@ -3129,7 +3231,9 @@ jme_init_one(struct pci_dev *pdev,
 	jme->pdev = pdev;
 	jme->dev = netdev;
 	jme->jme_rx = netif_rx;
+#ifndef __UNIFY_VLAN_RX_PATH__
 	jme->jme_vlan_rx = vlan_hwaccel_rx;
+#endif
 	jme->old_mtu = netdev->mtu = 1500;
 	jme->phylink = 0;
 	jme->tx_ring_size = 1 << 10;
@@ -3192,8 +3296,14 @@ jme_init_one(struct pci_dev *pdev,
 	jme->reg_txpfc = 0;
 	jme->reg_pmcs = PMCS_MFEN;
 	jme->reg_gpreg1 = GPREG1_DEFAULT;
+#ifndef __USE_NDO_FIX_FEATURES__
 	set_bit(JME_FLAG_TXCSUM, &jme->flags);
 	set_bit(JME_FLAG_TSO, &jme->flags);
+#else
+
+	if (jme->reg_rxmcs & RXMCS_CHECKSUM)
+		netdev->features |= NETIF_F_RXCSUM;
+#endif
 
 	/*
 	 * Get Max Read Req Size from PCI Config Space
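
Note on the compatibility guards used above: JME_NEW_PM_API, __USE_NDO_FIX_FEATURES__, __USE_NDO_SET_RX_MODE__, __USE_SKB_FRAG_API__ and __UNIFY_VLAN_RX_PATH__ are expected to come from the driver's own header rather than from jme.c, so their definitions are not part of this diff. As a rough orientation only, the sketch below shows how such guards are typically keyed off LINUX_VERSION_CODE; the version cutoffs, and the idea that jme.h defines them exactly this way, are assumptions rather than facts taken from this repository.

/*
 * Illustrative sketch only -- the real definitions live in the driver's
 * header (jme.h) and are not shown in this diff.  The version cutoffs
 * below are assumptions about roughly when each upstream API appeared.
 */
#include <linux/version.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
#define __USE_NDO_FIX_FEATURES__	/* ndo_fix_features()/ndo_set_features() available */
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)
#define __USE_NDO_SET_RX_MODE__		/* .ndo_set_rx_mode replaces .ndo_set_multicast_list */
#define __USE_SKB_FRAG_API__		/* skb_frag_page()/skb_frag_size() accessors */
#define __UNIFY_VLAN_RX_PATH__		/* vlan_group gone; tag via __vlan_hwaccel_put_tag() */
#define JME_NEW_PM_API			/* newer PCI PM helpers; legacy wakeup helper unused */
#endif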