struct jme_ring *rxring = &(jme->rxring[0]);
struct jme_buffer_info *rxbi = rxring->bufinf + i;
struct sk_buff *skb;
+ dma_addr_t mapping;
skb = netdev_alloc_skb(jme->dev,
jme->dev->mtu + RX_EXTRA_LEN);
if (unlikely(!skb))
return -ENOMEM;
skb->dev = jme->dev;
#endif
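+ /* Map the replacement buffer first; on a mapping failure the skb is freed and the existing ring entry is left untouched. */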
+ mapping = pci_map_page(jme->pdev, virt_to_page(skb->data),
+ offset_in_page(skb->data), skb_tailroom(skb),
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(pci_dma_mapping_error(jme->pdev, mapping))) {
+ dev_kfree_skb(skb);
+ return -ENOMEM;
+ }
+
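+ /* Release the DMA mapping of the buffer that is being replaced. */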
+ if (likely(rxbi->mapping))
+ pci_unmap_page(jme->pdev, rxbi->mapping,
+ rxbi->len, PCI_DMA_FROMDEVICE);
+
rxbi->skb = skb;
rxbi->len = skb_tailroom(skb);
- rxbi->mapping = pci_map_page(jme->pdev,
- virt_to_page(skb->data),
- offset_in_page(skb->data),
- rxbi->len,
- PCI_DMA_FROMDEVICE);
-
+ rxbi->mapping = mapping;
return 0;
}
static u16
jme_udpsum(struct sk_buff *skb)
{
u16 csum = 0xFFFFu;
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
+ struct iphdr *iph;
+ int iphlen;
+ struct udphdr *udph;
+#endif
if (skb->len < (ETH_HLEN + sizeof(struct iphdr)))
return csum;
if (skb->protocol != htons(ETH_P_IP))
return csum;
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
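+ /* Kernels <= 2.6.21 lack ip_hdr()/skb_set_network_header(), so walk to the UDP header with skb_pull()/skb_push() instead. */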
+ iph = (struct iphdr *)skb_pull(skb, ETH_HLEN);
+ iphlen = (iph->ihl << 2);
+ if ((iph->protocol != IPPROTO_UDP) ||
+ (skb->len < (iphlen + sizeof(struct udphdr)))) {
+ skb_push(skb, ETH_HLEN);
+ return csum;
+ }
+ udph = (struct udphdr *)skb_pull(skb, iphlen);
+ csum = udph->check;
+ skb_push(skb, iphlen);
+ skb_push(skb, ETH_HLEN);
+#else
skb_set_network_header(skb, ETH_HLEN);
if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
(skb->len < (ETH_HLEN +
(ip_hdr(skb)->ihl << 2) +
sizeof(struct udphdr)))) {
skb_reset_network_header(skb);
return csum;
}
skb_set_transport_header(skb,
ETH_HLEN + (ip_hdr(skb)->ihl << 2));
csum = udp_hdr(skb)->check;
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
+#endif
return csum;
}
skb_checksum_none_assert(skb);
#endif
+#ifndef __UNIFY_VLAN_RX_PATH__
if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
if (jme->vlgrp) {
jme->jme_vlan_rx(skb, jme->vlgrp,
le16_to_cpu(rxdesc->descwb.vlan));
NET_STAT(jme).rx_bytes += 4;
} else {
dev_kfree_skb(skb);
}
} else {
jme->jme_rx(skb);
}
+#else
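+ /* Unified VLAN RX path: tag the skb with __vlan_hwaccel_put_tag() and hand it to the regular receive hook. */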
+ if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
+ u16 vid = le16_to_cpu(rxdesc->descwb.vlan);
+
+ __vlan_hwaccel_put_tag(skb, vid);
+ NET_STAT(jme).rx_bytes += 4;
+ }
+ jme->jme_rx(skb);
+#endif
if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) ==
cpu_to_le16(RXWBFLAG_DEST_MUL))
++(NET_STAT(jme).multicast);
ctxdesc = txdesc + ((idx + i + 2) & (mask));
ctxbi = txbi + ((idx + i + 2) & (mask));
+#ifndef __USE_SKB_FRAG_API__
jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, frag->page,
frag->page_offset, frag->size, hidma);
+#else
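+ /* With the paged-frag API the page must be fetched via skb_frag_page(); frag->page is no longer directly accessible. */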
+ jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
+ skb_frag_page(frag),
+ frag->page_offset, frag->size, hidma);
+#endif
}
len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
jme_restart_rx_engine(jme);
}
+#ifndef __USE_NDO_FIX_FEATURES__
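+ /* Without ndo_fix_features the driver masks the checksum/TSO flags itself once the MTU exceeds what the hardware can offload. */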
if (new_mtu > 1900) {
netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6);
} else {
if (test_bit(JME_FLAG_TXCSUM, &jme->flags))
netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
if (test_bit(JME_FLAG_TSO, &jme->flags))
netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
}
+#endif
netdev->mtu = new_mtu;
+#ifdef __USE_NDO_FIX_FEATURES__
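+ /* Let the core re-run ndo_fix_features()/ndo_set_features() for the new MTU. */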
+ netdev_update_features(netdev);
+#endif
jme_reset_link(jme);
return 0;
atomic_inc(&jme->link_changing);
}
+#ifndef __UNIFY_VLAN_RX_PATH__
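+/* The vlgrp-based rx_register hook is only needed on kernels without the unified VLAN RX path. */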
static void
jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
struct jme_adapter *jme = netdev_priv(netdev);

jme_pause_rx(jme);
jme->vlgrp = grp;
jme_resume_rx(jme);
}
+#endif
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
static void
test_bit(JME_FLAG_POLL, &jme->flags)) {
clear_bit(JME_FLAG_POLL, &jme->flags);
jme->jme_rx = netif_rx;
+#ifndef __UNIFY_VLAN_RX_PATH__
jme->jme_vlan_rx = vlan_hwaccel_rx;
+#endif
dpi->cur = PCC_P1;
dpi->attempt = PCC_P1;
dpi->cnt = 0;
!(test_bit(JME_FLAG_POLL, &jme->flags))) {
set_bit(JME_FLAG_POLL, &jme->flags);
jme->jme_rx = netif_receive_skb;
+#ifndef __UNIFY_VLAN_RX_PATH__
jme->jme_vlan_rx = vlan_hwaccel_receive_skb;
+#endif
jme_interrupt_mode(jme);
}
jme->msg_enable = value;
}
+#ifndef __USE_NDO_FIX_FEATURES__
static u32
jme_get_rx_csum(struct net_device *netdev)
{
struct jme_adapter *jme = netdev_priv(netdev);

return jme->reg_rxmcs & RXMCS_CHECKSUM;
}
+#else
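+/* With ndo_fix_features/ndo_set_features, feature changes go through these two callbacks instead of per-offload ethtool hooks. */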
+static u32
+jme_fix_features(struct net_device *netdev, u32 features)
+{
+ if (netdev->mtu > 1900)
+ features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM);
+ return features;
+}
+
+static int
+jme_set_features(struct net_device *netdev, u32 features)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+
+ spin_lock_bh(&jme->rxmcs_lock);
+ if (features & NETIF_F_RXCSUM)
+ jme->reg_rxmcs |= RXMCS_CHECKSUM;
+ else
+ jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
+ jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
+ spin_unlock_bh(&jme->rxmcs_lock);
+
+ return 0;
+}
+#endif
static int
jme_nway_reset(struct net_device *netdev)
.get_link = jme_get_link,
.get_msglevel = jme_get_msglevel,
.set_msglevel = jme_set_msglevel,
+#ifndef __USE_NDO_FIX_FEATURES__
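+ /* Legacy per-offload ethtool knobs, only needed without ndo_fix_features. */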
.get_rx_csum = jme_get_rx_csum,
.set_rx_csum = jme_set_rx_csum,
.set_tx_csum = jme_set_tx_csum,
.set_tso = jme_set_tso,
.set_sg = ethtool_op_set_sg,
+#endif
.nway_reset = jme_nway_reset,
.get_eeprom_len = jme_get_eeprom_len,
.get_eeprom = jme_get_eeprom,
.ndo_do_ioctl = jme_ioctl,
.ndo_start_xmit = jme_start_xmit,
.ndo_set_mac_address = jme_set_macaddr,
+#ifndef __USE_NDO_SET_RX_MODE__
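+ /* ndo_set_multicast_list was merged into ndo_set_rx_mode on newer kernels. */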
.ndo_set_multicast_list = jme_set_multi,
+#else
+ .ndo_set_rx_mode = jme_set_multi,
+#endif
.ndo_change_mtu = jme_change_mtu,
.ndo_tx_timeout = jme_tx_timeout,
+#ifndef __UNIFY_VLAN_RX_PATH__
.ndo_vlan_rx_register = jme_vlan_rx_register,
+#endif
+#ifdef __USE_NDO_FIX_FEATURES__
+ .ndo_fix_features = jme_fix_features,
+ .ndo_set_features = jme_set_features,
+#endif
};
#endif
#endif
netdev->ethtool_ops = &jme_ethtool_ops;
netdev->watchdog_timeo = TX_TIMEOUT;
+#ifdef __USE_NDO_FIX_FEATURES__
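+ /* hw_features advertises which offloads ethtool may toggle at runtime. */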
+ netdev->hw_features = NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_SG |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_RXCSUM;
+#endif
netdev->features = NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM |
NETIF_F_SG |
jme->pdev = pdev;
jme->dev = netdev;
jme->jme_rx = netif_rx;
+#ifndef __UNIFY_VLAN_RX_PATH__
jme->jme_vlan_rx = vlan_hwaccel_rx;
+#endif
jme->old_mtu = netdev->mtu = 1500;
jme->phylink = 0;
jme->tx_ring_size = 1 << 10;
jme->reg_txpfc = 0;
jme->reg_pmcs = PMCS_MFEN;
jme->reg_gpreg1 = GPREG1_DEFAULT;
+#ifndef __USE_NDO_FIX_FEATURES__
set_bit(JME_FLAG_TXCSUM, &jme->flags);
set_bit(JME_FLAG_TSO, &jme->flags);
+#else
+
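+ /* Seed NETIF_F_RXCSUM from the hardware RX checksum default so the reported feature state matches reg_rxmcs. */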
+ if (jme->reg_rxmcs & RXMCS_CHECKSUM)
+ netdev->features |= NETIF_F_RXCSUM;
+#endif
/*
* Get Max Read Req Size from PCI Config Space