#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_vlan.h>
+#include <linux/slab.h>
#include <net/ip6_checksum.h>
#include "jme.h"
if (i == 0)
jeprintk(jme->pdev, "phy(%d) write timeout : %d\n", phy, reg);
-
- return;
}
static inline void
jme_mdio_write(jme->dev,
jme->mii_if.phy_id,
MII_BMCR, val | BMCR_RESET);
-
- return;
}
static void
skb->ip_summed = CHECKSUM_NONE;
if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
- spin_lock(&jme->vlgrp_lock);
if (jme->vlgrp) {
jme->jme_vlan_rx(skb, jme->vlgrp,
le16_to_cpu(rxdesc->descwb.vlan));
- spin_unlock(&jme->vlgrp_lock);
NET_STAT(jme).rx_bytes += 4;
} else {
- spin_unlock(&jme->vlgrp_lock);
dev_kfree_skb(skb);
}
} else {
} else if (netdev->flags & IFF_ALLMULTI) {
jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
} else if (netdev->flags & IFF_MULTICAST) {
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34)
struct dev_mc_list *mclist;
+#else
+ struct netdev_hw_addr *ha;
+#endif
int bit_nr;
jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
for (i = 0, mclist = netdev->mc_list;
mclist && i < netdev->mc_count;
++i, mclist = mclist->next) {
-#else
+#elif LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34)
netdev_for_each_mc_addr(mclist, netdev) {
+#else
+ netdev_for_each_mc_addr(ha, netdev) {
#endif
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34)
bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
+#else
+ bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3F;
+#endif
mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
}
jme_reset_link(jme);
}
+/*
+ * Quiesce the whole RX path so shared RX state (here: jme->vlgrp) can be
+ * swapped without holding a spinlock in the hot receive path.
+ *
+ * Drops link_changing so concurrent paths that test it back off
+ * (NOTE(review): the readers of link_changing are outside this hunk --
+ * confirm they treat a lowered count as "stay away").
+ * Must be balanced by jme_resume_rx().
+ */
+static inline void jme_pause_rx(struct jme_adapter *jme)
+{
+	atomic_dec(&jme->link_changing);
+
+	/* Turn packet-completion coalescing off while RX is paused. */
+	jme_set_rx_pcc(jme, PCC_OFF);
+	if (test_bit(JME_FLAG_POLL, &jme->flags)) {
+		/* NAPI mode: stop the poll loop. */
+		JME_NAPI_DISABLE(jme);
+	} else {
+		/* Tasklet mode: block both RX tasklets until resumed. */
+		tasklet_disable(&jme->rxclean_task);
+		tasklet_disable(&jme->rxempty_task);
+	}
+}
+
+/*
+ * Undo jme_pause_rx(): re-enable the RX path, reset the dynamic
+ * packet-completion-coalescing state to its P1 baseline, and restore
+ * link_changing.
+ */
+static inline void jme_resume_rx(struct jme_adapter *jme)
+{
+	struct dynpcc_info *dpi = &(jme->dpi);
+
+	if (test_bit(JME_FLAG_POLL, &jme->flags)) {
+		/* NAPI mode: restart polling. */
+		JME_NAPI_ENABLE(jme);
+	} else {
+		/* Tasklet mode: re-enable the RX tasklets disabled in pause. */
+		tasklet_hi_enable(&jme->rxclean_task);
+		tasklet_hi_enable(&jme->rxempty_task);
+	}
+	/* Restart dynamic PCC from the P1 profile with a cleared counter. */
+	dpi->cur = PCC_P1;
+	dpi->attempt = PCC_P1;
+	dpi->cnt = 0;
+	jme_set_rx_pcc(jme, PCC_P1);
+
+	atomic_inc(&jme->link_changing);
+}
+
static void
jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct jme_adapter *jme = netdev_priv(netdev);
+	/*
+	 * Replace the vlgrp spinlock with a full RX pause: with the RX path
+	 * quiesced, the vlgrp pointer swap cannot race with the receive code
+	 * that reads jme->vlgrp.
+	 */
-	spin_lock_bh(&jme->vlgrp_lock);
+	jme_pause_rx(jme);
	jme->vlgrp = grp;
-	spin_unlock_bh(&jme->vlgrp_lock);
+	jme_resume_rx(jme);
}
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
{
struct jme_adapter *jme = netdev_priv(netdev);
- spin_lock_bh(&jme->vlgrp_lock);
if(jme->vlgrp) {
+ jme_pause_rx(jme);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,20)
jme->vlgrp->vlan_devices[vid] = NULL;
#else
vlan_group_set_device(jme->vlgrp, vid, NULL);
#endif
+ jme_resume_rx(jme);
}
- spin_unlock_bh(&jme->vlgrp_lock);
}
#endif
spin_lock_init(&jme->phy_lock);
spin_lock_init(&jme->macaddr_lock);
spin_lock_init(&jme->rxmcs_lock);
- spin_lock_init(&jme->vlgrp_lock);
atomic_set(&jme->link_changing, 1);
atomic_set(&jme->rx_cleaning, 1);