pr_err("phy(%d) write timeout : %d\n", phy, reg);
}
+/*
+ * jme_phyext_read - read a vendor-specific (extended) PHY register.
+ * @jme: adapter private data
+ * @reg: extended register number (only the low 14 bits are used)
+ *
+ * The extended register space is reached indirectly over MDIO:
+ * the register number plus the READ command bit are written to
+ * JME_PHY_SPEC_ADDR_REG, then the value is fetched back from
+ * JME_PHY_SPEC_DATA_REG.
+ */
+static int
+jme_phyext_read(struct jme_adapter *jme, int reg)
+{
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id,
+ JME_PHY_SPEC_ADDR_REG,
+ JME_PHY_SPEC_REG_READ | (reg & 0x3FFF));
+ return jme_mdio_read(jme->dev, jme->mii_if.phy_id,
+ JME_PHY_SPEC_DATA_REG);
+}
+
+/*
+ * jme_phyext_write - write a vendor-specific (extended) PHY register.
+ * @jme: adapter private data
+ * @reg: extended register number (only the low 14 bits are used)
+ * @val: value to store
+ *
+ * Counterpart of jme_phyext_read().  Order matters: the data word is
+ * staged in JME_PHY_SPEC_DATA_REG first, then writing the register
+ * number with the WRITE command bit to JME_PHY_SPEC_ADDR_REG commits
+ * the transfer.
+ */
+static void
+jme_phyext_write(struct jme_adapter *jme, int reg, int val)
+{
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id,
+ JME_PHY_SPEC_DATA_REG, val);
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id,
+ JME_PHY_SPEC_ADDR_REG,
+ JME_PHY_SPEC_REG_WRITE | (reg & 0x3FFF));
+}
+
+/*
+ * jme_phyext_memcpy - dump the extended PHY register file into a buffer.
+ * @jme: adapter private data
+ * @p: destination buffer; filled as an array of 16-bit register values
+ * @reg_nr: number of extended registers to read, starting at register 0
+ *
+ * NOTE(review): the destination is addressed as u16 entries, so the
+ * caller must provide at least reg_nr * sizeof(u16) bytes.
+ */
+static void
+jme_phyext_memcpy(struct jme_adapter *jme, u32 *p, int reg_nr)
+{
+ u16 *dst = (u16 *)p;
+ int idx = 0;
+
+ while (idx < reg_nr) {
+ dst[idx] = jme_phyext_read(jme, idx);
+ ++idx;
+ }
+}
+
static inline void
jme_reset_phy_processor(struct jme_adapter *jme)
{
/*
* Setup Unicast Filter
*/
+ jme_set_unicastaddr(jme->dev);
jme_set_multi(jme->dev);
/*
jme_mac_rxclk_off(jme);
}
+/*
+ * jme_udpsum - fetch the UDP checksum field of a received frame.
+ * @skb: received frame, assumed to start with an Ethernet header
+ *
+ * Returns the raw checksum field from the UDP header if the frame is
+ * a complete IPv4/UDP packet, or 0xFFFF (non-zero) if it is anything
+ * else or too short to contain the headers.  Per RFC 768 a checksum
+ * field of 0 means "no checksum transmitted", so a zero return tells
+ * the caller a hardware UDP-checksum error can be ignored.
+ *
+ * The skb header offsets are set only temporarily for parsing and are
+ * reset before returning on every path.
+ */
+static u16
+jme_udpsum(struct sk_buff *skb)
+{
+ u16 csum = 0xFFFFu;
+
+ /* Too short to even hold Ethernet + minimal IPv4 headers. */
+ if (skb->len < (ETH_HLEN + sizeof(struct iphdr)))
+ return csum;
+ if (skb->protocol != htons(ETH_P_IP))
+ return csum;
+ skb_set_network_header(skb, ETH_HLEN);
+ /* Must be UDP and long enough for IP options + UDP header. */
+ if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
+ (skb->len < (ETH_HLEN +
+ (ip_hdr(skb)->ihl << 2) +
+ sizeof(struct udphdr)))) {
+ skb_reset_network_header(skb);
+ return csum;
+ }
+ skb_set_transport_header(skb,
+ ETH_HLEN + (ip_hdr(skb)->ihl << 2));
+ csum = udp_hdr(skb)->check;
+ skb_reset_transport_header(skb);
+ skb_reset_network_header(skb);
+
+ return csum;
+}
+
static int
-jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
+jme_rxsum_ok(struct jme_adapter *jme, u16 flags, struct sk_buff *skb)
{
if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
return false;
}
if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
- == RXWBFLAG_UDPON)) {
+ == RXWBFLAG_UDPON) && jme_udpsum(skb)) {
if (flags & RXWBFLAG_IPV4)
netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n");
return false;
skb_put(skb, framesize);
skb->protocol = eth_type_trans(skb, jme->dev);
- if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags)))
+ if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,35)
pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
}
+/*
+ * jme_recal_phy - run the PHY's self-recalibration sequence.
+ * @jme: adapter private data
+ *
+ * Sequence (order and the 20 ms delay are part of the hardware
+ * procedure — do not reorder):
+ *  1. put the PHY into test mode 1 via the 1000BASE-T control register,
+ *  2. kick off recalibration through extended register COMM2,
+ *  3. wait 20 ms for the calibration to complete,
+ *  4. clear the calibration control bits,
+ *  5. leave test mode.
+ */
+static inline void
+jme_recal_phy(struct jme_adapter *jme)
+{
+ u32 miictl1000, comm2;
+
+ /* Enter test mode 1. */
+ miictl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
+ miictl1000 &= ~JME_PHY_GCTRL_TESTMASK;
+ miictl1000 |= JME_PHY_GCTRL_TESTMODE1;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, miictl1000);
+
+ /* Trigger recalibration (magic bits per vendor procedure). */
+ comm2 = jme_phyext_read(jme, JME_PHYEXT_COMM2);
+ comm2 &= ~(0x0001u);
+ comm2 |= 0x0011u;
+ jme_phyext_write(jme, JME_PHYEXT_COMM2, comm2);
+
+ /* Give the PHY time to finish calibrating. */
+ mdelay(20);
+
+ /* Clear the calibration control bits again. */
+ comm2 = jme_phyext_read(jme, JME_PHYEXT_COMM2);
+ comm2 &= ~(0x0013u);
+ jme_phyext_write(jme, JME_PHYEXT_COMM2, comm2);
+
+ /* Leave test mode. */
+ miictl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
+ miictl1000 &= ~JME_PHY_GCTRL_TESTMASK;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, miictl1000);
+}
+
+/*
+ * jme_refill_phyparm - load chip-revision-specific PHY tuning values.
+ * @jme: adapter private data
+ *
+ * Each supported silicon revision needs different magic values in the
+ * extended COMM0/COMM1 registers (vendor-supplied constants).
+ * Revisions not matched here are deliberately left untouched.
+ */
+static inline void
+jme_refill_phyparm(struct jme_adapter *jme)
+{
+ if (jme->chip_main_rev >= 6 ||
+ (jme->chip_main_rev == 5 &&
+ (jme->chip_sub_rev == 0 ||
+ jme->chip_sub_rev == 1 ||
+ jme->chip_sub_rev == 3))) {
+ jme_phyext_write(jme, JME_PHYEXT_COMM0, 0x008Au);
+ jme_phyext_write(jme, JME_PHYEXT_COMM1, 0x4109u);
+ } else if (jme->chip_main_rev == 3 &&
+ (jme->chip_sub_rev == 1 ||
+ jme->chip_sub_rev == 2)) {
+ jme_phyext_write(jme, JME_PHYEXT_COMM0, 0xE088u);
+// jme_phyext_write(jme, JME_PHYEXT_COMM1, 0x4108u);
+ } else if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260 &&
+ jme->chip_main_rev == 2) {
+ /* JMC260 rev 2 further differs by sub-revision. */
+ if (jme->chip_sub_rev == 0) {
+ jme_phyext_write(jme, JME_PHYEXT_COMM0, 0x608Au);
+// jme_phyext_write(jme, JME_PHYEXT_COMM1, 0x4108u);
+ } else if (jme->chip_sub_rev == 2) {
+ jme_phyext_write(jme, JME_PHYEXT_COMM0, 0x408Au);
+// jme_phyext_write(jme, JME_PHYEXT_COMM1, 0x4108u);
+ }
+ }
+}
+
static inline void
jme_phy_on(struct jme_adapter *jme)
{
u32 bmcr;
+ /*
+ * Newer revisions need their dedicated power-on sequence BEFORE
+ * BMCR_PDOWN is cleared (this patch moves the call up from after
+ * the BMCR write).
+ */
+ if (new_phy_power_ctrl(jme->chip_main_rev))
+ jme_new_phy_on(jme);
+
bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
bmcr &= ~BMCR_PDOWN;
jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
- if (new_phy_power_ctrl(jme->chip_main_rev))
- jme_new_phy_on(jme);
+ /* After power-up: recalibrate and reload revision-specific params. */
+ jme_recal_phy(jme);
+ jme_refill_phyparm(jme);
}
static inline void
return NETDEV_TX_OK;
}
+/*
+ * jme_set_unicastaddr - program the unicast MAC filter registers.
+ * @netdev: net device whose dev_addr is written to the hardware
+ *
+ * JME_RXUMA_LO receives address bytes 0-3 (byte 0 in the low octet),
+ * JME_RXUMA_HI receives bytes 4-5.
+ */
+static void
+jme_set_unicastaddr(struct net_device *netdev)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ u32 lo, hi;
+
+ lo = (netdev->dev_addr[0] & 0xff);
+ lo |= (netdev->dev_addr[1] & 0xff) << 8;
+ lo |= (netdev->dev_addr[2] & 0xff) << 16;
+ lo |= (netdev->dev_addr[3] & 0xff) << 24;
+ jwrite32(jme, JME_RXUMA_LO, lo);
+
+ hi = (netdev->dev_addr[4] & 0xff);
+ hi |= (netdev->dev_addr[5] & 0xff) << 8;
+ jwrite32(jme, JME_RXUMA_HI, hi);
+}
+
static int
jme_set_macaddr(struct net_device *netdev, void *p)
{
struct jme_adapter *jme = netdev_priv(netdev);
struct sockaddr *addr = p;
- u32 val;
if (netif_running(netdev))
return -EBUSY;
spin_lock_bh(&jme->macaddr_lock);
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-
- val = (addr->sa_data[3] & 0xff) << 24 |
- (addr->sa_data[2] & 0xff) << 16 |
- (addr->sa_data[1] & 0xff) << 8 |
- (addr->sa_data[0] & 0xff);
- jwrite32(jme, JME_RXUMA_LO, val);
- val = (addr->sa_data[5] & 0xff) << 8 |
- (addr->sa_data[4] & 0xff);
- jwrite32(jme, JME_RXUMA_HI, val);
+ jme_set_unicastaddr(netdev);
spin_unlock_bh(&jme->macaddr_lock);
return 0;
p32 += 0x100 >> 2;
mdio_memcpy(jme, p32, JME_PHY_REG_NR);
+
+ p32 += 0x100 >> 2;
+ jme_phyext_memcpy(jme, p32, JME_PHY_SPEC_REG_NR);
}
static int