+static int
+jme_tx_tso(struct sk_buff *skb,
+ volatile __u16 *mss, __u8 *flags)
+{
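+ /*
+ * TCP segmentation offload: store the MSS in the descriptor and
+ * pre-seed the checksum fields so the hardware can segment the
+ * frame and checksum each resulting packet.
+ * Returns 0 when TSO is used, 1 to have the caller fall back to
+ * plain checksum offload.
+ */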
+ /* the MSS field is little-endian, like pktsize in jme_fill_first_tx_desc() */
+ *mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT);
+ if (*mss) {
+ *flags |= TXFLAG_LSEN;
+
+ if (skb->protocol == __constant_htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+
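+ /*
+ * Clear the IP checksum and pre-load the TCP checksum field
+ * with the pseudo-header checksum (length left as zero), so
+ * the hardware can finish both for every generated segment.
+ */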
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+ } else {
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
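+ /* same pseudo-header seed for IPv6; IPv6 has no header checksum to clear */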
+ tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr,
+ &ip6h->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+ }
+
+ return 0;
+ }
+
+ return 1;
+}
+
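+/*
+ * Ask the hardware to compute the TCP/UDP checksum when the stack
+ * requested offload (CHECKSUM_PARTIAL) for an IPv4/IPv6 frame.
+ */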
+static void
+jme_tx_csum(struct sk_buff *skb, __u8 *flags)
+{
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ __u8 ip_proto;
+
+ switch (skb->protocol) {
+ case __constant_htons(ETH_P_IP):
+ ip_proto = ip_hdr(skb)->protocol;
+ break;
+ case __constant_htons(ETH_P_IPV6):
+ ip_proto = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ ip_proto = 0;
+ break;
+ }
+
+ switch (ip_proto) {
+ case IPPROTO_TCP:
+ *flags |= TXFLAG_TCPCS;
+ break;
+ case IPPROTO_UDP:
+ *flags |= TXFLAG_UDPCS;
+ break;
+ default:
+ jeprintk("jme", "Unsupported upper layer protocol for checksum offload.\n");
+ break;
+ }
+ }
+}
+
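+/*
+ * Copy the VLAN tag, if any, from the skb into the descriptor and
+ * tell the hardware to insert it on transmit.
+ */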
+static __always_inline void
+jme_tx_vlan(struct sk_buff *skb, volatile __u16 *vlan, __u8 *flags)
+{
+ if (vlan_tx_tag_present(skb)) {
+ vlan_dbg("jme", "Tag found!(%04x)\n", vlan_tx_tag_get(skb));
+ *flags |= TXFLAG_TAGON;
+ *vlan = vlan_tx_tag_get(skb);
+ }
+}
+
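+/*
+ * Fill the leading descriptor of a packet: length, offload flags,
+ * VLAN tag, and finally the OWN bit that hands it to the NIC.
+ */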
+static int
+jme_fill_first_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
+{
+ struct jme_ring *txring = jme->txring;
+ volatile struct txdesc *txdesc;
+ struct jme_buffer_info *txbi;
+ __u8 flags;
+
+ txdesc = (volatile struct txdesc *)txring->desc + idx;
+ txbi = txring->bufinf + idx;
+
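+ /* clear the whole descriptor before filling it in */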
+ txdesc->dw[0] = 0;
+ txdesc->dw[1] = 0;
+ txdesc->dw[2] = 0;
+ txdesc->dw[3] = 0;
+ txdesc->desc1.pktsize = cpu_to_le16(skb->len);
+ /*
+ * Set the OWN bit last. If the kernel fills descriptors faster
+ * than the NIC drains them, the NIC could otherwise start sending
+ * this descriptor before we tell it to process this TX queue.
+ * All other fields are already filled in correctly.
+ */
+ wmb();
+ flags = TXFLAG_OWN | TXFLAG_INT;
+ /*
+ * Fall back to plain checksum offload when the frame
+ * does not use TSO.
+ */
+ if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
+ jme_tx_csum(skb, &flags);
+ jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
+ txdesc->desc1.flags = flags;
+ /*
+ * Fill in the TX buffer info only after ownership has been
+ * handed to the NIC, for better tx_clean timing.
+ */
+ wmb();
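+ /* leading descriptor, plus the linear area, plus one per fragment */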
+ txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2;
+ txbi->skb = skb;
+ txbi->len = skb->len;
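+ /*
+ * start_xmit doubles as an "in flight" marker in
+ * jme_stop_queue_if_full(), so never store zero;
+ * substitute ~0 if jiffies happens to be 0.
+ */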
+ if (!(txbi->start_xmit = jiffies))
+ txbi->start_xmit = (0UL-1);
+
+ return 0;
+}
+
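+/*
+ * Pause the TX queue when fewer free descriptors remain than a
+ * worst-case packet needs, re-wake it if the cleaner has already
+ * freed enough, and stop it again if the oldest in-flight
+ * descriptor appears to have timed out.
+ */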
+static void
+jme_stop_queue_if_full(struct jme_adapter *jme)
+{
+ struct jme_ring *txring = jme->txring;
+ struct jme_buffer_info *txbi = txring->bufinf;
+
+ txbi += atomic_read(&txring->next_to_clean);
+
+ smp_wmb();
+ if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
+ netif_stop_queue(jme->dev);
+ queue_dbg(jme->dev->name, "TX Queue Paused.\n");
+ smp_wmb();
+ if (atomic_read(&txring->nr_free) >= jme->tx_wake_threshold) {
+ netif_wake_queue(jme->dev);
+ queue_dbg(jme->dev->name, "TX Queue Fast Woken.\n");
+ }
+ }
+
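+ /*
+ * If the oldest in-flight descriptor has been pending longer
+ * than TX_TIMEOUT, stop the queue so the stack's TX watchdog
+ * can step in and recover the device.
+ */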
+ if (unlikely(txbi->start_xmit &&
+ (jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
+ txbi->skb)) {
+ netif_stop_queue(jme->dev);
+ }
+}
+