-
-static int
-jme_alloc_txdesc(struct jme_adapter *jme,
- int nr_alloc)
-{
- struct jme_ring *txring = jme->txring;
- int idx;
-
- idx = txring->next_to_use;
-
- if(unlikely(atomic_read(&txring->nr_free) < nr_alloc))
- return -1;
-
- atomic_sub(nr_alloc, &txring->nr_free);
-
- if((txring->next_to_use += nr_alloc) >= RING_DESC_NR)
- txring->next_to_use -= RING_DESC_NR;
-
- return idx;
-}
-
-static void
-jme_tx_csum(struct sk_buff *skb, unsigned mtu, __u8 *flags)
-{
- if(skb->ip_summed == CHECKSUM_PARTIAL) {
- __u8 ip_proto;
-
- switch (skb->protocol) {
- case __constant_htons(ETH_P_IP):
- ip_proto = ip_hdr(skb)->protocol;
- break;
- case __constant_htons(ETH_P_IPV6):
- ip_proto = ipv6_hdr(skb)->nexthdr;
- break;
- default:
- ip_proto = 0;
- break;
- }
-
-
- switch(ip_proto) {
- case IPPROTO_TCP:
- *flags |= TXFLAG_TCPCS;
- break;
- case IPPROTO_UDP:
- *flags |= TXFLAG_UDPCS;
- break;
- default:
- jeprintk("jme", "Error upper layer protocol.\n");
- break;
- }
- }
-}
-
-__always_inline static void
-jme_tx_vlan(struct sk_buff *skb, volatile __u16 *vlan, __u8 *flags)
-{
- if(vlan_tx_tag_present(skb)) {
- *flags |= TXFLAG_TAGON;
- *vlan = vlan_tx_tag_get(skb);
- }
-}
-
/*
 * Build the TX descriptor pair for one skb and hand it to the NIC.
 *
 * Reserves two ring slots: slot idx+1 carries the DMA buffer address
 * and length of the (linear) skb data; slot idx is the head descriptor
 * carrying the packet size, checksum/VLAN flags, and the OWN bit.
 *
 * Returns 0 on success, or NETDEV_TX_BUSY when the ring has no room.
 */
static int
jme_set_new_txdesc(struct jme_adapter *jme,
		   struct sk_buff *skb)
{
	struct jme_ring *txring = jme->txring;
	volatile struct txdesc *txdesc = txring->desc, *ctxdesc;
	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
	dma_addr_t dmaaddr;
	int i, idx, nr_desc;
	__u8 flags;

	/* One head descriptor plus one buffer descriptor for the linear data. */
	nr_desc = 2;
	idx = jme_alloc_txdesc(jme, nr_desc);

	/* Ring full: ask the stack to requeue the skb. */
	if(unlikely(idx<0))
		return NETDEV_TX_BUSY;

	/* Fill the buffer descriptor(s) after the head slot (runs once
	 * for nr_desc == 2); index math assumes RING_DESC_NR is a power
	 * of two. */
	for(i = 1 ; i < nr_desc ; ++i) {
		ctxdesc = txdesc + ((idx + i) & (RING_DESC_NR-1));
		ctxbi = txbi + ((idx + i) & (RING_DESC_NR-1));

		/* NOTE(review): pci_map_single() result is not checked
		 * with pci_dma_mapping_error() — TODO confirm/handle. */
		dmaaddr = pci_map_single(jme->pdev,
					 skb->data,
					 skb->len,
					 PCI_DMA_TODEVICE);

		pci_dma_sync_single_for_device(jme->pdev,
					       dmaaddr,
					       skb->len,
					       PCI_DMA_TODEVICE);

		/* Buffer descriptor: OWN + 64-bit split DMA address + length. */
		ctxdesc->dw[0] = 0;
		ctxdesc->dw[1] = 0;
		ctxdesc->desc2.flags	= TXFLAG_OWN;
		if(jme->dev->features & NETIF_F_HIGHDMA)
			ctxdesc->desc2.flags |= TXFLAG_64BIT;
		ctxdesc->desc2.datalen	= cpu_to_le16(skb->len);
		ctxdesc->desc2.bufaddrh	= cpu_to_le32((__u64)dmaaddr >> 32);
		ctxdesc->desc2.bufaddrl	= cpu_to_le32(
						(__u64)dmaaddr & 0xFFFFFFFFUL);

		/* Remember the mapping so tx-clean can unmap it later. */
		ctxbi->mapping = dmaaddr;
		ctxbi->len = skb->len;
	}

	/* Head descriptor at the first reserved slot. */
	ctxdesc = txdesc + idx;
	ctxbi = txbi + idx;

	ctxdesc->dw[0] = 0;
	ctxdesc->dw[1] = 0;
	ctxdesc->dw[2] = 0;
	ctxdesc->dw[3] = 0;
	ctxdesc->desc1.pktsize = cpu_to_le16(skb->len);
	/*
	 * Set OWN bit at final.
	 * When kernel transmit faster than NIC.
	 * And NIC trying to send this descriptor before we tell
	 * it to start sending this TX queue.
	 * Other fields are already filled correctly.
	 */
	wmb();
	flags = TXFLAG_OWN | TXFLAG_INT;
	jme_tx_csum(skb, jme->dev->mtu, &flags);
	jme_tx_vlan(skb, &(ctxdesc->desc1.vlan), &flags);
	/* Writing flags (with OWN) publishes the descriptor to the NIC. */
	ctxdesc->desc1.flags = flags;
	/*
	 * Set tx buffer info after telling NIC to send
	 * For better tx_clean timing
	 */
	wmb();
	ctxbi->nr_desc = nr_desc;
	ctxbi->skb = skb;

	tx_dbg(jme->dev->name, "Xmit: %d+%d\n", idx, nr_desc);

	return 0;
}
-
-