From 0ede469cae20a61d6fdf59e7b1b7e81a26354016 Mon Sep 17 00:00:00 2001
From: Guo-Fu Tseng <cooldavid@cooldavid.org>
Date: Tue, 3 Aug 2010 17:20:41 +0800
Subject: [PATCH] Import jme 1.0.5-backport source

Notable changes carried by this import:
 - Drop the shadow-register mechanism; the MSI handler reads JME_IEVE
   directly.
 - Allocate the per-descriptor bufinf arrays with kmalloc() instead of
   embedding MAX_RING_DESC_NR-sized arrays in struct jme_ring, and
   unwind the allocations on failure.
 - Rewrite the RX checksum flag tests as masked comparisons and log
   checksum errors only for IPv4 frames.
 - Use DMA_BIT_MASK() and skip the netdev->trans_start update on
   2.6.29 and later kernels; print the MAC address without %pM.
 - Keep the link-change tasklet disabled until jme_open(), and use
   tasklet_disable() instead of tasklet_kill() in jme_close().
---
 Makefile |   4 +-
 jme.c    | 244 ++++++++++++++++++++++++++++++-------------------------
 jme.h    |  50 ++----
 3 files changed, 143 insertions(+), 155 deletions(-)

diff --git a/Makefile b/Makefile
index 7aa314f..3a626bc 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 MODNAME := jme
-TEMPFILES := $(MODNAME).o $(MODNAME).mod.c $(MODNAME).mod.o Module.symvers .$(MODNAME).*.cmd .tmp_versions modules.order
+TEMPFILES := $(MODNAME).o $(MODNAME).mod.c $(MODNAME).mod.o Module.symvers .$(MODNAME).*.cmd .tmp_versions modules.order Module.markers Modules.symvers
 
 EXTRA_CFLAGS += -Wall -O3
 #EXTRA_CFLAGS += -DTX_DEBUG
@@ -29,7 +29,7 @@ namespacecheck:
 	@rm -rf $(TEMPFILES)
 
 patch:
-	@/usr/bin/diff -uarN -X dontdiff ../mod ./ > bc.patch || echo > /dev/null
+	@/usr/bin/diff -uar -X dontdiff ../../trunc ./ > bc.patch || echo > /dev/null
 
 clean:
 	@rm -rf $(MODNAME).ko $(TEMPFILES)
diff --git a/jme.c b/jme.c
index 83eb75a..60c158e 100644
--- a/jme.c
+++ b/jme.c
@@ -322,20 +322,6 @@ jme_stop_irq(struct jme_adapter *jme)
 	jwrite32f(jme, JME_IENC, INTR_ENABLE);
 }
 
-static inline void
-jme_enable_shadow(struct jme_adapter *jme)
-{
-	jwrite32(jme,
-		 JME_SHBA_LO,
-		 ((u32)jme->shadow_dma & ~((u32)0x1F)) | SHBA_POSTEN);
-}
-
-static inline void
-jme_disable_shadow(struct jme_adapter *jme)
-{
-	jwrite32(jme, JME_SHBA_LO, 0x0);
-}
-
 static u32
 jme_linkstat_from_phy(struct jme_adapter *jme)
 {
@@ -522,12 +508,8 @@ jme_setup_tx_resources(struct jme_adapter *jme)
 				   &(txring->dmaalloc),
 				   GFP_ATOMIC);
 
-	if (!txring->alloc) {
-		txring->desc = NULL;
-		txring->dmaalloc = 0;
-		txring->dma = 0;
-		return -ENOMEM;
-	}
+	if (!txring->alloc)
+		goto err_set_null;
 
 	/*
 	 * 16 Bytes align
@@ -539,6 +521,11 @@ jme_setup_tx_resources(struct jme_adapter *jme)
 	atomic_set(&txring->next_to_clean, 0);
 	atomic_set(&txring->nr_free, jme->tx_ring_size);
 
+	txring->bufinf = kmalloc(sizeof(struct jme_buffer_info) *
+				 jme->tx_ring_size, GFP_ATOMIC);
+	if (unlikely(!(txring->bufinf)))
+		goto err_free_txring;
+
 	/*
 	 * Initialize Transmit Descriptors
 	 */
@@ -547,6 +534,20 @@ jme_setup_tx_resources(struct jme_adapter *jme)
 	       sizeof(struct jme_buffer_info) * jme->tx_ring_size);
 
 	return 0;
+
+err_free_txring:
+	dma_free_coherent(&(jme->pdev->dev),
+			  TX_RING_ALLOC_SIZE(jme->tx_ring_size),
+			  txring->alloc,
+			  txring->dmaalloc);
+
+err_set_null:
+	txring->desc = NULL;
+	txring->dmaalloc = 0;
+	txring->dma = 0;
+	txring->bufinf = NULL;
+
+	return -ENOMEM;
 }
 
 static void
@@ -554,19 +555,22 @@ jme_free_tx_resources(struct jme_adapter *jme)
 {
 	int i;
 	struct jme_ring *txring = &(jme->txring[0]);
-	struct jme_buffer_info *txbi = txring->bufinf;
+	struct jme_buffer_info *txbi;
 
 	if (txring->alloc) {
-		for (i = 0 ; i < jme->tx_ring_size ; ++i) {
-			txbi = txring->bufinf + i;
-			if (txbi->skb) {
-				dev_kfree_skb(txbi->skb);
-				txbi->skb = NULL;
+		if (txring->bufinf) {
+			for (i = 0 ; i < jme->tx_ring_size ; ++i) {
+				txbi = txring->bufinf + i;
+				if (txbi->skb) {
+					dev_kfree_skb(txbi->skb);
+					txbi->skb = NULL;
+				}
+				txbi->mapping = 0;
+				txbi->len = 0;
+				txbi->nr_desc = 0;
+				txbi->start_xmit = 0;
 			}
-			txbi->mapping = 0;
-			txbi->len = 0;
-			txbi->nr_desc = 0;
-			txbi->start_xmit = 0;
+			kfree(txring->bufinf);
 		}
 
 		dma_free_coherent(&(jme->pdev->dev),
@@ -578,11 +582,11 @@ jme_free_tx_resources(struct jme_adapter *jme)
 		txring->desc = NULL;
 		txring->dmaalloc = 0;
 		txring->dma = 0;
+		txring->bufinf = NULL;
 	}
 	txring->next_to_use = 0;
 	atomic_set(&txring->next_to_clean, 0);
 	atomic_set(&txring->nr_free, 0);
-
 }
 
 static inline void
@@ -653,7 +657,7 @@ jme_disable_tx_engine(struct jme_adapter *jme)
 static void
 jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
 {
-	struct jme_ring *rxring = jme->rxring;
+	struct jme_ring *rxring = &(jme->rxring[0]);
 	register struct rxdesc *rxdesc = rxring->desc;
 	struct jme_buffer_info *rxbi = rxring->bufinf;
 	rxdesc += i;
@@ -723,8 +727,11 @@ jme_free_rx_resources(struct jme_adapter *jme)
 	struct jme_ring *rxring = &(jme->rxring[0]);
 
 	if (rxring->alloc) {
-		for (i = 0 ; i < jme->rx_ring_size ; ++i)
-			jme_free_rx_buf(jme, i);
+		if (rxring->bufinf) {
+			for (i = 0 ; i < jme->rx_ring_size ; ++i)
+				jme_free_rx_buf(jme, i);
+			kfree(rxring->bufinf);
+		}
 
 		dma_free_coherent(&(jme->pdev->dev),
 				  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
@@ -734,6 +741,7 @@ jme_free_rx_resources(struct jme_adapter *jme)
 		rxring->desc = NULL;
 		rxring->dmaalloc = 0;
 		rxring->dma = 0;
+		rxring->bufinf = NULL;
 	}
 	rxring->next_to_use = 0;
 	atomic_set(&rxring->next_to_clean, 0);
@@ -749,12 +757,8 @@ jme_setup_rx_resources(struct jme_adapter *jme)
 				   RX_RING_ALLOC_SIZE(jme->rx_ring_size),
 				   &(rxring->dmaalloc),
 				   GFP_ATOMIC);
-	if (!rxring->alloc) {
-		rxring->desc = NULL;
-		rxring->dmaalloc = 0;
-		rxring->dma = 0;
-		return -ENOMEM;
-	}
+	if (!rxring->alloc)
+		goto err_set_null;
 
 	/*
 	 * 16 Bytes align
@@ -765,9 +769,16 @@ jme_setup_rx_resources(struct jme_adapter *jme)
 	rxring->next_to_use = 0;
 	atomic_set(&rxring->next_to_clean, 0);
 
+	rxring->bufinf = kmalloc(sizeof(struct jme_buffer_info) *
+				 jme->rx_ring_size, GFP_ATOMIC);
+	if (unlikely(!(rxring->bufinf)))
+		goto err_free_rxring;
+
 	/*
 	 * Initiallize Receive Descriptors
 	 */
+	memset(rxring->bufinf, 0,
+	       sizeof(struct jme_buffer_info) * jme->rx_ring_size);
 	for (i = 0 ; i < jme->rx_ring_size ; ++i) {
 		if (unlikely(jme_make_new_rx_buf(jme, i))) {
 			jme_free_rx_resources(jme);
@@ -778,6 +789,19 @@ jme_setup_rx_resources(struct jme_adapter *jme)
 	}
 
 	return 0;
+
+err_free_rxring:
+	dma_free_coherent(&(jme->pdev->dev),
+			  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
+			  rxring->alloc,
+			  rxring->dmaalloc);
+err_set_null:
+	rxring->desc = NULL;
+	rxring->dmaalloc = 0;
+	rxring->dma = 0;
+	rxring->bufinf = NULL;
+
+	return -ENOMEM;
 }
 
 static inline void
@@ -793,9 +817,9 @@ jme_enable_rx_engine(struct jme_adapter *jme)
 	/*
 	 * Setup RX DMA Bass Address
 	 */
-	jwrite32(jme, JME_RXDBA_LO, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);
+	jwrite32(jme, JME_RXDBA_LO, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);
 	jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
-	jwrite32(jme, JME_RXNDA, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);
+	jwrite32(jme, JME_RXNDA, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);
 
 	/*
 	 * Setup RX Descriptor Count
@@ -859,27 +883,27 @@ jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
 	if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
 		return false;
 
-	if (unlikely(!(flags & RXWBFLAG_MF) &&
-	(flags & RXWBFLAG_TCPON) && !(flags & RXWBFLAG_TCPCS))) {
-		msg_rx_err(jme, "TCP Checksum error.\n");
-		goto out_sumerr;
+	if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_TCPON | RXWBFLAG_TCPCS))
+			== RXWBFLAG_TCPON)) {
+		if (flags & RXWBFLAG_IPV4)
+			msg_rx_err(jme, "TCP Checksum error\n");
+		return false;
 	}
 
-	if (unlikely(!(flags & RXWBFLAG_MF) &&
-	(flags & RXWBFLAG_UDPON) && !(flags & RXWBFLAG_UDPCS))) {
-		msg_rx_err(jme, "UDP Checksum error.\n");
-		goto out_sumerr;
+	if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
+			== RXWBFLAG_UDPON)) {
+		if (flags & RXWBFLAG_IPV4)
+			msg_rx_err(jme,
"UDP Checksum error.\n"); + return false; } - if (unlikely((flags & RXWBFLAG_IPV4) && !(flags & RXWBFLAG_IPCS))) { + if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS)) + == RXWBFLAG_IPV4)) { msg_rx_err(jme, "IPv4 Checksum error.\n"); - goto out_sumerr; + return false; } return true; - -out_sumerr: - return false; } static void @@ -959,13 +983,14 @@ jme_process_receive(struct jme_adapter *jme, int limit) goto out_inc; i = atomic_read(&rxring->next_to_clean); - while (limit-- > 0) { + while (limit > 0) { rxdesc = rxring->desc; rxdesc += i; if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) || !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL)) goto out; + --limit; desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT; @@ -1299,7 +1324,7 @@ jme_rx_empty_tasklet(unsigned long arg) static void jme_wake_queue_if_stopped(struct jme_adapter *jme) { - struct jme_ring *txring = jme->txring; + struct jme_ring *txring = &(jme->txring[0]); smp_wmb(); if (unlikely(netif_queue_stopped(jme->dev) && @@ -1496,12 +1521,7 @@ jme_msi(int irq, void *dev_id) struct jme_adapter *jme = netdev_priv(netdev); u32 intrstat; - pci_dma_sync_single_for_cpu(jme->pdev, - jme->shadow_dma, - sizeof(u32) * SHADOW_REG_NR, - PCI_DMA_FROMDEVICE); - intrstat = jme->shadow_regs[SHADOW_IEVE]; - jme->shadow_regs[SHADOW_IEVE] = 0; + intrstat = jread32(jme, JME_IEVE); jme_intr_msi(jme, intrstat); @@ -1584,6 +1604,7 @@ jme_open(struct net_device *netdev) jme_clear_pm(jme); JME_NAPI_ENABLE(jme); + tasklet_enable(&jme->linkch_task); tasklet_enable(&jme->txclean_task); tasklet_hi_enable(&jme->rxclean_task); tasklet_hi_enable(&jme->rxempty_task); @@ -1592,7 +1613,6 @@ jme_open(struct net_device *netdev) if (rc) goto err_out; - jme_enable_shadow(jme); jme_start_irq(jme); if (test_bit(JME_FLAG_SSET, &jme->flags)) @@ -1660,15 +1680,14 @@ jme_close(struct net_device *netdev) netif_carrier_off(netdev); jme_stop_irq(jme); - jme_disable_shadow(jme); jme_free_irq(jme); JME_NAPI_DISABLE(jme); - tasklet_kill(&jme->linkch_task); - tasklet_kill(&jme->txclean_task); - tasklet_kill(&jme->rxclean_task); - tasklet_kill(&jme->rxempty_task); + tasklet_disable(&jme->linkch_task); + tasklet_disable(&jme->txclean_task); + tasklet_disable(&jme->rxclean_task); + tasklet_disable(&jme->rxempty_task); jme_reset_ghc_speed(jme); jme_disable_rx_engine(jme); @@ -1686,7 +1705,7 @@ static int jme_alloc_txdesc(struct jme_adapter *jme, struct sk_buff *skb) { - struct jme_ring *txring = jme->txring; + struct jme_ring *txring = &(jme->txring[0]); int idx, nr_alloc, mask = jme->tx_ring_mask; idx = txring->next_to_use; @@ -1740,7 +1759,7 @@ jme_fill_tx_map(struct pci_dev *pdev, static void jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) { - struct jme_ring *txring = jme->txring; + struct jme_ring *txring = &(jme->txring[0]); struct txdesc *txdesc = txring->desc, *ctxdesc; struct jme_buffer_info *txbi = txring->bufinf, *ctxbi; u8 hidma = jme->dev->features & NETIF_F_HIGHDMA; @@ -1770,7 +1789,7 @@ static int jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb) { if (unlikely( -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,16) +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17) skb_shinfo(skb)->tso_size #else skb_shinfo(skb)->gso_size @@ -1787,7 +1806,7 @@ jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb) static int jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags) { -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,16) +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17) *mss = cpu_to_le16(skb_shinfo(skb)->tso_size << TXDESC_MSS_SHIFT); 
 #else
 	*mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT);
@@ -1876,7 +1895,7 @@ jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags)
 static int
 jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 {
-	struct jme_ring *txring = jme->txring;
+	struct jme_ring *txring = &(jme->txring[0]);
 	struct txdesc *txdesc;
 	struct jme_buffer_info *txbi;
 	u8 flags;
@@ -1924,7 +1943,7 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 static void
 jme_stop_queue_if_full(struct jme_adapter *jme)
 {
-	struct jme_ring *txring = jme->txring;
+	struct jme_ring *txring = &(jme->txring[0]);
 	struct jme_buffer_info *txbi = txring->bufinf;
 	int idx = atomic_read(&txring->next_to_clean);
@@ -1980,7 +1999,9 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 				TXCS_SELECT_QUEUE0 |
 				TXCS_QUEUE0S |
 				TXCS_ENABLE);
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,29)
 	netdev->trans_start = jiffies;
+#endif
 
 	tx_dbg(jme, "xmit: %d+%d@%lu\n", idx,
 			skb_shinfo(skb)->nr_frags + 2,
@@ -2651,17 +2672,40 @@ static int
 jme_pci_dma64(struct pci_dev *pdev)
 {
 	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
-	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK))
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
+	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
+#else
+	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)
+#endif
+	   )
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
+		if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
+#else
 		if (!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
+#endif
 			return 1;
 
 	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
-	    !pci_set_dma_mask(pdev, DMA_40BIT_MASK))
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
+	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(40))
+#else
+	    !pci_set_dma_mask(pdev, DMA_40BIT_MASK)
+#endif
+	   )
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
+		if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)))
+#else
 		if (!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK))
+#endif
 			return 1;
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
+		if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+#else
 	if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
 		if (!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
+#endif
 			return 0;
 
 	return -1;
@@ -2789,16 +2833,10 @@ jme_init_one(struct pci_dev *pdev,
 	jme->jme_vlan_rx = vlan_hwaccel_rx;
 	jme->old_mtu = netdev->mtu = 1500;
 	jme->phylink = 0;
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
-	jme->tx_ring_size = 1 << 9;
-	jme->tx_wake_threshold = 1 << 8;
-	jme->rx_ring_size = 1 << 8;
-#else
 	jme->tx_ring_size = 1 << 10;
+	jme->tx_ring_mask = jme->tx_ring_size - 1;
 	jme->tx_wake_threshold = 1 << 9;
 	jme->rx_ring_size = 1 << 9;
-#endif
-	jme->tx_ring_mask = jme->tx_ring_size - 1;
 	jme->rx_ring_mask = jme->rx_ring_size - 1;
 	jme->msg_enable = JME_DEF_MSG_ENABLE;
 	jme->regs = ioremap(pci_resource_start(pdev, 0),
@@ -2808,14 +2846,6 @@ jme_init_one(struct pci_dev *pdev,
 		rc = -ENOMEM;
 		goto err_out_free_netdev;
 	}
-	jme->shadow_regs = pci_alloc_consistent(pdev,
-						sizeof(u32) * SHADOW_REG_NR,
-						&(jme->shadow_dma));
-	if (!(jme->shadow_regs)) {
-		jeprintk(pdev, "Allocating shadow register mapping error.\n");
-		rc = -ENOMEM;
-		goto err_out_unmap;
-	}
 
 	if (no_pseudohp) {
 		apmc = jread32(jme, JME_APMC) & ~JME_APMC_PSEUDO_HP_EN;
@@ -2851,6 +2881,7 @@ jme_init_one(struct pci_dev *pdev,
 	tasklet_init(&jme->rxempty_task,
 		     &jme_rx_empty_tasklet,
 		     (unsigned long) jme);
+	tasklet_disable_nosync(&jme->linkch_task);
 	tasklet_disable_nosync(&jme->txclean_task);
 	tasklet_disable_nosync(&jme->rxclean_task);
 	tasklet_disable_nosync(&jme->rxempty_task);
@@ -2900,7 +2931,7 @@ jme_init_one(struct pci_dev *pdev,
 	if (!jme->mii_if.phy_id) {
 		rc = -EIO;
 		jeprintk(pdev, "Can not find phy_id.\n");
-		goto err_out_free_shadow;
+		goto err_out_unmap;
 	}
 
 	jme->reg_ghc |= GHC_LINK_POLL;
@@ -2929,7 +2960,7 @@ jme_init_one(struct pci_dev *pdev,
 	if (rc) {
 		jeprintk(pdev,
 			"Reload eeprom for reading MAC Address error.\n");
-		goto err_out_free_shadow;
+		goto err_out_unmap;
 	}
 	jme_load_macaddr(netdev);
@@ -2945,25 +2976,27 @@ jme_init_one(struct pci_dev *pdev,
 	rc = register_netdev(netdev);
 	if (rc) {
 		jeprintk(pdev, "Cannot register net device.\n");
-		goto err_out_free_shadow;
+		goto err_out_unmap;
 	}
 
-	msg_probe(jme, "%s%s ver:%x rev:%x macaddr:%pM\n",
+	msg_probe(jme, "%s%s ver:%x rev:%x "
+		  "macaddr: %02x:%02x:%02x:%02x:%02x:%02x\n",
 		  (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
 			"JMC250 Gigabit Ethernet" :
 		  (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
 			"JMC260 Fast Ethernet" : "Unknown",
 		  (jme->fpgaver != 0) ? " (FPGA)" : "",
 		  (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
-		  jme->rev, netdev->dev_addr);
+		  jme->rev,
+		  netdev->dev_addr[0],
+		  netdev->dev_addr[1],
+		  netdev->dev_addr[2],
+		  netdev->dev_addr[3],
+		  netdev->dev_addr[4],
+		  netdev->dev_addr[5]);
 
 	return 0;
 
-err_out_free_shadow:
-	pci_free_consistent(pdev,
-			    sizeof(u32) * SHADOW_REG_NR,
-			    jme->shadow_regs,
-			    jme->shadow_dma);
 err_out_unmap:
 	iounmap(jme->regs);
 err_out_free_netdev:
@@ -2984,10 +3017,6 @@ jme_remove_one(struct pci_dev *pdev)
 	struct jme_adapter *jme = netdev_priv(netdev);
 
 	unregister_netdev(netdev);
-	pci_free_consistent(pdev,
-			    sizeof(u32) * SHADOW_REG_NR,
-			    jme->shadow_regs,
-			    jme->shadow_dma);
 	iounmap(jme->regs);
 	pci_set_drvdata(pdev, NULL);
 	free_netdev(netdev);
@@ -3013,8 +3042,6 @@ jme_suspend(struct pci_dev *pdev, pm_message_t state)
 	tasklet_disable(&jme->rxclean_task);
 	tasklet_disable(&jme->rxempty_task);
 
-	jme_disable_shadow(jme);
-
 	if (netif_carrier_ok(netdev)) {
 		if (test_bit(JME_FLAG_POLL, &jme->flags))
 			jme_polling_mode(jme);
@@ -3066,7 +3093,6 @@ jme_resume(struct pci_dev *pdev)
 	else
 		jme_reset_phy_processor(jme);
 
-	jme_enable_shadow(jme);
 	jme_start_irq(jme);
 	netif_device_attach(netdev);
 
diff --git a/jme.h b/jme.h
index 7eb2d97..c7a4b19 100644
--- a/jme.h
+++ b/jme.h
@@ -25,7 +25,7 @@
 #define __JME_H_INCLUDED__
 
 #define DRV_NAME	"jme"
-#define DRV_VERSION	"1.0.4"
+#define DRV_VERSION	"1.0.5"
 #define PFX		DRV_NAME ": "
 
 #define PCI_DEVICE_ID_JMICRON_JMC250	0x0250
@@ -247,7 +247,7 @@ enum jme_txdesc_flags_bits {
 };
 
 #define TXDESC_MSS_SHIFT	2
-enum jme_rxdescwb_flags_bits {
+enum jme_txwbdesc_flags_bits {
 	TXWBFLAG_OWN	= 0x80,
 	TXWBFLAG_INT	= 0x40,
 	TXWBFLAG_TMOUT	= 0x20,
@@ -372,13 +372,6 @@ struct jme_buffer_info {
 
 /*
  * The structure holding buffer information and ring descriptors all together.
 */
-#include <linux/version.h>
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
-#define MAX_RING_DESC_NR	512
-#else
-#define MAX_RING_DESC_NR	1024
-#endif
-
 struct jme_ring {
 	void *alloc;		/* pointer to allocated memory */
 	void *desc;		/* pointer to ring memory */
@@ -386,13 +379,14 @@ struct jme_ring {
 	dma_addr_t dma;		/* phys address for ring dma */
 
 	/* Buffer information corresponding to each descriptor */
-	struct jme_buffer_info bufinf[MAX_RING_DESC_NR];
+	struct jme_buffer_info *bufinf;
 
 	int next_to_use;
 	atomic_t next_to_clean;
 	atomic_t nr_free;
 };
 
+#include <linux/version.h>
 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
 #define false	0
 #define true	1
@@ -426,7 +420,7 @@ static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
 	return skb->h.th;
 }
 #else
-#define NET_STAT(priv) priv->dev->stats
+#define NET_STAT(priv) (priv->dev->stats)
 #define NETDEV_GET_STATS(netdev, fun_ptr)
 #define DECLARE_NET_DEVICE_STATS
 #endif
@@ -448,7 +442,7 @@ static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
 	netif_rx_schedule_prep(priv->dev)
 #define JME_RX_SCHEDULE(priv) \
 	__netif_rx_schedule(priv->dev);
-#elif LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,28)
+#else
 #define DECLARE_NAPI_STRUCT struct napi_struct napi;
 #define NETIF_NAPI_SET(dev, napis, pollfn, q) \
 	netif_napi_add(dev, napis, pollfn, q);
@@ -466,36 +460,15 @@ static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
 	napi_schedule_prep(&priv->napi)
 #define JME_RX_SCHEDULE(priv) \
 	__napi_schedule(&priv->napi);
-#else
-#define DECLARE_NAPI_STRUCT struct napi_struct napi;
-#define NETIF_NAPI_SET(dev, napis, pollfn, q) \
-	netif_napi_add(dev, napis, pollfn, q);
-#define JME_NAPI_HOLDER(holder) struct napi_struct *holder
-#define JME_NAPI_WEIGHT(w) int w
-#define JME_NAPI_WEIGHT_VAL(w) w
-#define JME_NAPI_WEIGHT_SET(w, r)
-#define DECLARE_NETDEV struct net_device *netdev = jme->dev;
-#define JME_RX_COMPLETE(dev, napis) netif_rx_complete(dev, napis)
-#define JME_NAPI_ENABLE(priv) napi_enable(&priv->napi);
-#define JME_NAPI_DISABLE(priv) \
-	if (!napi_disable_pending(&priv->napi)) \
-		napi_disable(&priv->napi);
-#define JME_RX_SCHEDULE_PREP(priv) \
-	netif_rx_schedule_prep(priv->dev, &priv->napi)
-#define JME_RX_SCHEDULE(priv) \
-	__netif_rx_schedule(priv->dev, &priv->napi);
 #endif
 
 /*
  * Jmac Adapter Private data
  */
-#define SHADOW_REG_NR 8
 struct jme_adapter {
 	struct pci_dev		*pdev;
 	struct net_device	*dev;
 	void __iomem		*regs;
-	dma_addr_t		shadow_dma;
-	u32			*shadow_regs;
 	struct mii_if_info	mii_if;
 	struct jme_ring		rxring[RX_RING_NR];
 	struct jme_ring		txring[TX_RING_NR];
@@ -551,10 +524,6 @@ jme_get_stats(struct net_device *netdev)
 }
 #endif
 
-enum shadow_reg_val {
-	SHADOW_IEVE = 0,
-};
-
 enum jme_flags_bits {
 	JME_FLAG_MSI		= 1,
 	JME_FLAG_SSET		= 2,
@@ -1200,13 +1169,6 @@ enum jme_chipmode_shifts {
 	CM_CHIPREV_SHIFT	= 8,
 };
 
-/*
- * Shadow base address register bits
- */
-enum jme_shadow_base_address_bits {
-	SHBA_POSTEN	= 0x1,
-};
-
 /*
  * Aggressive Power Mode Control
  */
-- 
2.39.3
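
A few short userspace sketches follow. They sit after the version trailer
on purpose, so "git am" ignores them and the patch itself still applies.

The first illustrates the rewrite in jme_rxsum_ok(): each old three-part
test (fragment bit clear, protocol bit set, checksum-ok bit clear) becomes
a single mask-and-compare. The RXWBFLAG_* values below are placeholders,
not the real jme.h definitions, which this diff does not show; the
brute-force loop checks that the two predicates agree on every 16-bit
flag combination.

	#include <assert.h>
	#include <stdio.h>

	#define RXWBFLAG_MF	0x0008	/* placeholder: more fragments */
	#define RXWBFLAG_TCPON	0x0004	/* placeholder: frame is TCP */
	#define RXWBFLAG_TCPCS	0x0002	/* placeholder: TCP checksum ok */

	/* Old form: three separate bit tests. */
	static int tcp_csum_bad_old(unsigned int flags)
	{
		return !(flags & RXWBFLAG_MF) &&
		       (flags & RXWBFLAG_TCPON) && !(flags & RXWBFLAG_TCPCS);
	}

	/* New form: mask the three bits at once; the result equals
	 * RXWBFLAG_TCPON exactly when TCPON is set and MF/TCPCS are clear. */
	static int tcp_csum_bad_new(unsigned int flags)
	{
		return (flags & (RXWBFLAG_MF | RXWBFLAG_TCPON | RXWBFLAG_TCPCS))
				== RXWBFLAG_TCPON;
	}

	int main(void)
	{
		unsigned int f;

		/* Exhaustively compare both predicates over all 16-bit flags. */
		for (f = 0; f <= 0xFFFF; ++f)
			assert(!tcp_csum_bad_old(f) == !tcp_csum_bad_new(f));
		puts("old and new TCP checksum predicates agree");
		return 0;
	}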
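
The second sketch shows the error-unwind shape that jme_setup_tx_resources()
and jme_setup_rx_resources() adopt: a failed bufinf allocation jumps to a
label that releases the descriptor ring allocated before it, and both paths
fall through to the pointer-clearing label. The ring_setup() helper and the
plain malloc()/free() calls are illustrative stand-ins for the driver's
dma_alloc_coherent()/kmalloc() pair, not code from this patch.

	#include <stdlib.h>

	struct ring {
		void *alloc;	/* stands in for the DMA descriptor ring */
		void *bufinf;	/* stands in for the buffer-info array */
	};

	static int ring_setup(struct ring *r, size_t ring_bytes, size_t bufinf_bytes)
	{
		r->alloc = malloc(ring_bytes);
		if (!r->alloc)
			goto err_set_null;	/* nothing to undo yet */

		r->bufinf = malloc(bufinf_bytes);
		if (!r->bufinf)
			goto err_free_ring;	/* undo the first allocation */

		return 0;

	err_free_ring:
		free(r->alloc);
	err_set_null:
		r->alloc = NULL;
		r->bufinf = NULL;
		return -1;	/* the driver returns -ENOMEM here */
	}

	int main(void)
	{
		struct ring r;

		if (ring_setup(&r, 1024, 256) != 0)
			return 1;
		free(r.bufinf);
		free(r.alloc);
		return 0;
	}

Because later failures unwind through every earlier label in reverse order,
the labels are declared in allocation order.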
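
Finally, jme_pci_dma64() keeps its 64 -> 40 -> 32 bit fallback ladder but
spells the masks DMA_BIT_MASK(n) on 2.6.29+ kernels. The macro below should
mirror the kernel's include/linux/dma-mapping.h definition; the n == 64
special case avoids the undefined 64-bit shift.

	#include <assert.h>
	#include <stdio.h>

	/* Mask with the n low bits set, e.g. DMA_BIT_MASK(32) == 0xffffffff. */
	#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

	int main(void)
	{
		assert(DMA_BIT_MASK(32) == 0xffffffffULL);	/* DMA_32BIT_MASK */
		assert(DMA_BIT_MASK(40) == 0xffffffffffULL);	/* DMA_40BIT_MASK */
		assert(DMA_BIT_MASK(64) == ~0ULL);		/* DMA_64BIT_MASK */
		printf("40-bit DMA mask: %#llx\n", DMA_BIT_MASK(40));
		return 0;
	}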