#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
+#include <linux/irq.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
+#include <linux/net.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
static void
jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
- struct jme_ring *rxring = jme->rxring;
+ struct jme_ring *rxring = &(jme->rxring[0]);
register volatile struct rxdesc* rxdesc = rxring->desc;
struct jme_buffer_info *rxbi = rxring->bufinf;
rxdesc += i;
struct jme_adapter *jme = (struct jme_adapter*)arg;
struct net_device *netdev = jme->dev;
-
if(unlikely(!netif_carrier_ok(netdev) ||
(atomic_read(&jme->link_changing) != 1)
)) {
*/
jwrite32f(jme, JME_IENC, INTR_ENABLE);
- /*
- * Write 1 clear interrupt status
- */
- jwrite32f(jme, JME_IEVE, intrstat);
-
if(intrstat & (INTR_LINKCH | INTR_SWINTR)) {
tasklet_schedule(&jme->linkch_task);
goto out_reenable;
atomic_inc(&jme->rx_empty);
tasklet_schedule(&jme->rxempty_task);
}
-
- if(intrstat & (INTR_PCCRX0TO | INTR_PCCRX0))
+ else if(intrstat & (INTR_PCCRX0TO | INTR_PCCRX0))
tasklet_schedule(&jme->rxclean_task);
}
out_reenable:
+ /*
+ * Write 1 clear interrupt status
+ */
+ jwrite32f(jme, JME_IEVE, intrstat);
+
/*
* Re-enable interrupt
*/
return IRQ_HANDLED;
}
+/*
+ * MSI-X vector 0 handler: services the "misc" interrupt sources
+ * (link change, software interrupt, PCC timer).  Event status comes
+ * from the DMA'd shadow register copy instead of a JME_IEVE read.
+ */
+static irqreturn_t
+jme_msix_misc(int irq, void *dev_id)
+{
+ struct net_device *netdev = dev_id;
+ struct jme_adapter *jme = netdev_priv(netdev);
+ __u32 intrstat;
+
+ pci_dma_sync_single_for_cpu(jme->pdev,
+ jme->shadow_dma,
+ sizeof(__u32) * SHADOW_REG_NR,
+ PCI_DMA_FROMDEVICE);
+ intrstat = jme->shadow_regs[SHADOW_IEVE];
+ /* Consume the misc bits from the shadow copy so they are not re-handled. */
+ jme->shadow_regs[SHADOW_IEVE] &= ~INTR_EN_MISC;
+
+ /*
+ * Disable interrupt
+ */
+ jwrite32f(jme, JME_IENC, INTR_EN_MISC);
+
+ /* Link change takes priority; skip the timer event in that case. */
+ if(intrstat & (INTR_LINKCH | INTR_SWINTR)) {
+ tasklet_schedule(&jme->linkch_task);
+ goto out_reenable;
+ }
+
+ if(intrstat & INTR_TMINTR)
+ tasklet_schedule(&jme->pcc_task);
+
+out_reenable:
+ /*
+ * Write 1 clear interrupt status
+ */
+ jwrite32f(jme, JME_IEVE, INTR_EN_MISC);
+
+ /*
+ * Re-enable interrupt
+ */
+ jwrite32f(jme, JME_IENS, INTR_EN_MISC);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * MSI-X vector 1 handler: TX completion/coalescing events.
+ * TX cleaning is skipped while a link change is in progress
+ * (link_changing != 1); status is acked unconditionally below.
+ */
+static irqreturn_t
+jme_msix_tx(int irq, void *dev_id)
+{
+ struct net_device *netdev = dev_id;
+ struct jme_adapter *jme = netdev_priv(netdev);
+
+ /*
+ * Disable interrupt
+ */
+ jwrite32f(jme, JME_IENC, INTR_EN_TX);
+
+ if(unlikely(atomic_read(&jme->link_changing) != 1))
+ goto out_reenable;
+
+ tasklet_schedule(&jme->txclean_task);
+
+out_reenable:
+ /*
+ * Write 1 clear interrupt status
+ */
+ jwrite32f(jme, JME_IEVE, INTR_EN_TX | INTR_TX0);
+
+ /*
+ * Re-enable interrupt
+ */
+ jwrite32f(jme, JME_IENS, INTR_EN_TX);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * MSI-X vector 2 handler: RX events (packet coalescing timeout/count,
+ * ring empty).  In NAPI mode (JME_FLAG_POLL) RX work is deferred to
+ * the poll routine; otherwise the rxclean/rxempty tasklets are used.
+ * Status comes from the DMA'd shadow register copy.
+ */
+static irqreturn_t
+jme_msix_rx(int irq, void *dev_id)
+{
+ struct net_device *netdev = dev_id;
+ struct jme_adapter *jme = netdev_priv(netdev);
+ __u32 intrstat;
+
+ pci_dma_sync_single_for_cpu(jme->pdev,
+ jme->shadow_dma,
+ sizeof(__u32) * SHADOW_REG_NR,
+ PCI_DMA_FROMDEVICE);
+ intrstat = jme->shadow_regs[SHADOW_IEVE];
+ /* Consume the RX bits from the shadow copy so they are not re-handled. */
+ jme->shadow_regs[SHADOW_IEVE] &= ~INTR_EN_RX0;
+
+ /*
+ * Disable interrupt
+ */
+ jwrite32f(jme, JME_IENC, INTR_EN_RX0);
+
+ if(unlikely(atomic_read(&jme->link_changing) != 1))
+ goto out_reenable;
+
+ if(jme->flags & JME_FLAG_POLL) {
+ if(intrstat & INTR_RX0EMP)
+ atomic_inc(&jme->rx_empty);
+
+ if(likely(JME_RX_SCHEDULE_PREP(jme))) {
+ jme_polling_mode(jme);
+ JME_RX_SCHEDULE(jme);
+ }
+ }
+ else {
+ /* Ring-empty is more urgent than plain coalesced RX. */
+ if(intrstat & INTR_RX0EMP) {
+ atomic_inc(&jme->rx_empty);
+ tasklet_schedule(&jme->rxempty_task);
+ }
+ else if(intrstat & (INTR_PCCRX0TO | INTR_PCCRX0))
+ tasklet_schedule(&jme->rxclean_task);
+ }
+
+out_reenable:
+ /*
+ * Write 1 clear interrupt status
+ */
+ jwrite32f(jme, JME_IEVE, INTR_EN_RX0 | INTR_RX0);
+
+ /*
+ * Re-enable interrupt
+ */
+ jwrite32f(jme, JME_IENS, INTR_EN_RX0);
+
+ return IRQ_HANDLED;
+}
static void
jme_reset_link(struct jme_adapter *jme)
spin_unlock_irqrestore(&jme->phy_lock, flags);
}
+/*
+ * Fill the per-vector book-keeping (handler, vector number, irq name)
+ * from the entries returned by pci_enable_msix().
+ *
+ * Uses snprintf instead of strcpy+strcat: a netdev name may be up to
+ * IFNAMSIZ-1 characters, which together with the "-misc"/"-tx"/"-rx"
+ * suffix would overflow an unbounded copy into msix[].name.
+ */
+static void
+jme_setup_msix_info(struct jme_adapter *jme, struct msix_entry *msix_ent)
+{
+ static const char * const suffix[JME_MSIX_VEC_NR] = {
+ "-misc", "-tx", "-rx"
+ };
+ int i;
+
+ for (i = 0; i < JME_MSIX_VEC_NR; i++) {
+ jme->msix[i].requested = false;
+ jme->msix[i].vector = msix_ent[i].vector;
+ snprintf(jme->msix[i].name, sizeof(jme->msix[i].name),
+ "%s%s", jme->dev->name, suffix[i]);
+ }
+
+ jme->msix[0].handler = jme_msix_misc;
+ jme->msix[1].handler = jme_msix_tx;
+ jme->msix[2].handler = jme_msix_rx;
+}
+
+/*
+ * Program the hardware's interrupt-to-vector mapping table.
+ *
+ * Each of the 32 interrupt status bits gets a 4-bit vector entry;
+ * eight entries are packed into each 32-bit register starting at
+ * JME_MSIX_ENT.  TX sources map to vector 1, RX0 sources to vector 2,
+ * everything else (misc) to vector 0.
+ */
+static void
+jme_fill_msix_regs(struct jme_adapter *jme)
+{
+ __u32 mask = 1, reg_msix = 0;
+ int i, vec;
+
+ for(i = 0 ; i < 32 ; ++i) {
+ if(mask & INTR_EN_TX)
+ vec = 1;
+ else if(mask & INTR_EN_RX0)
+ vec = 2;
+ else
+ vec = 0;
+
+ /* Start a fresh register every 8 entries... */
+ if(!(i & 7))
+ reg_msix = 0;
+ reg_msix |= (vec & 7) << ((i & 7) << 2);
+ /* ...and flush it once all 8 nibbles are filled. */
+ if((i & 7) == 7)
+ jwrite32(jme,
+ JME_MSIX_ENT + ((i >> 3) << 2),
+ reg_msix);
+
+ mask <<= 1;
+ }
+}
+
+/*
+ * request_irq() one line per MSI-X vector.  Stops at the first
+ * failure and returns its error code; vectors acquired so far stay
+ * marked requested so the caller can release them via jme_free_msix().
+ */
+static int
+jme_request_msix_irq(struct jme_adapter *jme)
+{
+ int i, rc;
+ struct jme_msix_info *msix_info;
+
+ for (i = 0; i < JME_MSIX_VEC_NR; i++) {
+ msix_info = jme->msix + i;
+ rc = request_irq(msix_info->vector,
+ msix_info->handler,
+ 0,
+ msix_info->name,
+ jme->dev);
+ if(rc)
+ break;
+#if 0
+#ifdef CONFIG_SMP
+ /*
+ * Try to set different cpumask for each irq,
+ * ignoring assign fail since it has no critical
+ * effect to the working function.
+ */
+ if(irq_can_set_affinity(msix_info->vector))
+ irq_set_affinity(msix_info->vector,
+ cpumask_of_cpu(i % num_online_cpus()));
+#endif
+#endif
+ msix_info->requested = true;
+ }
+
+ return rc;
+}
+
+/*
+ * Release every successfully requested MSI-X irq, then disable MSI-X.
+ * requested flags are set in vector order, so the loop may stop at
+ * the first entry that was never requested.
+ */
+static void
+jme_free_msix(struct jme_adapter *jme)
+{
+ int i;
+ struct jme_msix_info *msix_info;
+
+ for (i = 0; i < JME_MSIX_VEC_NR; i++) {
+ msix_info = jme->msix + i;
+ if(msix_info->requested)
+ free_irq(msix_info->vector, jme->dev);
+ else
+ break;
+ msix_info->requested = false;
+ }
+ pci_disable_msix(jme->pdev);
+}
+
+/*
+ * Try to bring up MSI-X: enable it on the PCI device, set up the
+ * per-vector info and hardware mapping table, then request the irqs.
+ * Returns 0 on success; on failure everything is rolled back via the
+ * goto cleanup chain and the error code is returned so the caller can
+ * fall back to MSI/INTx.
+ */
+static int
+jme_request_msix(struct jme_adapter *jme)
+{
+ int i, rc;
+ struct msix_entry msix_ent[JME_MSIX_VEC_NR];
+
+ for (i = 0; i < JME_MSIX_VEC_NR; i++) {
+ msix_ent[i].entry = i;
+ msix_ent[i].vector = 0;
+ }
+
+ rc = pci_enable_msix(jme->pdev, msix_ent, JME_MSIX_VEC_NR);
+ if (rc)
+ goto out;
+
+ jme_setup_msix_info(jme, msix_ent);
+ jme_fill_msix_regs(jme);
+
+ rc = jme_request_msix_irq(jme);
+ if(rc)
+ goto out_free_msix;
+
+ return 0;
+
+out_free_msix:
+ jme_free_msix(jme);
+out:
+ return rc;
+}
+
static int
jme_request_irq(struct jme_adapter *jme)
{
irq_handler_t handler = jme_intr;
int irq_flags = IRQF_SHARED;
- if (!pci_enable_msi(jme->pdev)) {
+
+ if(!jme_request_msix(jme)) {
+ jme->flags |= JME_FLAG_MSIX;
+ return 0;
+ }
+
+ if(!pci_enable_msi(jme->pdev)) {
jme->flags |= JME_FLAG_MSI;
handler = jme_msi;
irq_flags = 0;
static void
jme_free_irq(struct jme_adapter *jme)
{
- free_irq(jme->pdev->irq, jme->dev);
- if (jme->flags & JME_FLAG_MSI) {
- pci_disable_msi(jme->pdev);
- jme->flags &= ~JME_FLAG_MSI;
- jme->dev->irq = jme->pdev->irq;
- }
+ /* MSI-X path: release all vectors and disable MSI-X. */
+ if(jme->flags & JME_FLAG_MSIX) {
+ jme_free_msix(jme);
+ jme->flags &= ~JME_FLAG_MSIX;
+ }
+ else {
+ /* INTx or MSI: the single shared irq registered in jme_request_irq. */
+ free_irq(jme->pdev->irq, jme->dev);
+ if (jme->flags & JME_FLAG_MSI) {
+ pci_disable_msi(jme->pdev);
+ jme->flags &= ~JME_FLAG_MSI;
+ jme->dev->irq = jme->pdev->irq;
+ }
+ }
}
static int
struct ethtool_eeprom *eeprom, u8 *data)
{
struct jme_adapter *jme = netdev_priv(netdev);
- int i, offset = eeprom->offset, len = eeprom->len;
+ int i, offset = eeprom->offset, len = eeprom->len, idx;
/*
* ethtool will check the boundary for us
*/
+ memset(data, 0xFF, len);
eeprom->magic = JME_EEPROM_MAGIC;
- for(i = 0 ; i < len ; ++i)
- data[i] = jme_smb_read(jme, i + offset);
+ for(i = 0 ; i < len ; ++i) {
+ idx = i + offset;
+ data[i] = jme_smb_read(jme, idx);
+ if(data[i] == 0xFF)
+ break;
+ if((idx > 1) && !((idx - 2) % 3) && (data[i] & 0x80))
+ len = (len > i + 3)?i + 3:len;
+ }
return 0;
}
#include <linux/version.h>
#define DRV_NAME "jme"
-#define DRV_VERSION "0.9d"
+#define DRV_VERSION "0.9d-msix"
#define PFX DRV_NAME ": "
#define JME_GE_DEVICE 0x250
#define jeprintk(devname, fmt, args...) \
printk(KERN_ERR "%s: " fmt, devname, ## args)
-#define DEFAULT_MSG_ENABLE \
- (NETIF_MSG_DRV | \
- NETIF_MSG_PROBE | \
- NETIF_MSG_LINK | \
- NETIF_MSG_TIMER | \
- NETIF_MSG_RX_ERR | \
- NETIF_MSG_TX_ERR)
-
#define PCI_CONF_DCSR_MRRS 0x59
#define PCI_CONF_DCSR_MRRS_MASK 0x70
enum pci_conf_dcsr_mrrs_vals {
#define MAX_ETHERNET_JUMBO_PACKET_SIZE 9216
#define MIN_ETHERNET_PACKET_SIZE 60
+
+/*
+ * Kernel-version compatibility shims.
+ * Pre-2.6.22 kernels keep net_device_stats in the driver private
+ * struct and register a get_stats callback; later kernels provide
+ * dev->stats directly.
+ */
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
+#define NET_STAT(priv) priv->stats
+#define NETDEV_GET_STATS(netdev, fun_ptr) \
+ netdev->get_stats = fun_ptr
+#define DECLARE_NET_DEVICE_STATS struct net_device_stats stats;
+#else
+#define NET_STAT(priv) priv->dev->stats
+#define NETDEV_GET_STATS(netdev, fun_ptr)
+#define DECLARE_NET_DEVICE_STATS
+#endif
+
+/*
+ * NAPI compatibility: pre-2.6.24 kernels attach poll/weight to the
+ * net_device itself; later kernels use struct napi_struct and the
+ * netif_napi_add()/napi_enable() family.
+ */
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
+#define DECLARE_NAPI_STRUCT
+#define NETIF_NAPI_SET(dev, napis, pollfn, q) \
+ dev->poll = pollfn; \
+ dev->weight = q;
+#define JME_NAPI_HOLDER(holder) struct net_device *holder
+#define JME_NAPI_WEIGHT(w) int *w
+#define JME_NAPI_WEIGHT_VAL(w) *w
+#define JME_NAPI_WEIGHT_SET(w, r) *w = r
+#define JME_RX_COMPLETE(dev, napis) netif_rx_complete(dev)
+#define JME_NAPI_ENABLE(priv) netif_poll_enable(priv->dev);
+#define JME_NAPI_DISABLE(priv) netif_poll_disable(priv->dev);
+#define JME_RX_SCHEDULE_PREP(priv) \
+ netif_rx_schedule_prep(priv->dev)
+#define JME_RX_SCHEDULE(priv) \
+ __netif_rx_schedule(priv->dev);
+#else
+#define DECLARE_NAPI_STRUCT struct napi_struct napi;
+#define NETIF_NAPI_SET(dev, napis, pollfn, q) \
+ netif_napi_add(dev, napis, pollfn, q);
+#define JME_NAPI_HOLDER(holder) struct napi_struct *holder
+#define JME_NAPI_WEIGHT(w) int w
+#define JME_NAPI_WEIGHT_VAL(w) w
+#define JME_NAPI_WEIGHT_SET(w, r)
+#define JME_RX_COMPLETE(dev, napis) netif_rx_complete(dev, napis)
+#define JME_NAPI_ENABLE(priv) napi_enable(&priv->napi);
+#define JME_NAPI_DISABLE(priv) \
+ if(!napi_disable_pending(&priv->napi)) \
+ napi_disable(&priv->napi);
+#define JME_RX_SCHEDULE_PREP(priv) \
+ netif_rx_schedule_prep(priv->dev, &priv->napi)
+#define JME_RX_SCHEDULE(priv) \
+ __netif_rx_schedule(priv->dev, &priv->napi);
+#endif
+
+
enum dynamic_pcc_values {
PCC_OFF = 0,
PCC_P1 = 1,
atomic_t nr_free;
};
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
-#define NET_STAT(priv) priv->stats
-#define NETDEV_GET_STATS(netdev, fun_ptr) \
- netdev->get_stats = fun_ptr
-#define DECLARE_NET_DEVICE_STATS struct net_device_stats stats;
-#else
-#define NET_STAT(priv) priv->dev->stats
-#define NETDEV_GET_STATS(netdev, fun_ptr)
-#define DECLARE_NET_DEVICE_STATS
-#endif
-
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-#define DECLARE_NAPI_STRUCT
-#define NETIF_NAPI_SET(dev, napis, pollfn, q) \
- dev->poll = pollfn; \
- dev->weight = q;
-#define JME_NAPI_HOLDER(holder) struct net_device *holder
-#define JME_NAPI_WEIGHT(w) int *w
-#define JME_NAPI_WEIGHT_VAL(w) *w
-#define JME_NAPI_WEIGHT_SET(w, r) *w = r
-#define JME_RX_COMPLETE(dev, napis) netif_rx_complete(dev)
-#define JME_NAPI_ENABLE(priv) netif_poll_enable(priv->dev);
-#define JME_NAPI_DISABLE(priv) netif_poll_disable(priv->dev);
-#define JME_RX_SCHEDULE_PREP(priv) \
- netif_rx_schedule_prep(priv->dev)
-#define JME_RX_SCHEDULE(priv) \
- __netif_rx_schedule(priv->dev);
-#else
-#define DECLARE_NAPI_STRUCT struct napi_struct napi;
-#define NETIF_NAPI_SET(dev, napis, pollfn, q) \
- netif_napi_add(dev, napis, pollfn, q);
-#define JME_NAPI_HOLDER(holder) struct napi_struct *holder
-#define JME_NAPI_WEIGHT(w) int w
-#define JME_NAPI_WEIGHT_VAL(w) w
-#define JME_NAPI_WEIGHT_SET(w, r)
-#define JME_RX_COMPLETE(dev, napis) netif_rx_complete(dev, napis)
-#define JME_NAPI_ENABLE(priv) napi_enable(&priv->napi);
-#define JME_NAPI_DISABLE(priv) \
- if(!napi_disable_pending(&priv->napi)) \
- napi_disable(&priv->napi);
-#define JME_RX_SCHEDULE_PREP(priv) \
- netif_rx_schedule_prep(priv->dev, &priv->napi)
-#define JME_RX_SCHEDULE(priv) \
- __netif_rx_schedule(priv->dev, &priv->napi);
-#endif
+#define JME_MSIX_VEC_NR 3
+/*
+ * Book-keeping for one MSI-X vector: handler, hardware vector number,
+ * whether request_irq() succeeded, and the irq name shown in
+ * /proc/interrupts.
+ */
+struct jme_msix_info {
+ irq_handler_t handler;
+ __u16 vector;
+ __u8 requested;
+ /*
+ * Sized for a full IFNAMSIZ-1 character netdev name plus the
+ * "-misc"/"-tx"/"-rx" suffix; a bare name[16] could overflow.
+ */
+ char name[IFNAMSIZ + 8];
+};
/*
* Jmac Adapter Private data
struct tasklet_struct txclean_task;
struct tasklet_struct linkch_task;
struct tasklet_struct pcc_task;
+ struct jme_msix_info msix[JME_MSIX_VEC_NR];
__u32 flags;
__u32 reg_txcs;
__u32 reg_txpfc;
};
enum jme_flags_bits {
JME_FLAG_MSI = 0x00000001,
- JME_FLAG_SSET = 0x00000002,
- JME_FLAG_TXCSUM = 0x00000004,
- JME_FLAG_TSO = 0x00000008,
- JME_FLAG_POLL = 0x00000010,
+ JME_FLAG_MSIX = 0x00000002,
+ JME_FLAG_SSET = 0x00000004,
+ JME_FLAG_TXCSUM = 0x00000008,
+ JME_FLAG_TSO = 0x00000010,
+ JME_FLAG_POLL = 0x00000020,
};
#define WAIT_TASKLET_TIMEOUT 500 /* 500 ms */
#define TX_TIMEOUT (5*HZ)
JME_TMCSR = JME_MISC| 0x00, /* Timer Control/Status Register */
JME_GPREG0 = JME_MISC| 0x08, /* General purpose REG-0 */
JME_GPREG1 = JME_MISC| 0x0C, /* General purpose REG-1 */
+ JME_MSIX_ENT = JME_MISC| 0x10, /* MSIX Entry table */
JME_IEVE = JME_MISC| 0x20, /* Interrupt Event Status */
JME_IREQ = JME_MISC| 0x24, /* Interrupt Req Status(For Debug) */
JME_IENS = JME_MISC| 0x28, /* Interrupt Enable - Setting Port */
/*
* Interrupt Status Bits
*/
-enum jme_interrupt_bits
-{
+enum jme_interrupt_bits {
INTR_SWINTR = 0x80000000,
INTR_TMINTR = 0x40000000,
INTR_LINKCH = 0x20000000,
INTR_TX1 = 0x00000002,
INTR_TX0 = 0x00000001,
};
-static const __u32 INTR_ENABLE = INTR_SWINTR |
- INTR_TMINTR |
- INTR_LINKCH |
- INTR_PCCRX0TO |
- INTR_PCCRX0 |
- INTR_PCCTXTO |
- INTR_PCCTX |
- INTR_RX0EMP;
+/*
+ * Interrupt enable masks.  INTR_ENABLE is the full set used in
+ * single-vector (INTx/MSI) mode; INTR_EN_TX, INTR_EN_RX0 and
+ * INTR_EN_MISC partition that set across the three MSI-X vectors
+ * (misc = everything not claimed by TX or RX0).
+ */
+enum jme_interrupt_enables {
+ INTR_ENABLE = INTR_SWINTR |
+ INTR_TMINTR |
+ INTR_LINKCH |
+ INTR_PCCRX0TO |
+ INTR_PCCRX0 |
+ INTR_PCCTXTO |
+ INTR_PCCTX |
+ INTR_RX0EMP,
+
+ INTR_EN_TX = INTR_PCCTXTO |
+ INTR_PCCTX,
+
+ INTR_EN_RX0 = INTR_PCCRX0TO |
+ INTR_PCCRX0 |
+ INTR_RX0EMP,
+
+ INTR_EN_MISC = INTR_ENABLE & ~(INTR_EN_TX | INTR_EN_RX0),
+};
/*
* PCC Control Registers