#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
+#include <linux/irq.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
+#include <linux/net.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
static void
jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
- struct jme_ring *rxring = jme->rxring;
+ struct jme_ring *rxring = &(jme->rxring[0]);
register volatile struct rxdesc* rxdesc = rxring->desc;
struct jme_buffer_info *rxbi = rxring->bufinf;
rxdesc += i;
struct jme_adapter *jme = (struct jme_adapter*)arg;
struct net_device *netdev = jme->dev;
-
if(unlikely(!netif_carrier_ok(netdev) ||
(atomic_read(&jme->link_changing) != 1)
)) {
/*
* Disable interrupt
*/
jwrite32f(jme, JME_IENC, INTR_ENABLE);
- /*
- * Write 1 clear interrupt status
- */
- jwrite32f(jme, JME_IEVE, intrstat);
-
if(intrstat & (INTR_LINKCH | INTR_SWINTR)) {
tasklet_schedule(&jme->linkch_task);
goto out_reenable;
}

if(intrstat & INTR_RX0EMP) {
atomic_inc(&jme->rx_empty);
tasklet_schedule(&jme->rxempty_task);
}
-
- if(intrstat & (INTR_PCCRX0TO | INTR_PCCRX0))
+ else if(intrstat & (INTR_PCCRX0TO | INTR_PCCRX0))
tasklet_schedule(&jme->rxclean_task);
}
out_reenable:
+ /*
+ * Write 1 clear interrupt status
+ */
+ jwrite32f(jme, JME_IEVE, intrstat);
+
/*
* Re-enable interrupt
*/
jwrite32f(jme, JME_IENS, INTR_ENABLE);

return IRQ_HANDLED;
}
+static irqreturn_t
+jme_msix_misc(int irq, void *dev_id)
+{
+ struct net_device *netdev = dev_id;
+ struct jme_adapter *jme = netdev_priv(netdev);
+ __u32 intrstat;
+
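+ /*
+ * The NIC DMAs a copy of its interrupt event register into the
+ * shadow buffer; sync it so the CPU reads the latest status
+ * without an MMIO access.
+ */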
+ pci_dma_sync_single_for_cpu(jme->pdev,
+ jme->shadow_dma,
+ sizeof(__u32) * SHADOW_REG_NR,
+ PCI_DMA_FROMDEVICE);
+ intrstat = jme->shadow_regs[SHADOW_IEVE];
+ jme->shadow_regs[SHADOW_IEVE] &= ~INTR_EN_MISC;
+
+ /*
+ * Disable interrupt
+ */
+ jwrite32f(jme, JME_IENC, INTR_EN_MISC);
+
+ if(intrstat & (INTR_LINKCH | INTR_SWINTR)) {
+ tasklet_schedule(&jme->linkch_task);
+ goto out_reenable;
+ }
+
+ if(intrstat & INTR_TMINTR)
+ tasklet_schedule(&jme->pcc_task);
+
+out_reenable:
+ /*
+ * Write 1 clear interrupt status
+ */
+ jwrite32f(jme, JME_IEVE, INTR_EN_MISC);
+
+ /*
+ * Re-enable interrupt
+ */
+ jwrite32f(jme, JME_IENS, INTR_EN_MISC);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+jme_msix_tx(int irq, void *dev_id)
+{
+ struct net_device *netdev = dev_id;
+ struct jme_adapter *jme = netdev_priv(netdev);
+
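+ /*
+ * TX completion only needs to kick the tx-clean tasklet; the
+ * event bits are acknowledged (write-1-clear) on the way out.
+ */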
+ /*
+ * Disable interrupt
+ */
+ jwrite32f(jme, JME_IENC, INTR_EN_TX);
+
+ if(unlikely(atomic_read(&jme->link_changing) != 1))
+ goto out_reenable;
+
+ tasklet_schedule(&jme->txclean_task);
+
+out_reenable:
+ /*
+ * Write 1 clear interrupt status
+ */
+ jwrite32f(jme, JME_IEVE, INTR_EN_TX | INTR_TX0);
+
+ /*
+ * Re-enable interrupt
+ */
+ jwrite32f(jme, JME_IENS, INTR_EN_TX);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+jme_msix_rx(int irq, void *dev_id)
+{
+ struct net_device *netdev = dev_id;
+ struct jme_adapter *jme = netdev_priv(netdev);
+ __u32 intrstat;
+
+ pci_dma_sync_single_for_cpu(jme->pdev,
+ jme->shadow_dma,
+ sizeof(__u32) * SHADOW_REG_NR,
+ PCI_DMA_FROMDEVICE);
+ intrstat = jme->shadow_regs[SHADOW_IEVE];
+ jme->shadow_regs[SHADOW_IEVE] &= ~INTR_EN_RX0;
+
+ /*
+ * Disable interrupt
+ */
+ jwrite32f(jme, JME_IENC, INTR_EN_RX0);
+
+ if(unlikely(atomic_read(&jme->link_changing) != 1))
+ goto out_reenable;
+
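+ /*
+ * In polling (NAPI) mode, hand the work to the poll routine;
+ * otherwise dispatch the rx-empty or rx-clean tasklet depending
+ * on whether the RX ring ran empty.
+ */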
+ if(jme->flags & JME_FLAG_POLL) {
+ if(intrstat & INTR_RX0EMP)
+ atomic_inc(&jme->rx_empty);
+
+ if(likely(JME_RX_SCHEDULE_PREP(jme))) {
+ jme_polling_mode(jme);
+ JME_RX_SCHEDULE(jme);
+ }
+ }
+ else {
+ if(intrstat & INTR_RX0EMP) {
+ atomic_inc(&jme->rx_empty);
+ tasklet_schedule(&jme->rxempty_task);
+ }
+ else if(intrstat & (INTR_PCCRX0TO | INTR_PCCRX0))
+ tasklet_schedule(&jme->rxclean_task);
+ }
+
+out_reenable:
+ /*
+ * Write 1 clear interrupt status
+ */
+ jwrite32f(jme, JME_IEVE, INTR_EN_RX0 | INTR_RX0);
+
+ /*
+ * Re-enable interrupt
+ */
+ jwrite32f(jme, JME_IENS, INTR_EN_RX0);
+
+ return IRQ_HANDLED;
+}
static void
jme_reset_link(struct jme_adapter *jme)
spin_unlock_irqrestore(&jme->phy_lock, flags);
}
+static void
+jme_setup_msix_info(struct jme_adapter *jme, struct msix_entry *msix_ent)
+{
+ int i;
+
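+ /*
+ * Name each vector after the interface with a role suffix so
+ * they are distinguishable in /proc/interrupts.
+ */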
+ for (i = 0; i < JME_MSIX_VEC_NR; i++) {
+ jme->msix[i].requested = false;
+ jme->msix[i].vector = msix_ent[i].vector;
+ strcpy(jme->msix[i].name, jme->dev->name);
+ }
+
+ jme->msix[0].handler = jme_msix_misc;
+ jme->msix[1].handler = jme_msix_tx;
+ jme->msix[2].handler = jme_msix_rx;
+
+ strcat(jme->msix[0].name, "-misc");
+ strcat(jme->msix[1].name, "-tx");
+ strcat(jme->msix[2].name, "-rx");
+}
+
+static void
+jme_fill_msix_regs(struct jme_adapter *jme)
+{
+ __u32 mask = 1, reg_msix = 0;
+ int i, vec;
+
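+ /*
+ * Route each of the 32 interrupt sources to an MSI-X vector:
+ * TX events to vector 1, RX0 events to vector 2, everything
+ * else to vector 0 (misc). Each source occupies a 4-bit field,
+ * eight sources per 32-bit MSIX entry register.
+ */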
+ for(i = 0 ; i < 32 ; ++i) {
+ if(mask & INTR_EN_TX)
+ vec = 1;
+ else if(mask & INTR_EN_RX0)
+ vec = 2;
+ else
+ vec = 0;
+
+ if(!(i & 7))
+ reg_msix = 0;
+ reg_msix |= (vec & 7) << ((i & 7) << 2);
+ if((i & 7) == 7)
+ jwrite32(jme,
+ JME_MSIX_ENT + ((i >> 3) << 2),
+ reg_msix);
+
+ mask <<= 1;
+ }
+}
+
+static int
+jme_request_msix_irq(struct jme_adapter *jme)
+{
+ int i, rc = 0;
+ struct jme_msix_info *msix_info;
+
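+ /*
+ * Request the vectors in order; on failure stop here and let
+ * the caller tear down the already-requested ones through
+ * jme_free_msix().
+ */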
+ for (i = 0; i < JME_MSIX_VEC_NR; i++) {
+ msix_info = jme->msix + i;
+ rc = request_irq(msix_info->vector,
+ msix_info->handler,
+ 0,
+ msix_info->name,
+ jme->dev);
+ if(rc)
+ break;
+#if 0
+#ifdef CONFIG_SMP
+ /*
+ * Try to set different cpumask for each irq,
+ * ignoring assign fail since it has no critical
+ * effect to the working function.
+ */
+ if(irq_can_set_affinity(msix_info->vector))
+ irq_set_affinity(msix_info->vector,
+ cpumask_of_cpu(i % num_online_cpus()));
+#endif
+#endif
+ msix_info->requested = true;
+ }
+
+ return rc;
+}
+
+static void
+jme_free_msix(struct jme_adapter *jme)
+{
+ int i;
+ struct jme_msix_info *msix_info;
+
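+ /*
+ * Vectors were requested in order, so free them up to the
+ * first one that was never requested.
+ */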
+ for (i = 0; i < JME_MSIX_VEC_NR; i++) {
+ msix_info = jme->msix + i;
+ if(msix_info->requested)
+ free_irq(msix_info->vector, jme->dev);
+ else
+ break;
+ msix_info->requested = false;
+ }
+ pci_disable_msix(jme->pdev);
+}
+
+static int
+jme_request_msix(struct jme_adapter *jme)
+{
+ int i, rc;
+ struct msix_entry msix_ent[JME_MSIX_VEC_NR];
+
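+ /*
+ * The entry index selects the slot in the device's MSI-X
+ * table; pci_enable_msix() fills in the vector numbers on
+ * success.
+ */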
+ for (i = 0; i < JME_MSIX_VEC_NR; i++) {
+ msix_ent[i].entry = i;
+ msix_ent[i].vector = 0;
+ }
+
+ rc = pci_enable_msix(jme->pdev, msix_ent, JME_MSIX_VEC_NR);
+ if (rc)
+ goto out;
+
+ jme_setup_msix_info(jme, msix_ent);
+ jme_fill_msix_regs(jme);
+
+ rc = jme_request_msix_irq(jme);
+ if(rc)
+ goto out_free_msix;
+
+ return 0;
+
+out_free_msix:
+ jme_free_msix(jme);
+out:
+ return rc;
+}
+
static int
jme_request_irq(struct jme_adapter *jme)
{
irq_handler_t handler = jme_intr;
int irq_flags = IRQF_SHARED;
- if (!pci_enable_msi(jme->pdev)) {
+
+ if(!jme_request_msix(jme)) {
+ jme->flags |= JME_FLAG_MSIX;
+ return 0;
+ }
+
+ if(!pci_enable_msi(jme->pdev)) {
jme->flags |= JME_FLAG_MSI;
handler = jme_msi;
irq_flags = 0;
static void
jme_free_irq(struct jme_adapter *jme)
{
- free_irq(jme->pdev->irq, jme->dev);
- if (jme->flags & JME_FLAG_MSI) {
- pci_disable_msi(jme->pdev);
- jme->flags &= ~JME_FLAG_MSI;
- jme->dev->irq = jme->pdev->irq;
- }
+ if(jme->flags & JME_FLAG_MSIX) {
+ jme_free_msix(jme);
+ jme->flags &= ~JME_FLAG_MSIX;
+ }
+ else {
+ free_irq(jme->pdev->irq, jme->dev);
+ if (jme->flags & JME_FLAG_MSI) {
+ pci_disable_msi(jme->pdev);
+ jme->flags &= ~JME_FLAG_MSI;
+ jme->dev->irq = jme->pdev->irq;
+ }
+ }
}
static int
jme_get_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, u8 *data)
{
struct jme_adapter *jme = netdev_priv(netdev);
- int i, offset = eeprom->offset, len = eeprom->len;
+ int i, offset = eeprom->offset, len = eeprom->len, idx;
/*
* ethtool will check the boundary for us
*/
+ memset(data, 0xFF, len);
eeprom->magic = JME_EEPROM_MAGIC;
- for(i = 0 ; i < len ; ++i)
- data[i] = jme_smb_read(jme, i + offset);
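+ /*
+ * The buffer was pre-filled with 0xFF, so bytes past an early
+ * stop read back as blank. Stop when a read returns 0xFF, and
+ * when every third byte (offset 2, 5, 8, ...) has bit 7 set,
+ * read at most two more bytes beyond it.
+ */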
+ for(i = 0 ; i < len ; ++i) {
+ idx = i + offset;
+ data[i] = jme_smb_read(jme, idx);
+ if(data[i] == 0xFF)
+ break;
+ if((idx > 1) && !((idx - 2) % 3) && (data[i] & 0x80))
+ len = (len > i + 3) ? i + 3 : len;
+ }
return 0;
}