]> bbs.cooldavid.org Git - net-next-2.6.git/commitdiff
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wirel...
authorDavid S. Miller <davem@davemloft.net>
Wed, 1 Sep 2010 19:01:05 +0000 (12:01 -0700)
committerDavid S. Miller <davem@davemloft.net>
Wed, 1 Sep 2010 19:01:05 +0000 (12:01 -0700)
85 files changed:
drivers/isdn/hardware/avm/Kconfig
drivers/isdn/hardware/avm/c4.c
drivers/isdn/hardware/avm/t1pci.c
drivers/isdn/hardware/mISDN/mISDNinfineon.c
drivers/net/3c59x.c
drivers/net/Kconfig
drivers/net/Makefile
drivers/net/bnx2x/bnx2x.h
drivers/net/bnx2x/bnx2x_main.c
drivers/net/caif/Kconfig
drivers/net/caif/caif_spi_slave.c
drivers/net/e1000e/82571.c
drivers/net/e1000e/defines.h
drivers/net/e1000e/lib.c
drivers/net/ehea/ehea.h
drivers/net/ehea/ehea_main.c
drivers/net/ibmveth.c
drivers/net/ll_temac_main.c
drivers/net/netxen/netxen_nic.h
drivers/net/netxen/netxen_nic_init.c
drivers/net/netxen/netxen_nic_main.c
drivers/net/pcmcia/pcnet_cs.c
drivers/net/phy/Kconfig
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/pxa168_eth.c [new file with mode: 0644]
drivers/net/qlcnic/qlcnic_main.c
drivers/net/qlge/qlge_main.c
drivers/net/sh_eth.c
drivers/net/usb/usbnet.c
drivers/net/wan/farsync.c
drivers/s390/net/claw.c
drivers/s390/net/claw.h
drivers/s390/net/ctcm_fsms.c
drivers/s390/net/ctcm_main.c
drivers/s390/net/ctcm_main.h
drivers/s390/net/ctcm_mpc.c
drivers/s390/net/ctcm_sysfs.c
include/linux/etherdevice.h
include/linux/if_ether.h
include/linux/if_fddi.h
include/linux/if_hippi.h
include/linux/if_pppox.h
include/linux/ipv6.h
include/linux/nbd.h
include/linux/ncp.h
include/linux/netfilter/xt_IDLETIMER.h
include/linux/netfilter/xt_ipvs.h
include/linux/netpoll.h
include/linux/phonet.h
include/linux/pxa168_eth.h [new file with mode: 0644]
include/linux/rfkill.h
include/net/sock.h
include/net/tcp.h
net/8021q/vlan_dev.c
net/ax25/ax25_ds_timer.c
net/bridge/br_netfilter.c
net/caif/cfpkt_skbuff.c
net/caif/cfrfml.c
net/can/bcm.c
net/core/dev.c
net/dsa/Kconfig
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/tcp.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_timer.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/route.c
net/irda/af_irda.c
net/irda/irlan/irlan_eth.c
net/l2tp/l2tp_eth.c
net/netlink/af_netlink.c
net/rds/recv.c
net/sched/act_gact.c
net/sched/act_mirred.c
net/sched/act_nat.c
net/sched/act_simple.c
net/sched/act_skbedit.c
net/sched/sch_api.c
net/sched/sch_atm.c
net/sched/sch_sfq.c
net/sched/sch_tbf.c
net/sched/sch_teql.c
net/xfrm/xfrm_user.c

index 5dbcbe3a54a610d81d61b6e0b4d227af7751636d..b99b906ea9b1bfef709bec43e61237a74b177949 100644 (file)
@@ -36,12 +36,13 @@ config ISDN_DRV_AVMB1_T1ISA
 
 config ISDN_DRV_AVMB1_B1PCMCIA
        tristate "AVM B1/M1/M2 PCMCIA support"
+       depends on PCMCIA
        help
          Enable support for the PCMCIA version of the AVM B1 card.
 
 config ISDN_DRV_AVMB1_AVM_CS
        tristate "AVM B1/M1/M2 PCMCIA cs module"
-       depends on ISDN_DRV_AVMB1_B1PCMCIA && PCMCIA
+       depends on ISDN_DRV_AVMB1_B1PCMCIA
        help
          Enable the PCMCIA client driver for the AVM B1/M1/M2
          PCMCIA cards.
index 7715d3242ec81849dbd725cbbaaf52a7cd597380..d3530f6e8115350668b148377d61a85256883a0a 100644 (file)
@@ -1273,6 +1273,7 @@ static int __devinit c4_probe(struct pci_dev *dev,
        if (retval != 0) {
                printk(KERN_ERR "c4: no AVM-C%d at i/o %#x, irq %d detected, mem %#x\n",
                       nr, param.port, param.irq, param.membase);
+               pci_disable_device(dev);
                return -ENODEV;
        }
        return 0;
index 5a3f830980185c45614fb4fbf59f51924032223c..a79eb5afb92dda279d7634b51bd20a0e047ace40 100644 (file)
@@ -210,6 +210,7 @@ static int __devinit t1pci_probe(struct pci_dev *dev,
        if (retval != 0) {
                printk(KERN_ERR "t1pci: no AVM-T1-PCI at i/o %#x, irq %d detected, mem %#x\n",
                       param.port, param.irq, param.membase);
+               pci_disable_device(dev);
                return -ENODEV;
        }
        return 0;
index d2dd61d65d519687fbb20493627b8d62045e37d2..af25e1f3efd4a784daa5aa78128e908e07cef648 100644 (file)
@@ -1094,6 +1094,7 @@ inf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                pr_info("mISDN: do not have informations about adapter at %s\n",
                        pci_name(pdev));
                kfree(card);
+               pci_disable_device(pdev);
                return -EINVAL;
        } else
                pr_notice("mISDN: found adapter %s at %s\n",
@@ -1103,7 +1104,7 @@ inf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        pci_set_drvdata(pdev, card);
        err = setup_instance(card);
        if (err) {
-               pci_disable_device(card->pdev);
+               pci_disable_device(pdev);
                kfree(card);
                pci_set_drvdata(pdev, NULL);
        } else if (ent->driver_data == INF_SCT_1) {
@@ -1114,6 +1115,7 @@ inf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                        sc = kzalloc(sizeof(struct inf_hw), GFP_KERNEL);
                        if (!sc) {
                                release_card(card);
+                               pci_disable_device(pdev);
                                return -ENOMEM;
                        }
                        sc->irq = card->irq;
@@ -1121,6 +1123,7 @@ inf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                        sc->ci = card->ci + i;
                        err = setup_instance(sc);
                        if (err) {
+                               pci_disable_device(pdev);
                                kfree(sc);
                                release_card(card);
                                break;
index c754d88e5ec92d0af82d80094a4576256a50dc61..c685a55fc2f415a7cf376344753366098603c78b 100644 (file)
@@ -633,7 +633,8 @@ struct vortex_private {
                open:1,
                medialock:1,
                must_free_region:1,                             /* Flag: if zero, Cardbus owns the I/O region */
-               large_frames:1;                 /* accept large frames */
+               large_frames:1,                 /* accept large frames */
+               handling_irq:1;                 /* private in_irq indicator */
        int drv_flags;
        u16 status_enable;
        u16 intr_enable;
@@ -2133,6 +2134,15 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
                           dev->name, vp->cur_tx);
        }
 
+       /*
+        * We can't allow a recursion from our interrupt handler back into the
+        * tx routine, as they take the same spin lock, and that causes
+        * deadlock.  Just return NETDEV_TX_BUSY and let the stack try again in
+        * a bit
+        */
+       if (vp->handling_irq)
+               return NETDEV_TX_BUSY;
+
        if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
                if (vortex_debug > 0)
                        pr_warning("%s: BUG! Tx Ring full, refusing to send buffer.\n",
@@ -2335,11 +2345,13 @@ boomerang_interrupt(int irq, void *dev_id)
 
        ioaddr = vp->ioaddr;
 
+
        /*
         * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
         * and boomerang_start_xmit
         */
        spin_lock(&vp->lock);
+       vp->handling_irq = 1;
 
        status = ioread16(ioaddr + EL3_STATUS);
 
@@ -2447,6 +2459,7 @@ boomerang_interrupt(int irq, void *dev_id)
                pr_debug("%s: exiting interrupt, status %4.4x.\n",
                           dev->name, status);
 handler_exit:
+       vp->handling_irq = 0;
        spin_unlock(&vp->lock);
        return IRQ_HANDLED;
 }
index ebe68395ecf8c98cd292052d6e78cf5ad9f4ff31..fe581566cb266231cd051ee1593ae35d82f49022 100644 (file)
@@ -928,6 +928,16 @@ config SMC91X
          The module will be called smc91x.  If you want to compile it as a
          module, say M here and read <file:Documentation/kbuild/modules.txt>.
 
+config PXA168_ETH
+       tristate "Marvell pxa168 ethernet support"
+       depends on CPU_PXA168
+       select PHYLIB
+       help
+         This driver supports the pxa168 Ethernet ports.
+
+         To compile this driver as a module, choose M here. The module
+         will be called pxa168_eth.
+
 config NET_NETX
        tristate "NetX Ethernet support"
        select MII
index 56e8c27f77cebe9ef3b9ac3c1284f7e2975f8448..3e8f150c4b14b0034edb3632b9de33b1338b9635 100644 (file)
@@ -244,6 +244,7 @@ obj-$(CONFIG_MYRI10GE) += myri10ge/
 obj-$(CONFIG_SMC91X) += smc91x.o
 obj-$(CONFIG_SMC911X) += smc911x.o
 obj-$(CONFIG_SMSC911X) += smsc911x.o
+obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
 obj-$(CONFIG_BFIN_MAC) += bfin_mac.o
 obj-$(CONFIG_DM9000) += dm9000.o
 obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
index 53af9c93e75c3bca661abfb8dce7aabe96589f52..0c2d96ed561c46ebd63f3c03c119c0751b5204e1 100644 (file)
@@ -20,8 +20,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.52.53-3"
-#define DRV_MODULE_RELDATE      "2010/18/04"
+#define DRV_MODULE_VERSION      "1.52.53-4"
+#define DRV_MODULE_RELDATE      "2010/16/08"
 #define BNX2X_BC_VER            0x040200
 
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
index b4ec2b02a465cf7d822f23700c119d6e93a2121b..f8c3f08e4ce73fb4d5d37739fa7dd18cfc557da7 100644 (file)
@@ -4328,10 +4328,12 @@ static int bnx2x_init_port(struct bnx2x *bp)
                val |= aeu_gpio_mask;
                REG_WR(bp, offset, val);
                }
+               bp->port.need_hw_lock = 1;
                break;
 
-       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+               bp->port.need_hw_lock = 1;
+       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
                /* add SPIO 5 to group 0 */
                {
                u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
@@ -4341,7 +4343,10 @@ static int bnx2x_init_port(struct bnx2x *bp)
                REG_WR(bp, reg_addr, val);
                }
                break;
-
+       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
+       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
+               bp->port.need_hw_lock = 1;
+               break;
        default:
                break;
        }
index 631a6242b0112aec566b9df0916eb9b1d1552be5..75bfc3a9d95f3f118e6eae1cdc649fd0a442baeb 100644 (file)
@@ -15,7 +15,7 @@ config CAIF_TTY
 
 config CAIF_SPI_SLAVE
        tristate "CAIF SPI transport driver for slave interface"
-       depends on CAIF
+       depends on CAIF && HAS_DMA
        default n
        ---help---
        The CAIF Link layer SPI Protocol driver for Slave SPI interface.
index 077ccf840edf24dc46107334c39c5758de248465..2111dbfea6feb8b53f56e73567e54fd4d11a818b 100644 (file)
 #include <net/caif/caif_spi.h>
 
 #ifndef CONFIG_CAIF_SPI_SYNC
-#define SPI_DATA_POS SPI_CMD_SZ
+#define SPI_DATA_POS 0
 static inline int forward_to_spi_cmd(struct cfspi *cfspi)
 {
        return cfspi->rx_cpck_len;
 }
 #else
-#define SPI_DATA_POS 0
+#define SPI_DATA_POS SPI_CMD_SZ
 static inline int forward_to_spi_cmd(struct cfspi *cfspi)
 {
        return 0;
index a4a0d2b6eb1c60e116811b31378a426ad00a44e2..d3d4a57e24505f9c36af9ef139bfc9133788a4bd 100644 (file)
@@ -936,12 +936,14 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
        ew32(IMC, 0xffffffff);
        icr = er32(ICR);
 
-       /* Install any alternate MAC address into RAR0 */
-       ret_val = e1000_check_alt_mac_addr_generic(hw);
-       if (ret_val)
-               return ret_val;
+       if (hw->mac.type == e1000_82571) {
+               /* Install any alternate MAC address into RAR0 */
+               ret_val = e1000_check_alt_mac_addr_generic(hw);
+               if (ret_val)
+                       return ret_val;
 
-       e1000e_set_laa_state_82571(hw, true);
+               e1000e_set_laa_state_82571(hw, true);
+       }
 
        /* Reinitialize the 82571 serdes link state machine */
        if (hw->phy.media_type == e1000_media_type_internal_serdes)
@@ -1618,14 +1620,16 @@ static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
 {
        s32 ret_val = 0;
 
-       /*
-        * If there's an alternate MAC address place it in RAR0
-        * so that it will override the Si installed default perm
-        * address.
-        */
-       ret_val = e1000_check_alt_mac_addr_generic(hw);
-       if (ret_val)
-               goto out;
+       if (hw->mac.type == e1000_82571) {
+               /*
+                * If there's an alternate MAC address place it in RAR0
+                * so that it will override the Si installed default perm
+                * address.
+                */
+               ret_val = e1000_check_alt_mac_addr_generic(hw);
+               if (ret_val)
+                       goto out;
+       }
 
        ret_val = e1000_read_mac_addr_generic(hw);
 
@@ -1833,6 +1837,7 @@ struct e1000_info e1000_82573_info = {
                                  | FLAG_HAS_SMART_POWER_DOWN
                                  | FLAG_HAS_AMT
                                  | FLAG_HAS_SWSM_ON_LOAD,
+       .flags2                 = FLAG2_DISABLE_ASPM_L1,
        .pba                    = 20,
        .max_hw_frame_size      = ETH_FRAME_LEN + ETH_FCS_LEN,
        .get_variants           = e1000_get_variants_82571,
index 307a72f483ee644fb1199776e4b0521739badb46..93b3bedae8d2b2457a88f52b4664fe1b4b6b7dd8 100644 (file)
 #define E1000_FLASH_UPDATES  2000
 
 /* NVM Word Offsets */
+#define NVM_COMPAT                 0x0003
 #define NVM_ID_LED_SETTINGS        0x0004
 #define NVM_INIT_CONTROL2_REG      0x000F
 #define NVM_INIT_CONTROL3_PORT_B   0x0014
 /* Mask bits for fields in Word 0x1a of the NVM */
 #define NVM_WORD1A_ASPM_MASK  0x000C
 
+/* Mask bits for fields in Word 0x03 of the EEPROM */
+#define NVM_COMPAT_LOM    0x0800
+
 /* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
 #define NVM_SUM                    0xBABA
 
index df4a2792293123eba7b1b6f3a589220ce1185ea1..0fd4eb5ac5fb9241f57061ee5eb76dff06e8afbf 100644 (file)
@@ -183,6 +183,16 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
        u16 offset, nvm_alt_mac_addr_offset, nvm_data;
        u8 alt_mac_addr[ETH_ALEN];
 
+       ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data);
+       if (ret_val)
+               goto out;
+
+       /* Check for LOM (vs. NIC) or one of two valid mezzanine cards */
+       if (!((nvm_data & NVM_COMPAT_LOM) ||
+             (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) ||
+             (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)))
+               goto out;
+
        ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
                                 &nvm_alt_mac_addr_offset);
        if (ret_val) {
index 0060e422f171bda3c385ef757fce13ee39d0a517..2ce67f6152cd53a0c02325c743cc040bc9e7bbde 100644 (file)
@@ -40,7 +40,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME       "ehea"
-#define DRV_VERSION    "EHEA_0105"
+#define DRV_VERSION    "EHEA_0106"
 
 /* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1
@@ -400,6 +400,7 @@ struct ehea_port_res {
        u32 poll_counter;
        struct net_lro_mgr lro_mgr;
        struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
+       int sq_restart_flag;
 };
 
 
index 3beba70b7dea5827c45927d2c9b550bb68d148f5..adb5994c125f1f5b5113c3c7c8f0b36f189e05d4 100644 (file)
@@ -776,6 +776,53 @@ static int ehea_proc_rwqes(struct net_device *dev,
        return processed;
 }
 
+#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull
+
+static void reset_sq_restart_flag(struct ehea_port *port)
+{
+       int i;
+
+       for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+               struct ehea_port_res *pr = &port->port_res[i];
+               pr->sq_restart_flag = 0;
+       }
+}
+
+static void check_sqs(struct ehea_port *port)
+{
+       struct ehea_swqe *swqe;
+       int swqe_index;
+       int i, k;
+
+       for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+               struct ehea_port_res *pr = &port->port_res[i];
+               k = 0;
+               swqe = ehea_get_swqe(pr->qp, &swqe_index);
+               memset(swqe, 0, SWQE_HEADER_SIZE);
+               atomic_dec(&pr->swqe_avail);
+
+               swqe->tx_control |= EHEA_SWQE_PURGE;
+               swqe->wr_id = SWQE_RESTART_CHECK;
+               swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
+               swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
+               swqe->immediate_data_length = 80;
+
+               ehea_post_swqe(pr->qp, swqe);
+
+               while (pr->sq_restart_flag == 0) {
+                       msleep(5);
+                       if (++k == 100) {
+                               ehea_error("HW/SW queues out of sync");
+                               ehea_schedule_port_reset(pr->port);
+                               return;
+                       }
+               }
+       }
+
+       return;
+}
+
+
 static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 {
        struct sk_buff *skb;
@@ -793,6 +840,13 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 
                cqe_counter++;
                rmb();
+
+               if (cqe->wr_id == SWQE_RESTART_CHECK) {
+                       pr->sq_restart_flag = 1;
+                       swqe_av++;
+                       break;
+               }
+
                if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
                        ehea_error("Bad send completion status=0x%04X",
                                   cqe->status);
@@ -2675,8 +2729,10 @@ static void ehea_flush_sq(struct ehea_port *port)
                int k = 0;
                while (atomic_read(&pr->swqe_avail) < swqe_max) {
                        msleep(5);
-                       if (++k == 20)
+                       if (++k == 20) {
+                               ehea_error("WARNING: sq not flushed completely");
                                break;
+                       }
                }
        }
 }
@@ -2917,6 +2973,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
                                        port_napi_disable(port);
                                        mutex_unlock(&port->port_lock);
                                }
+                               reset_sq_restart_flag(port);
                        }
 
                        /* Unregister old memory region */
@@ -2951,6 +3008,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
                                                mutex_lock(&port->port_lock);
                                                port_napi_enable(port);
                                                ret = ehea_restart_qps(dev);
+                                               check_sqs(port);
                                                if (!ret)
                                                        netif_wake_queue(dev);
                                                mutex_unlock(&port->port_lock);
index 2602852cc55a6037c5160575f3620632397fcd2c..4734c939ad03574a63e6dd0bc9ccf6d8cf29c06a 100644 (file)
@@ -1113,7 +1113,8 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
        struct ibmveth_adapter *adapter = netdev_priv(dev);
        struct vio_dev *viodev = adapter->vdev;
        int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
-       int i;
+       int i, rc;
+       int need_restart = 0;
 
        if (new_mtu < IBMVETH_MAX_MTU)
                return -EINVAL;
@@ -1127,35 +1128,32 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
 
        /* Deactivate all the buffer pools so that the next loop can activate
           only the buffer pools necessary to hold the new MTU */
-       for (i = 0; i < IbmVethNumBufferPools; i++)
-               if (adapter->rx_buff_pool[i].active) {
-                       ibmveth_free_buffer_pool(adapter,
-                                                &adapter->rx_buff_pool[i]);
-                       adapter->rx_buff_pool[i].active = 0;
-               }
+       if (netif_running(adapter->netdev)) {
+               need_restart = 1;
+               adapter->pool_config = 1;
+               ibmveth_close(adapter->netdev);
+               adapter->pool_config = 0;
+       }
 
        /* Look for an active buffer pool that can hold the new MTU */
        for(i = 0; i<IbmVethNumBufferPools; i++) {
                adapter->rx_buff_pool[i].active = 1;
 
                if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
-                       if (netif_running(adapter->netdev)) {
-                               adapter->pool_config = 1;
-                               ibmveth_close(adapter->netdev);
-                               adapter->pool_config = 0;
-                               dev->mtu = new_mtu;
-                               vio_cmo_set_dev_desired(viodev,
-                                               ibmveth_get_desired_dma
-                                               (viodev));
-                               return ibmveth_open(adapter->netdev);
-                       }
                        dev->mtu = new_mtu;
                        vio_cmo_set_dev_desired(viodev,
                                                ibmveth_get_desired_dma
                                                (viodev));
+                       if (need_restart) {
+                               return ibmveth_open(adapter->netdev);
+                       }
                        return 0;
                }
        }
+
+       if (need_restart && (rc = ibmveth_open(adapter->netdev)))
+               return rc;
+
        return -EINVAL;
 }
 
index 4eea3f70c5cf1865415b22b6464635af98858136..09b813f1c3cdc09af9a036a3825e95123c8fb6a4 100644 (file)
@@ -902,8 +902,8 @@ temac_poll_controller(struct net_device *ndev)
        disable_irq(lp->tx_irq);
        disable_irq(lp->rx_irq);
 
-       ll_temac_rx_irq(lp->tx_irq, lp);
-       ll_temac_tx_irq(lp->rx_irq, lp);
+       ll_temac_rx_irq(lp->tx_irq, ndev);
+       ll_temac_tx_irq(lp->rx_irq, ndev);
 
        enable_irq(lp->tx_irq);
        enable_irq(lp->rx_irq);
index ffa1b9ce1cc5a8f4c474136940df4de36d853c55..6dca3574e35507a94ca7e5b1b518a06199e99e03 100644 (file)
@@ -53,8 +53,8 @@
 
 #define _NETXEN_NIC_LINUX_MAJOR 4
 #define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 73
-#define NETXEN_NIC_LINUX_VERSIONID  "4.0.73"
+#define _NETXEN_NIC_LINUX_SUBVERSION 74
+#define NETXEN_NIC_LINUX_VERSIONID  "4.0.74"
 
 #define NETXEN_VERSION_CODE(a, b, c)   (((a) << 24) + ((b) << 16) + (c))
 #define _major(v)      (((v) >> 24) & 0xff)
index c865dda2adf15f8b59a579640d416099e9fabc0c..cabae7bb1fc6777d3366c5a8728feadcd53d0aa3 100644 (file)
@@ -1805,8 +1805,6 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
        netxen_ctx_msg msg = 0;
        struct list_head *head;
 
-       spin_lock(&rds_ring->lock);
-
        producer = rds_ring->producer;
 
        head = &rds_ring->free_list;
@@ -1853,8 +1851,6 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
                                        NETXEN_RCV_PRODUCER_OFFSET), msg);
                }
        }
-
-       spin_unlock(&rds_ring->lock);
 }
 
 static void
index fd86e18604e636a5b1ede55a077d56dd2ba06713..73d31459223098c8e3727a0565ed12805d58e388 100644 (file)
@@ -2032,8 +2032,6 @@ struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
        struct netxen_adapter *adapter = netdev_priv(netdev);
        struct net_device_stats *stats = &netdev->stats;
 
-       memset(stats, 0, sizeof(*stats));
-
        stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
        stats->tx_packets = adapter->stats.xmitfinished;
        stats->rx_bytes = adapter->stats.rxbytes;
@@ -2133,9 +2131,16 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void netxen_nic_poll_controller(struct net_device *netdev)
 {
+       int ring;
+       struct nx_host_sds_ring *sds_ring;
        struct netxen_adapter *adapter = netdev_priv(netdev);
+       struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
        disable_irq(adapter->irq);
-       netxen_intr(adapter->irq, adapter);
+       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+               netxen_intr(adapter->irq, sds_ring);
+       }
        enable_irq(adapter->irq);
 }
 #endif
index bfdef72c5d5ea6371b2fc634c9e2a8476d13fa33..7fd8c55288c9e2709a88795ea0d9fe39b9e13680 100644 (file)
@@ -1644,6 +1644,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
        PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCETTX", 0x547e66dc, 0x6fc5459b),
        PCMCIA_DEVICE_PROD_ID12("iPort", "10/100 Ethernet Card", 0x56c538d2, 0x11b0ffc0),
        PCMCIA_DEVICE_PROD_ID12("KANSAI ELECTRIC CO.,LTD", "KLA-PCM/T", 0xb18dc3b4, 0xcc51a956),
+       PCMCIA_DEVICE_PROD_ID12("KENTRONICS", "KEP-230", 0xaf8144c9, 0x868f6616),
        PCMCIA_DEVICE_PROD_ID12("KCI", "PE520 PCMCIA Ethernet Adapter", 0xa89b87d3, 0x1eb88e64),
        PCMCIA_DEVICE_PROD_ID12("KINGMAX", "EN10T2T", 0x7bcb459a, 0xa5c81fa5),
        PCMCIA_DEVICE_PROD_ID12("Kingston", "KNE-PC2", 0x1128e633, 0xce2a89b3),
index a527e37728cd9fe4566445b78bd2c1cf47bc0a88..eb799b36c86a391f90bb7f07074257084f4047ba 100644 (file)
@@ -5,7 +5,7 @@
 menuconfig PHYLIB
        tristate "PHY Device support and infrastructure"
        depends on !S390
-       depends on NET_ETHERNET
+       depends on NETDEVICES
        help
          Ethernet controllers are usually attached to PHY
          devices.  This option provides infrastructure for
index 5130db8f5c4ec4b2ced76a400071814c3e638c60..1bb16cb794331ba975eddcc5327f1fc13ef495c9 100644 (file)
@@ -301,7 +301,7 @@ EXPORT_SYMBOL(phy_ethtool_gset);
 /**
  * phy_mii_ioctl - generic PHY MII ioctl interface
  * @phydev: the phy_device struct
- * @mii_data: MII ioctl data
+ * @ifr: &struct ifreq for socket ioctl's
  * @cmd: ioctl cmd to execute
  *
  * Note that this function is currently incompatible with the
index c0761197c07e6a94e67e3d0d4f417d888e2ec68f..16ddc77313cb08348b3cdbdb7016bd5c56fb9304 100644 (file)
@@ -466,6 +466,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
 
        phydev->interface = interface;
 
+       phydev->state = PHY_READY;
+
        /* Do initial configuration here, now that
         * we have certain key parameters
         * (dev_flags and interface) */
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c
new file mode 100644 (file)
index 0000000..410ea0a
--- /dev/null
@@ -0,0 +1,1664 @@
+/*
+ * PXA168 ethernet driver.
+ * Most of the code is derived from mv643xx ethernet driver.
+ *
+ * Copyright (C) 2010 Marvell International Ltd.
+ *             Sachin Sanap <ssanap@marvell.com>
+ *             Philip Rakity <prakity@marvell.com>
+ *             Mark Brown <markb@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/etherdevice.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <linux/clk.h>
+#include <linux/phy.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+#include <linux/pxa168_eth.h>
+
+#define DRIVER_NAME    "pxa168-eth"
+#define DRIVER_VERSION "0.3"
+
+/*
+ * Registers
+ */
+
+#define PHY_ADDRESS            0x0000
+#define SMI                    0x0010
+#define PORT_CONFIG            0x0400
+#define PORT_CONFIG_EXT                0x0408
+#define PORT_COMMAND           0x0410
+#define PORT_STATUS            0x0418
+#define HTPR                   0x0428
+#define SDMA_CONFIG            0x0440
+#define SDMA_CMD               0x0448
+#define INT_CAUSE              0x0450
+#define INT_W_CLEAR            0x0454
+#define INT_MASK               0x0458
+#define ETH_F_RX_DESC_0                0x0480
+#define ETH_C_RX_DESC_0                0x04A0
+#define ETH_C_TX_DESC_1                0x04E4
+
+/* smi register */
+#define SMI_BUSY               (1 << 28)       /* 0 - Write, 1 - Read  */
+#define SMI_R_VALID            (1 << 27)       /* 0 - Write, 1 - Read  */
+#define SMI_OP_W               (0 << 26)       /* Write operation      */
+#define SMI_OP_R               (1 << 26)       /* Read operation */
+
+#define PHY_WAIT_ITERATIONS    10
+
+#define PXA168_ETH_PHY_ADDR_DEFAULT    0
+/* RX & TX descriptor command */
+#define BUF_OWNED_BY_DMA       (1 << 31)
+
+/* RX descriptor status */
+#define RX_EN_INT              (1 << 23)
+#define RX_FIRST_DESC          (1 << 17)
+#define RX_LAST_DESC           (1 << 16)
+#define RX_ERROR               (1 << 15)
+
+/* TX descriptor command */
+#define TX_EN_INT              (1 << 23)
+#define TX_GEN_CRC             (1 << 22)
+#define TX_ZERO_PADDING                (1 << 18)
+#define TX_FIRST_DESC          (1 << 17)
+#define TX_LAST_DESC           (1 << 16)
+#define TX_ERROR               (1 << 15)
+
+/* SDMA_CMD */
+#define SDMA_CMD_AT            (1 << 31)
+#define SDMA_CMD_TXDL          (1 << 24)
+#define SDMA_CMD_TXDH          (1 << 23)
+#define SDMA_CMD_AR            (1 << 15)
+#define SDMA_CMD_ERD           (1 << 7)
+
+/* Bit definitions of the Port Config Reg */
+#define PCR_HS                 (1 << 12)
+#define PCR_EN                 (1 << 7)
+#define PCR_PM                 (1 << 0)
+
+/* Bit definitions of the Port Config Extend Reg */
+#define PCXR_2BSM              (1 << 28)
+#define PCXR_DSCP_EN           (1 << 21)
+#define PCXR_MFL_1518          (0 << 14)
+#define PCXR_MFL_1536          (1 << 14)
+#define PCXR_MFL_2048          (2 << 14)
+#define PCXR_MFL_64K           (3 << 14)
+#define PCXR_FLP               (1 << 11)
+#define PCXR_PRIO_TX_OFF       3
+#define PCXR_TX_HIGH_PRI       (7 << PCXR_PRIO_TX_OFF)
+
+/* Bit definitions of the SDMA Config Reg */
+#define SDCR_BSZ_OFF           12
+#define SDCR_BSZ8              (3 << SDCR_BSZ_OFF)
+#define SDCR_BSZ4              (2 << SDCR_BSZ_OFF)
+#define SDCR_BSZ2              (1 << SDCR_BSZ_OFF)
+#define SDCR_BSZ1              (0 << SDCR_BSZ_OFF)
+#define SDCR_BLMR              (1 << 6)
+#define SDCR_BLMT              (1 << 7)
+#define SDCR_RIFB              (1 << 9)
+#define SDCR_RC_OFF            2
+#define SDCR_RC_MAX_RETRANS    (0xf << SDCR_RC_OFF)
+
+/*
+ * Bit definitions of the Interrupt Cause Reg
+ * and Interrupt MASK Reg is the same
+ */
+#define ICR_RXBUF              (1 << 0)
+#define ICR_TXBUF_H            (1 << 2)
+#define ICR_TXBUF_L            (1 << 3)
+#define ICR_TXEND_H            (1 << 6)
+#define ICR_TXEND_L            (1 << 7)
+#define ICR_RXERR              (1 << 8)
+#define ICR_TXERR_H            (1 << 10)
+#define ICR_TXERR_L            (1 << 11)
+#define ICR_TX_UDR             (1 << 13)
+#define ICR_MII_CH             (1 << 28)
+
+#define ALL_INTS (ICR_TXBUF_H  | ICR_TXBUF_L  | ICR_TX_UDR |\
+                               ICR_TXERR_H  | ICR_TXERR_L |\
+                               ICR_TXEND_H  | ICR_TXEND_L |\
+                               ICR_RXBUF | ICR_RXERR  | ICR_MII_CH)
+
+#define ETH_HW_IP_ALIGN                2       /* hw aligns IP header */
+
+#define NUM_RX_DESCS           64
+#define NUM_TX_DESCS           64
+
+#define HASH_ADD               0
+#define HASH_DELETE            1
+#define HASH_ADDR_TABLE_SIZE   0x4000  /* 16K (1/2K address - PCR_HS == 1) */
+#define HOP_NUMBER             12
+
+/* Bit definitions for Port status */
+#define PORT_SPEED_100         (1 << 0)
+#define FULL_DUPLEX            (1 << 1)
+#define FLOW_CONTROL_ENABLED   (1 << 2)
+#define LINK_UP                        (1 << 3)
+
+/* Bit definitions for work to be done */
+#define WORK_LINK              (1 << 0)
+#define WORK_TX_DONE           (1 << 1)
+
+/*
+ * Misc definitions.
+ */
+#define SKB_DMA_REALIGN                ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
+
+/* Hardware RX descriptor; lives in the DMA-coherent rx_desc_area ring. */
+struct rx_desc {
+       u32 cmd_sts;            /* Descriptor command status            */
+       u16 byte_cnt;           /* Descriptor buffer byte count         */
+       u16 buf_size;           /* Buffer size                          */
+       u32 buf_ptr;            /* Descriptor buffer pointer            */
+       u32 next_desc_ptr;      /* Next descriptor pointer              */
+};
+
+/* Hardware TX descriptor; lives in the DMA-coherent tx_desc_area ring. */
+struct tx_desc {
+       u32 cmd_sts;            /* Command/status field                 */
+       u16 reserved;
+       u16 byte_cnt;           /* buffer byte count                    */
+       u32 buf_ptr;            /* pointer to buffer for this descriptor */
+       u32 next_desc_ptr;      /* Pointer to next descriptor           */
+};
+
+/* Per-port driver state, stored as the net_device's private area. */
+struct pxa168_eth_private {
+       int port_num;           /* User Ethernet port number    */
+
+       int rx_resource_err;    /* Rx ring resource error flag */
+
+       /* Next available and first returning Rx resource */
+       int rx_curr_desc_q, rx_used_desc_q;
+
+       /* Next available and first returning Tx resource */
+       int tx_curr_desc_q, tx_used_desc_q;
+
+       struct rx_desc *p_rx_desc_area;
+       dma_addr_t rx_desc_dma;
+       int rx_desc_area_size;
+       struct sk_buff **rx_skb;        /* skb backing each RX descriptor */
+
+       struct tx_desc *p_tx_desc_area;
+       dma_addr_t tx_desc_dma;
+       int tx_desc_area_size;
+       struct sk_buff **tx_skb;        /* skb backing each TX descriptor */
+
+       struct work_struct tx_timeout_task;
+
+       struct net_device *dev;
+       struct napi_struct napi;
+       u8 work_todo;           /* WORK_LINK / WORK_TX_DONE flags */
+       int skb_size;           /* RX skb allocation size, see recalc_skb_size */
+
+       struct net_device_stats stats;
+       /* Size of Tx Ring per queue */
+       int tx_ring_size;
+       /* Number of tx descriptors in use */
+       int tx_desc_count;
+       /* Size of Rx Ring per queue */
+       int rx_ring_size;
+       /* Number of rx descriptors in use */
+       int rx_desc_count;
+
+       /*
+        * Used in case RX Ring is empty, which can occur when
+        * system does not have resources (skb's)
+        */
+       struct timer_list timeout;
+       struct mii_bus *smi_bus;
+       struct phy_device *phy;
+
+       /* clock */
+       struct clk *clk;
+       struct pxa168_eth_platform_data *pd;
+       /*
+        * Ethernet controller base address.
+        */
+       void __iomem *base;
+
+       /* Pointer to the hardware address filter table */
+       void *htpr;
+       dma_addr_t htpr_dma;    /* DMA address of htpr, programmed into HTPR */
+};
+
+/* One entry of the hardware address filter table: two little-endian words. */
+struct addr_table_entry {
+       __le32 lo;
+       __le32 hi;
+};
+
+/* Bit fields of a Hash Table Entry.
+ * NOTE(review): the first three values are bit masks, while
+ * HASH_ENTRY_RECEIVE_DISCARD_BIT is a shift count (bit position).
+ * add_del_hash_entry() also uses SKIP as a shift count — see note there.
+ */
+enum hash_table_entry {
+       HASH_ENTRY_VALID = 1,
+       SKIP = 2,
+       HASH_ENTRY_RECEIVE_DISCARD = 4,
+       HASH_ENTRY_RECEIVE_DISCARD_BIT = 2
+};
+
+static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
+static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd);
+static int pxa168_init_hw(struct pxa168_eth_private *pep);
+static void eth_port_reset(struct net_device *dev);
+static void eth_port_start(struct net_device *dev);
+static int pxa168_eth_open(struct net_device *dev);
+static int pxa168_eth_stop(struct net_device *dev);
+static int ethernet_phy_setup(struct net_device *dev);
+
+/* Read a 32-bit controller register at the given offset from base. */
+static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
+{
+       return readl(pep->base + offset);
+}
+
+/* Write a 32-bit controller register at the given offset from base. */
+static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
+{
+       writel(data, pep->base + offset);
+}
+
+/*
+ * Force the SDMA engine to abort any in-flight transmit/receive transfer.
+ * Issues the abort command up to 40 times, each time polling SDMA_CMD
+ * until the AR/AT bits self-clear; logs an error if the engine never
+ * goes idle.
+ */
+static void abort_dma(struct pxa168_eth_private *pep)
+{
+       int delay;
+       int max_retries = 40;
+
+       do {
+               wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT);
+               udelay(100);
+
+               delay = 10;
+               while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT))
+                      && delay-- > 0) {
+                       udelay(10);
+               }
+       } while (max_retries-- > 0 && delay <= 0);
+
+       if (max_retries <= 0)
+               printk(KERN_ERR "%s : DMA Stuck\n", __func__);
+}
+
+/* Return the PHY address currently programmed for this port.
+ * The PHY_ADDRESS register holds a 5-bit address field per port.
+ */
+static int ethernet_phy_get(struct pxa168_eth_private *pep)
+{
+       unsigned int reg_data;
+
+       reg_data = rdl(pep, PHY_ADDRESS);
+
+       return (reg_data >> (5 * pep->port_num)) & 0x1f;
+}
+
+/* Program phy_addr into this port's 5-bit field of the PHY_ADDRESS
+ * register, preserving the other ports' fields.
+ */
+static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr)
+{
+       u32 reg_data;
+       int addr_shift = 5 * pep->port_num;
+
+       reg_data = rdl(pep, PHY_ADDRESS);
+       reg_data &= ~(0x1f << addr_shift);
+       reg_data |= (phy_addr & 0x1f) << addr_shift;
+       wrl(pep, PHY_ADDRESS, reg_data);
+}
+
+/*
+ * Trigger a BMCR soft reset on the attached PHY and busy-wait until the
+ * reset bit self-clears.  NOTE(review): the wait loop has no iteration
+ * cap — it terminates only when BMCR_RESET clears or an MDIO read fails.
+ */
+static void ethernet_phy_reset(struct pxa168_eth_private *pep)
+{
+       int data;
+
+       data = phy_read(pep->phy, MII_BMCR);
+       if (data < 0)
+               return;
+
+       data |= BMCR_RESET;
+       if (phy_write(pep->phy, MII_BMCR, data) < 0)
+               return;
+
+       do {
+               data = phy_read(pep->phy, MII_BMCR);
+       } while (data >= 0 && data & BMCR_RESET);
+}
+
+/*
+ * Replenish the RX ring with freshly allocated skbs until it is full.
+ * Each skb is DMA-mapped and its descriptor handed back to hardware
+ * ownership.  If the ring is still completely empty afterwards (all
+ * allocations failed), a timer is armed to retry later.
+ */
+static void rxq_refill(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       struct sk_buff *skb;
+       struct rx_desc *p_used_rx_desc;
+       int used_rx_desc;
+
+       while (pep->rx_desc_count < pep->rx_ring_size) {
+               int size;
+
+               skb = dev_alloc_skb(pep->skb_size);
+               if (!skb)
+                       break;
+               /* Re-align skb->data to a cache line if NET_SKB_PAD left
+                * it misaligned (see SKB_DMA_REALIGN).
+                */
+               if (SKB_DMA_REALIGN)
+                       skb_reserve(skb, SKB_DMA_REALIGN);
+               pep->rx_desc_count++;
+               /* Get 'used' Rx descriptor */
+               used_rx_desc = pep->rx_used_desc_q;
+               p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
+               size = skb->end - skb->data;
+               p_used_rx_desc->buf_ptr = dma_map_single(NULL,
+                                                        skb->data,
+                                                        size,
+                                                        DMA_FROM_DEVICE);
+               p_used_rx_desc->buf_size = size;
+               pep->rx_skb[used_rx_desc] = skb;
+
+               /* Return the descriptor to DMA ownership */
+               wmb();
+               p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
+               wmb();
+
+               /* Move the used descriptor pointer to the next descriptor */
+               pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;
+
+               /* Any Rx return cancels the Rx resource error status */
+               pep->rx_resource_err = 0;
+
+               skb_reserve(skb, ETH_HW_IP_ALIGN);
+       }
+
+       /*
+        * If RX ring is empty of SKB, set a timer to try allocating
+        * again at a later time.
+        */
+       if (pep->rx_desc_count == 0) {
+               pep->timeout.expires = jiffies + (HZ / 10);
+               add_timer(&pep->timeout);
+       }
+}
+
+/* Timer callback for the RX-refill retry timer: kicks NAPI so the poll
+ * path runs again (presumably reaching rxq_refill() via the poll
+ * handler — confirm against the napi poll function).
+ */
+static inline void rxq_refill_timer_wrapper(unsigned long data)
+{
+       struct pxa168_eth_private *pep = (void *)data;
+       napi_schedule(&pep->napi);
+}
+
+/* Reverse the bit order within each nibble of x:
+ * bits 0..3 become 3..0 and bits 4..7 become 7..4.
+ */
+static inline u8 flip_8_bits(u8 x)
+{
+       return (((x) & 0x01) << 3) | (((x) & 0x02) << 1)
+           | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3)
+           | (((x) & 0x10) << 3) | (((x) & 0x20) << 1)
+           | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3);
+}
+
+/* Swap the high and low nibbles of every byte of a 6-byte MAC address,
+ * in place.
+ */
+static void nibble_swap_every_byte(unsigned char *mac_addr)
+{
+       int i;
+       for (i = 0; i < ETH_ALEN; i++) {
+               mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) |
+                               ((mac_addr[i] & 0xf0) >> 4);
+       }
+}
+
+/* Apply flip_8_bits() to every byte of a 6-byte MAC address, in place. */
+static void inverse_every_nibble(unsigned char *mac_addr)
+{
+       int i;
+       for (i = 0; i < ETH_ALEN; i++)
+               mac_addr[i] = flip_8_bits(mac_addr[i]);
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * This function will calculate the hash function of the address.
+ * Inputs
+ * mac_addr_orig    - MAC address.
+ * Outputs
+ * return the calculated entry (an 11-bit table index; masked with 0x07ff).
+ */
+static u32 hash_function(unsigned char *mac_addr_orig)
+{
+       u32 hash_result;
+       u32 addr0;
+       u32 addr1;
+       u32 addr2;
+       u32 addr3;
+       unsigned char mac_addr[ETH_ALEN];
+
+       /* Make a copy of MAC address since we are going to perform bit
+        * operations on it
+        */
+       memcpy(mac_addr, mac_addr_orig, ETH_ALEN);
+
+       /* Hardware hashes the nibble-swapped, bit-reversed address */
+       nibble_swap_every_byte(mac_addr);
+       inverse_every_nibble(mac_addr);
+
+       addr0 = (mac_addr[5] >> 2) & 0x3f;
+       addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2);
+       addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1;
+       addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8);
+
+       hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3);
+       hash_result = hash_result & 0x07ff;
+       return hash_result;
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * This function will add/del an entry to the address table.
+ * Inputs
+ * pep - ETHERNET .
+ * mac_addr - MAC address.
+ * skip - if 1, skip this address. Used in case of deleting an entry which is
+ *       a part of chain in the hash table. We can't just delete the entry
+ *       since that will break the chain. We need to defragment the tables
+ *       from time to time.
+ * rd   - 0 Discard packet upon match.
+ *     - 1 Receive packet upon match.
+ * Outputs
+ * address table entry is added/deleted.
+ * 0 if success.
+ * -ENOSPC if table full
+ */
+static int add_del_hash_entry(struct pxa168_eth_private *pep,
+                             unsigned char *mac_addr,
+                             u32 rd, u32 skip, int del)
+{
+       struct addr_table_entry *entry, *start;
+       u32 new_high;
+       u32 new_low;
+       u32 i;
+
+       /*
+        * NOTE(review): SKIP (=2) and HASH_ENTRY_RECEIVE_DISCARD_BIT (=2)
+        * are both used as shift counts below, so the skip flag and the
+        * rd flag land on the same bit (bit 2) — confirm against the
+        * hardware hash-entry layout.
+        */
+       new_low = (((mac_addr[1] >> 4) & 0xf) << 15)
+           | (((mac_addr[1] >> 0) & 0xf) << 11)
+           | (((mac_addr[0] >> 4) & 0xf) << 7)
+           | (((mac_addr[0] >> 0) & 0xf) << 3)
+           | (((mac_addr[3] >> 4) & 0x1) << 31)
+           | (((mac_addr[3] >> 0) & 0xf) << 27)
+           | (((mac_addr[2] >> 4) & 0xf) << 23)
+           | (((mac_addr[2] >> 0) & 0xf) << 19)
+           | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT)
+           | HASH_ENTRY_VALID;
+
+       new_high = (((mac_addr[5] >> 4) & 0xf) << 15)
+           | (((mac_addr[5] >> 0) & 0xf) << 11)
+           | (((mac_addr[4] >> 4) & 0xf) << 7)
+           | (((mac_addr[4] >> 0) & 0xf) << 3)
+           | (((mac_addr[3] >> 5) & 0x7) << 0);
+
+       /*
+        * Pick the appropriate table, start scanning for free/reusable
+        * entries at the index obtained by hashing the specified MAC address
+        */
+       start = (struct addr_table_entry *)(pep->htpr);
+       entry = start + hash_function(mac_addr);
+       for (i = 0; i < HOP_NUMBER; i++) {
+               if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {
+                       break;
+               } else {
+                       /* if same address put in same position */
+                       if (((le32_to_cpu(entry->lo) & 0xfffffff8) ==
+                               (new_low & 0xfffffff8)) &&
+                               (le32_to_cpu(entry->hi) == new_high)) {
+                               break;
+                       }
+               }
+               /* Wrap around at the end of the 2K-entry table */
+               if (entry == start + 0x7ff)
+                       entry = start;
+               else
+                       entry++;
+       }
+
+       /* Deleting an address that was never inserted is a no-op */
+       if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) &&
+           (le32_to_cpu(entry->hi) != new_high) && del)
+               return 0;
+
+       if (i == HOP_NUMBER) {
+               if (!del) {
+                       printk(KERN_INFO "%s: table section is full, need to "
+                                       "move to 16kB implementation?\n",
+                                        __FILE__);
+                       return -ENOSPC;
+               } else
+                       return 0;
+       }
+
+       /*
+        * Update the selected entry
+        */
+       if (del) {
+               entry->hi = 0;
+               entry->lo = 0;
+       } else {
+               entry->hi = cpu_to_le32(new_high);
+               entry->lo = cpu_to_le32(new_low);
+       }
+
+       return 0;
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ *  Create an addressTable entry from MAC address info
+ *  found in the specified net_device struct
+ *
+ *  Input : pointer to ethernet interface network device structure
+ *  Output : N/A
+ */
+static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
+                                         unsigned char *oaddr,
+                                         unsigned char *addr)
+{
+       /* Delete old entry */
+       if (oaddr)
+               add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE);
+       /* Add new entry */
+       add_del_hash_entry(pep, addr, 1, 0, HASH_ADD);
+}
+
+/*
+ * Allocate (once) and zero the DMA-coherent hardware address filter
+ * table, then program its DMA address into the HTPR register.
+ * Returns 0 on success, -ENOMEM if the coherent allocation fails.
+ */
+static int init_hash_table(struct pxa168_eth_private *pep)
+{
+       /*
+        * Hardware expects CPU to build a hash table based on a predefined
+        * hash function and populate it based on hardware address. The
+        * location of the hash table is identified by 32-bit pointer stored
+        * in HTPR internal register. Two possible sizes exists for the hash
+        * table 8kB (256kB of DRAM required (4 x 64 kB banks)) and 1/2kB
+        * (16kB of DRAM required (4 x 4 kB banks)).We currently only support
+        * 1/2kB.
+        */
+       /* TODO: Add support for 8kB hash table and alternative hash
+        * function.Driver can dynamically switch to them if the 1/2kB hash
+        * table is full.
+        */
+       if (pep->htpr == NULL) {
+               pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
+                                             HASH_ADDR_TABLE_SIZE,
+                                             &pep->htpr_dma, GFP_KERNEL);
+               if (pep->htpr == NULL)
+                       return -ENOMEM;
+       }
+       memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
+       wrl(pep, HTPR, pep->htpr_dma);
+       return 0;
+}
+
+/*
+ * Update RX filtering: toggle hardware promiscuous mode per IFF_PROMISC
+ * and rebuild the hash address table from the device address plus the
+ * current multicast list (presumably wired up as .ndo_set_rx_mode —
+ * confirm against the netdev_ops table).
+ */
+static void pxa168_eth_set_rx_mode(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       struct netdev_hw_addr *ha;
+       u32 val;
+
+       val = rdl(pep, PORT_CONFIG);
+       if (dev->flags & IFF_PROMISC)
+               val |= PCR_PM;
+       else
+               val &= ~PCR_PM;
+       wrl(pep, PORT_CONFIG, val);
+
+       /*
+        * Remove the old list of MAC address and add dev->addr
+        * and multicast address.
+        */
+       memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
+       update_hash_table_mac_address(pep, NULL, dev->dev_addr);
+
+       netdev_for_each_mc_addr(ha, dev)
+               update_hash_table_mac_address(pep, NULL, ha->addr);
+}
+
+/*
+ * Change the interface MAC address.  Validates the new address, copies
+ * it into dev->dev_addr and swaps the old hash-table entry for the new
+ * one under the netdev address lock.
+ * Returns 0 on success, -EINVAL for an invalid Ethernet address.
+ */
+static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
+{
+       struct sockaddr *sa = addr;
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       unsigned char oldMac[ETH_ALEN];
+
+       if (!is_valid_ether_addr(sa->sa_data))
+               return -EINVAL;
+       memcpy(oldMac, dev->dev_addr, ETH_ALEN);
+       memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+       netif_addr_lock_bh(dev);
+       update_hash_table_mac_address(pep, oldMac, dev->dev_addr);
+       netif_addr_unlock_bh(dev);
+       return 0;
+}
+
+/*
+ * Bring the port up: reset/reconfigure the PHY (if present), point the
+ * hardware at the current TX/RX descriptor positions, clear and unmask
+ * all interrupts, enable the port and start the RX DMA engine.
+ */
+static void eth_port_start(struct net_device *dev)
+{
+       unsigned int val = 0;
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       int tx_curr_desc, rx_curr_desc;
+
+       /* Perform PHY reset, if there is a PHY. */
+       if (pep->phy != NULL) {
+               struct ethtool_cmd cmd;
+
+               /* Save settings across the reset, then restore them */
+               pxa168_get_settings(pep->dev, &cmd);
+               ethernet_phy_reset(pep);
+               pxa168_set_settings(pep->dev, &cmd);
+       }
+
+       /* Assignment of Tx CTRP of given queue */
+       tx_curr_desc = pep->tx_curr_desc_q;
+       wrl(pep, ETH_C_TX_DESC_1,
+           (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc)));
+
+       /* Assignment of Rx CRDP of given queue */
+       rx_curr_desc = pep->rx_curr_desc_q;
+       wrl(pep, ETH_C_RX_DESC_0,
+           (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
+
+       wrl(pep, ETH_F_RX_DESC_0,
+           (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
+
+       /* Clear all interrupts */
+       wrl(pep, INT_CAUSE, 0);
+
+       /* Enable all interrupts for receive, transmit and error. */
+       wrl(pep, INT_MASK, ALL_INTS);
+
+       val = rdl(pep, PORT_CONFIG);
+       val |= PCR_EN;
+       wrl(pep, PORT_CONFIG, val);
+
+       /* Start RX DMA engine */
+       val = rdl(pep, SDMA_CMD);
+       val |= SDMA_CMD_ERD;
+       wrl(pep, SDMA_CMD, val);
+}
+
+/*
+ * Quiesce the port: mask and clear interrupts, abort all DMA activity
+ * and disable the port in PORT_CONFIG.
+ */
+static void eth_port_reset(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       unsigned int val = 0;
+
+       /* Stop all interrupts for receive, transmit and error. */
+       wrl(pep, INT_MASK, 0);
+
+       /* Clear all interrupts */
+       wrl(pep, INT_CAUSE, 0);
+
+       /* Stop RX DMA */
+       val = rdl(pep, SDMA_CMD);
+       val &= ~SDMA_CMD_ERD;   /* abort dma command */
+
+       /*
+        * NOTE(review): the cleared ERD value above is never written back
+        * to SDMA_CMD; abort_dma() rewrites the register anyway — confirm
+        * whether an explicit wrl() was intended here.
+        */
+
+       /* Abort any transmit and receive operations and put DMA
+        * in idle state.
+        */
+       abort_dma(pep);
+
+       /* Disable port */
+       val = rdl(pep, PORT_CONFIG);
+       val &= ~PCR_EN;
+       wrl(pep, PORT_CONFIG, val);
+}
+
+/*
+ * txq_reclaim - Free the tx desc data for completed descriptors
+ * If force is non-zero, frees uncompleted descriptors as well
+ *
+ * Runs under netif_tx_lock.  Returns the number of descriptors
+ * released, or -1 when the very first pending descriptor is still
+ * owned by DMA (nothing to reclaim yet).
+ */
+static int txq_reclaim(struct net_device *dev, int force)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       struct tx_desc *desc;
+       u32 cmd_sts;
+       struct sk_buff *skb;
+       int tx_index;
+       dma_addr_t addr;
+       int count;
+       int released = 0;
+
+       netif_tx_lock(dev);
+
+       pep->work_todo &= ~WORK_TX_DONE;
+       while (pep->tx_desc_count > 0) {
+               tx_index = pep->tx_used_desc_q;
+               desc = &pep->p_tx_desc_area[tx_index];
+               cmd_sts = desc->cmd_sts;
+               /* Stop at the first descriptor hardware still owns,
+                * unless 'force' asks us to reclaim everything.
+                */
+               if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) {
+                       if (released > 0) {
+                               goto txq_reclaim_end;
+                       } else {
+                               released = -1;
+                               goto txq_reclaim_end;
+                       }
+               }
+               pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size;
+               pep->tx_desc_count--;
+               addr = desc->buf_ptr;
+               count = desc->byte_cnt;
+               skb = pep->tx_skb[tx_index];
+               if (skb)
+                       pep->tx_skb[tx_index] = NULL;
+
+               if (cmd_sts & TX_ERROR) {
+                       if (net_ratelimit())
+                               printk(KERN_ERR "%s: Error in TX\n", dev->name);
+                       dev->stats.tx_errors++;
+               }
+               dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
+               if (skb)
+                       dev_kfree_skb_irq(skb);
+               released++;
+       }
+txq_reclaim_end:
+       netif_tx_unlock(dev);
+       return released;
+}
+
+/* TX watchdog callback: log the stall and defer recovery to the
+ * tx_timeout_task workqueue item (cannot restart from this context).
+ */
+static void pxa168_eth_tx_timeout(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+
+       printk(KERN_INFO "%s: TX timeout  desc_count %d\n",
+              dev->name, pep->tx_desc_count);
+
+       schedule_work(&pep->tx_timeout_task);
+}
+
+/* Workqueue handler for TX timeouts: recover by fully stopping and
+ * reopening the interface from process context.
+ */
+static void pxa168_eth_tx_timeout_task(struct work_struct *work)
+{
+       struct pxa168_eth_private *pep = container_of(work,
+                                                struct pxa168_eth_private,
+                                                tx_timeout_task);
+       struct net_device *dev = pep->dev;
+       pxa168_eth_stop(dev);
+       pxa168_eth_open(dev);
+}
+
+/*
+ * Drain up to 'budget' completed RX descriptors: unmap each buffer,
+ * update statistics, drop multi-descriptor or errored frames, and pass
+ * good frames to the stack via netif_receive_skb().  Finally refills
+ * the RX ring.  Returns the number of descriptors processed (dropped
+ * frames included).
+ */
+static int rxq_process(struct net_device *dev, int budget)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       struct net_device_stats *stats = &dev->stats;
+       unsigned int received_packets = 0;
+       struct sk_buff *skb;
+
+       while (budget-- > 0) {
+               int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
+               struct rx_desc *rx_desc;
+               unsigned int cmd_sts;
+
+               /* Do not process Rx ring in case of Rx ring resource error */
+               if (pep->rx_resource_err)
+                       break;
+               rx_curr_desc = pep->rx_curr_desc_q;
+               rx_used_desc = pep->rx_used_desc_q;
+               rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
+               cmd_sts = rx_desc->cmd_sts;
+               rmb();
+               if (cmd_sts & (BUF_OWNED_BY_DMA))
+                       break;
+               skb = pep->rx_skb[rx_curr_desc];
+               pep->rx_skb[rx_curr_desc] = NULL;
+
+               rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size;
+               pep->rx_curr_desc_q = rx_next_curr_desc;
+
+               /* Rx descriptors exhausted. */
+               /* Set the Rx ring resource error flag */
+               if (rx_next_curr_desc == rx_used_desc)
+                       pep->rx_resource_err = 1;
+               pep->rx_desc_count--;
+               dma_unmap_single(NULL, rx_desc->buf_ptr,
+                                rx_desc->buf_size,
+                                DMA_FROM_DEVICE);
+               received_packets++;
+               /*
+                * Update statistics.
+                * Note byte count includes 4 byte CRC count
+                */
+               stats->rx_packets++;
+               stats->rx_bytes += rx_desc->byte_cnt;
+               /*
+                * In case received a packet without first / last bits on OR
+                * the error summary bit is on, the packets needs to be dropped.
+                */
+               if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
+                    (RX_FIRST_DESC | RX_LAST_DESC))
+                   || (cmd_sts & RX_ERROR)) {
+
+                       stats->rx_dropped++;
+                       if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
+                           (RX_FIRST_DESC | RX_LAST_DESC)) {
+                               if (net_ratelimit())
+                                       printk(KERN_ERR
+                                              "%s: Rx pkt on multiple desc\n",
+                                              dev->name);
+                       }
+                       if (cmd_sts & RX_ERROR)
+                               stats->rx_errors++;
+                       dev_kfree_skb_irq(skb);
+               } else {
+                       /*
+                        * The -4 is for the CRC in the trailer of the
+                        * received packet
+                        */
+                       skb_put(skb, rx_desc->byte_cnt - 4);
+                       skb->protocol = eth_type_trans(skb, dev);
+                       netif_receive_skb(skb);
+               }
+               dev->last_rx = jiffies;
+       }
+       /* Fill RX ring with skb's */
+       rxq_refill(dev);
+       return received_packets;
+}
+
+/*
+ * Read and acknowledge INT_CAUSE, recording pending work (TX done /
+ * link change) in pep->work_todo.  Returns non-zero when there is work
+ * for the NAPI poll path, 0 when no cause bit was set (IRQ_NONE here
+ * is the constant 0, reused as "no events").
+ */
+static int pxa168_eth_collect_events(struct pxa168_eth_private *pep,
+                                    struct net_device *dev)
+{
+       u32 icr;
+       int ret = 0;
+
+       icr = rdl(pep, INT_CAUSE);
+       if (icr == 0)
+               return IRQ_NONE;
+
+       /* Write-to-clear the cause bits we observed */
+       wrl(pep, INT_CAUSE, ~icr);
+       if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) {
+               pep->work_todo |= WORK_TX_DONE;
+               ret = 1;
+       }
+       if (icr & ICR_RXBUF)
+               ret = 1;
+       if (icr & ICR_MII_CH) {
+               pep->work_todo |= WORK_LINK;
+               ret = 1;
+       }
+       return ret;
+}
+
+/*
+ * React to a link state change reported in PORT_STATUS: on link down,
+ * drop the carrier and force-reclaim all TX descriptors; on link up,
+ * log the negotiated speed/duplex/flow-control and raise the carrier.
+ */
+static void handle_link_event(struct pxa168_eth_private *pep)
+{
+       struct net_device *dev = pep->dev;
+       u32 port_status;
+       int speed;
+       int duplex;
+       int fc;
+
+       port_status = rdl(pep, PORT_STATUS);
+       if (!(port_status & LINK_UP)) {
+               if (netif_carrier_ok(dev)) {
+                       printk(KERN_INFO "%s: link down\n", dev->name);
+                       netif_carrier_off(dev);
+                       txq_reclaim(dev, 1);
+               }
+               return;
+       }
+       if (port_status & PORT_SPEED_100)
+               speed = 100;
+       else
+               speed = 10;
+
+       duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
+       fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
+       printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
+              "flow control %sabled\n", dev->name,
+              speed, duplex ? "full" : "half", fc ? "en" : "dis");
+       if (!netif_carrier_ok(dev))
+               netif_carrier_on(dev);
+}
+
+/*
+ * Top-half interrupt handler: collect/ack events, then mask all device
+ * interrupts and hand processing off to NAPI.  Returns IRQ_NONE when
+ * no cause bit was ours (shared-IRQ friendly).
+ */
+static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id)
+{
+       struct net_device *dev = (struct net_device *)dev_id;
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+
+       if (unlikely(!pxa168_eth_collect_events(pep, dev)))
+               return IRQ_NONE;
+       /* Disable interrupts */
+       wrl(pep, INT_MASK, 0);
+       napi_schedule(&pep->napi);
+       return IRQ_HANDLED;
+}
+
+/*
+ * Recompute pep->skb_size — the RX buffer allocation size — from the
+ * current MTU, padded for header/VLAN/FCS overhead, rounded up to a
+ * multiple of 8 and extended by SKB_DMA_REALIGN slack.
+ */
+static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep)
+{
+       int skb_size;
+
+       /*
+        * Reserve 2+14 bytes for an ethernet header (the hardware
+        * automatically prepends 2 bytes of dummy data to each
+        * received packet), 16 bytes for up to four VLAN tags, and
+        * 4 bytes for the trailing FCS -- 36 bytes total.
+        */
+       skb_size = pep->dev->mtu + 36;
+
+       /*
+        * Make sure that the skb size is a multiple of 8 bytes, as
+        * the lower three bits of the receive descriptor's buffer
+        * size field are ignored by the hardware.
+        */
+       pep->skb_size = (skb_size + 7) & ~7;
+
+       /*
+        * If NET_SKB_PAD is smaller than a cache line,
+        * netdev_alloc_skb() will cause skb->data to be misaligned
+        * to a cache line boundary.  If this is the case, include
+        * some extra space to allow re-aligning the data area.
+        */
+       pep->skb_size += SKB_DMA_REALIGN;
+
+}
+
+/* Program the extended port configuration register: select the
+ * smallest maximum-frame-length setting that fits the current RX
+ * buffer size, and enable the fixed feature bits.  Always returns 0.
+ */
+static int set_port_config_ext(struct pxa168_eth_private *pep)
+{
+       int skb_size;
+
+       pxa168_eth_recalc_skb_size(pep);
+       if  (pep->skb_size <= 1518)
+               skb_size = PCXR_MFL_1518;
+       else if (pep->skb_size <= 1536)
+               skb_size = PCXR_MFL_1536;
+       else if (pep->skb_size <= 2048)
+               skb_size = PCXR_MFL_2048;
+       else
+               skb_size = PCXR_MFL_64K;
+
+       /* Extended Port Configuration */
+       wrl(pep,
+           PORT_CONFIG_EXT, PCXR_2BSM | /* Two byte prefix aligns IP hdr */
+           PCXR_DSCP_EN |               /* Enable DSCP in IP */
+           /* NOTE(review): the PCXR_FLP bit is set here although the
+            * comment says "do not force link pass" -- confirm the bit
+            * polarity against the PXA168 manual. */
+           skb_size | PCXR_FLP |        /* do not force link pass */
+           PCXR_TX_HIGH_PRI);           /* Transmit - high priority queue */
+
+       return 0;
+}
+
+/* One-time controller bring-up: silence and clear all interrupts,
+ * abort any in-flight DMA, allocate the MAC address hash table and
+ * program the SDMA/port configuration registers.
+ * Returns 0 on success or the init_hash_table() error code.
+ */
+static int pxa168_init_hw(struct pxa168_eth_private *pep)
+{
+       int err = 0;
+
+       /* Disable interrupts */
+       wrl(pep, INT_MASK, 0);
+       wrl(pep, INT_CAUSE, 0);
+       /* Write to ICR to clear interrupts. */
+       wrl(pep, INT_W_CLEAR, 0);
+       /* Abort any transmit and receive operations and put DMA
+        * in idle state.
+        */
+       abort_dma(pep);
+       /* Initialize address hash table */
+       err = init_hash_table(pep);
+       if (err)
+               return err;
+       /* SDMA configuration */
+       wrl(pep, SDMA_CONFIG, SDCR_BSZ8 |       /* Burst size = 32 bytes */
+           SDCR_RIFB |                         /* Rx interrupt on frame */
+           SDCR_BLMT |                         /* Little endian transmit */
+           SDCR_BLMR |                         /* Little endian receive */
+           SDCR_RC_MAX_RETRANS);               /* Max retransmit count */
+       /* Port Configuration */
+       wrl(pep, PORT_CONFIG, PCR_HS);          /* Hash size is 1/2kb */
+       set_port_config_ext(pep);
+
+       return err;
+}
+
+/* Allocate the RX skb pointer array and the DMA-coherent RX
+ * descriptor ring, and chain the descriptors into a circular list.
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ */
+static int rxq_init(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       struct rx_desc *p_rx_desc;
+       int size = 0, i = 0;
+       int rx_desc_num = pep->rx_ring_size;
+
+       /* Allocate RX skb rings */
+       /* NOTE(review): kmalloc leaves the array uninitialized; safe only
+        * because rxq_deinit is gated on rx_desc_count -- kcalloc would be
+        * more robust. */
+       pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
+                            GFP_KERNEL);
+       if (!pep->rx_skb) {
+               printk(KERN_ERR "%s: Cannot alloc RX skb ring\n", dev->name);
+               return -ENOMEM;
+       }
+       /* Allocate RX ring */
+       pep->rx_desc_count = 0;
+       size = pep->rx_ring_size * sizeof(struct rx_desc);
+       pep->rx_desc_area_size = size;
+       pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
+                                               &pep->rx_desc_dma, GFP_KERNEL);
+       if (!pep->p_rx_desc_area) {
+               printk(KERN_ERR "%s: Cannot alloc RX ring (size %d bytes)\n",
+                      dev->name, size);
+               goto out;
+       }
+       memset((void *)pep->p_rx_desc_area, 0, size);
+       /* initialize the next_desc_ptr links in the Rx descriptors ring */
+       p_rx_desc = (struct rx_desc *)pep->p_rx_desc_area;
+       for (i = 0; i < rx_desc_num; i++) {
+               p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
+                   ((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
+       }
+       /* Save Rx desc pointer to driver struct. */
+       pep->rx_curr_desc_q = 0;
+       pep->rx_used_desc_q = 0;
+       pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
+       return 0;
+out:
+       kfree(pep->rx_skb);
+       return -ENOMEM;
+}
+
+/* Free every skb still parked on the RX ring, then release the
+ * DMA-coherent descriptor area and the skb pointer array.
+ */
+static void rxq_deinit(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       int curr;
+
+       /* Free preallocated skb's on RX rings */
+       for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) {
+               if (pep->rx_skb[curr]) {
+                       dev_kfree_skb(pep->rx_skb[curr]);
+                       pep->rx_desc_count--;
+               }
+       }
+       /* rx_desc_count should reach zero; anything left is a leak. */
+       if (pep->rx_desc_count)
+               printk(KERN_ERR
+                      "Error in freeing Rx Ring. %d skb's still\n",
+                      pep->rx_desc_count);
+       /* Free RX ring */
+       if (pep->p_rx_desc_area)
+               dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size,
+                                 pep->p_rx_desc_area, pep->rx_desc_dma);
+       kfree(pep->rx_skb);
+}
+
+/* Allocate the TX skb pointer array and the DMA-coherent TX
+ * descriptor ring, and chain the descriptors into a circular list.
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ */
+static int txq_init(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       struct tx_desc *p_tx_desc;
+       int size = 0, i = 0;
+       int tx_desc_num = pep->tx_ring_size;
+
+       /* NOTE(review): as with rxq_init, a zeroing allocator (kcalloc)
+        * would avoid relying on tx_desc_count to guard stale pointers. */
+       pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
+                            GFP_KERNEL);
+       if (!pep->tx_skb) {
+               printk(KERN_ERR "%s: Cannot alloc TX skb ring\n", dev->name);
+               return -ENOMEM;
+       }
+       /* Allocate TX ring */
+       pep->tx_desc_count = 0;
+       size = pep->tx_ring_size * sizeof(struct tx_desc);
+       pep->tx_desc_area_size = size;
+       pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
+                                               &pep->tx_desc_dma, GFP_KERNEL);
+       if (!pep->p_tx_desc_area) {
+               printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
+                      dev->name, size);
+               goto out;
+       }
+       memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
+       /* Initialize the next_desc_ptr links in the Tx descriptors ring */
+       p_tx_desc = (struct tx_desc *)pep->p_tx_desc_area;
+       for (i = 0; i < tx_desc_num; i++) {
+               p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
+                   ((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
+       }
+       pep->tx_curr_desc_q = 0;
+       pep->tx_used_desc_q = 0;
+       pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
+       return 0;
+out:
+       kfree(pep->tx_skb);
+       return -ENOMEM;
+}
+
+/* Reclaim all outstanding TX skbs, then release the DMA-coherent
+ * descriptor area and the skb pointer array.  BUGs if reclaim left
+ * the ring in an inconsistent state.
+ */
+static void txq_deinit(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+
+       /* Free outstanding skb's on TX ring */
+       txq_reclaim(dev, 1);
+       BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q);
+       /* Free TX ring */
+       if (pep->p_tx_desc_area)
+               dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size,
+                                 pep->p_tx_desc_area, pep->tx_desc_dma);
+       kfree(pep->tx_skb);
+}
+
+/* ndo_open: request the IRQ, build the RX/TX rings, pre-fill the RX
+ * ring with skbs, start the port and enable NAPI.
+ * Returns 0 on success or a negative errno (rings are torn down and
+ * the IRQ freed on the error paths).
+ */
+static int pxa168_eth_open(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       int err;
+
+       /* NOTE(review): IRQF_DISABLED is deprecated; also the real
+        * request_irq() error code is discarded in favor of -EAGAIN. */
+       err = request_irq(dev->irq, pxa168_eth_int_handler,
+                         IRQF_DISABLED, dev->name, dev);
+       if (err) {
+               dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
+               return -EAGAIN;
+       }
+       pep->rx_resource_err = 0;
+       err = rxq_init(dev);
+       if (err != 0)
+               goto out_free_irq;
+       err = txq_init(dev);
+       if (err != 0)
+               goto out_free_rx_skb;
+       pep->rx_used_desc_q = 0;
+       pep->rx_curr_desc_q = 0;
+
+       /* Fill RX ring with skb's */
+       rxq_refill(dev);
+       /* NOTE(review): the two queue indices are reset a second time
+        * here, right after rxq_refill() advanced them -- confirm this
+        * is intentional and not a leftover. */
+       pep->rx_used_desc_q = 0;
+       pep->rx_curr_desc_q = 0;
+       netif_carrier_off(dev);
+       eth_port_start(dev);
+       napi_enable(&pep->napi);
+       return 0;
+out_free_rx_skb:
+       rxq_deinit(dev);
+out_free_irq:
+       free_irq(dev->irq, dev);
+       return err;
+}
+
+/* ndo_stop: reset the port, mask and clear all interrupts, stop NAPI
+ * and the refill timer, release the IRQ and tear down both rings.
+ * Always returns 0.
+ */
+static int pxa168_eth_stop(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       eth_port_reset(dev);
+
+       /* Disable interrupts */
+       wrl(pep, INT_MASK, 0);
+       wrl(pep, INT_CAUSE, 0);
+       /* Write to ICR to clear interrupts. */
+       wrl(pep, INT_W_CLEAR, 0);
+       napi_disable(&pep->napi);
+       del_timer_sync(&pep->timeout);
+       netif_carrier_off(dev);
+       free_irq(dev->irq, dev);
+       rxq_deinit(dev);
+       txq_deinit(dev);
+
+       return 0;
+}
+
+/* ndo_change_mtu: validate the requested MTU (68..9500), reprogram
+ * the max-frame-length hardware setting, and if the interface is
+ * running, bounce it so RX buffers are reallocated at the new size.
+ */
+static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
+{
+       int retval;
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+
+       if ((mtu > 9500) || (mtu < 68))
+               return -EINVAL;
+
+       dev->mtu = mtu;
+       retval = set_port_config_ext(pep);
+
+       if (!netif_running(dev))
+               return 0;
+
+       /*
+        * Stop and then re-open the interface. This will allocate RX
+        * skbs of the new MTU.
+        * There is a possible danger that the open will not succeed,
+        * due to memory being full.
+        */
+       pxa168_eth_stop(dev);
+       /* NOTE(review): a re-open failure is only logged; the function
+        * still returns 0 while the device is left down. */
+       if (pxa168_eth_open(dev)) {
+               dev_printk(KERN_ERR, &dev->dev,
+                          "fatal error on re-opening device after "
+                          "MTU change\n");
+       }
+
+       return 0;
+}
+
+/* Claim the next free TX descriptor slot and advance the current
+ * index (ring wrap via modulo).  BUGs if the ring is exhausted,
+ * which the caller prevents by stopping the queue early.
+ */
+static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep)
+{
+       int tx_desc_curr;
+
+       tx_desc_curr = pep->tx_curr_desc_q;
+       pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size;
+       BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q);
+       pep->tx_desc_count++;
+
+       return tx_desc_curr;
+}
+
+/* NAPI poll routine: process a deferred link event if one is flagged,
+ * reclaim completed TX descriptors, wake the queue when space frees
+ * up, and process up to @budget RX frames.  Re-enables device
+ * interrupts once the budget is not exhausted.
+ */
+static int pxa168_rx_poll(struct napi_struct *napi, int budget)
+{
+       struct pxa168_eth_private *pep =
+           container_of(napi, struct pxa168_eth_private, napi);
+       struct net_device *dev = pep->dev;
+       int work_done = 0;
+
+       if (unlikely(pep->work_todo & WORK_LINK)) {
+               pep->work_todo &= ~(WORK_LINK);
+               handle_link_event(pep);
+       }
+       /*
+        * We call txq_reclaim every time since in NAPI interupts are disabled
+        * and due to this we miss the TX_DONE interrupt,which is not updated in
+        * interrupt status register.
+        */
+       txq_reclaim(dev, 0);
+       if (netif_queue_stopped(dev)
+           && pep->tx_ring_size - pep->tx_desc_count > 1) {
+               netif_wake_queue(dev);
+       }
+       work_done = rxq_process(dev, budget);
+       if (work_done < budget) {
+               napi_complete(napi);
+               /* Re-arm all interrupt sources now that polling is done. */
+               wrl(pep, INT_MASK, ALL_INTS);
+       }
+
+       return work_done;
+}
+
+/* ndo_start_xmit: map the skb for DMA, fill one TX descriptor (every
+ * frame fits a single descriptor: FIRST|LAST), hand ownership to the
+ * DMA engine and kick it.  Stops the queue when the ring is nearly
+ * full.  Always returns NETDEV_TX_OK.
+ */
+static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       struct net_device_stats *stats = &dev->stats;
+       struct tx_desc *desc;
+       int tx_index;
+       int length;
+
+       tx_index = eth_alloc_tx_desc_index(pep);
+       desc = &pep->p_tx_desc_area[tx_index];
+       length = skb->len;
+       pep->tx_skb[tx_index] = skb;
+       desc->byte_cnt = length;
+       /* NOTE(review): dma_map_single() is passed a NULL device; the
+        * platform device (pep->dev->dev.parent) should be used, and the
+        * mapping result is not checked. */
+       desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
+       wmb();
+       desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
+                       TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
+       wmb();
+       wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);
+
+       stats->tx_bytes += skb->len;
+       stats->tx_packets++;
+       dev->trans_start = jiffies;
+       if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
+               /* We handled the current skb, but now we are out of space.*/
+               netif_stop_queue(dev);
+       }
+
+       return NETDEV_TX_OK;
+}
+
+/* Poll the SMI register until its BUSY bit clears, sleeping 10ms
+ * between polls.  Returns 0 when ready, -ETIMEDOUT after
+ * PHY_WAIT_ITERATIONS attempts.
+ */
+static int smi_wait_ready(struct pxa168_eth_private *pep)
+{
+       int i = 0;
+
+       /* wait for the SMI register to become available */
+       for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) {
+               if (i == PHY_WAIT_ITERATIONS)
+                       return -ETIMEDOUT;
+               msleep(10);
+       }
+
+       return 0;
+}
+
+/* mii_bus read op: issue an SMI read for (phy_addr, regnum), then
+ * poll until the VALID bit confirms the data.  Returns the 16-bit
+ * register value, or -ETIMEDOUT / -ENODEV on failure.
+ */
+static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+       struct pxa168_eth_private *pep = bus->priv;
+       int i = 0;
+       int val;
+
+       if (smi_wait_ready(pep)) {
+               printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
+               return -ETIMEDOUT;
+       }
+       wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R);
+       /* now wait for the data to be valid */
+       for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) {
+               if (i == PHY_WAIT_ITERATIONS) {
+                       printk(KERN_WARNING
+                               "pxa168_eth: SMI bus read not valid\n");
+                       return -ENODEV;
+               }
+               msleep(10);
+       }
+
+       return val & 0xffff;
+}
+
+/* mii_bus write op: wait for the SMI engine, issue the write of
+ * @value to (phy_addr, regnum), then wait for completion.
+ * Returns 0 on success or -ETIMEDOUT.
+ */
+static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
+                           u16 value)
+{
+       struct pxa168_eth_private *pep = bus->priv;
+
+       if (smi_wait_ready(pep)) {
+               printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
+               return -ETIMEDOUT;
+       }
+
+       wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) |
+           SMI_OP_W | (value & 0xffff));
+
+       if (smi_wait_ready(pep)) {
+               printk(KERN_ERR "pxa168_eth: SMI bus busy timeout\n");
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+/* ndo_do_ioctl: forward MII ioctls to phylib when a PHY is attached,
+ * otherwise report the operation as unsupported.
+ */
+static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
+                              int cmd)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       if (pep->phy != NULL)
+               return phy_mii_ioctl(pep->phy, ifr, cmd);
+
+       return -EOPNOTSUPP;
+}
+
+/* Locate a PHY on the SMI bus.  With the sentinel default address the
+ * full 32-address range is scanned starting from the hardware's
+ * current setting; otherwise only the platform-specified address is
+ * probed.  The first PHY found is programmed back into the MAC and
+ * returned (NULL if none respond).
+ */
+static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr)
+{
+       struct mii_bus *bus = pep->smi_bus;
+       struct phy_device *phydev;
+       int start;
+       int num;
+       int i;
+
+       if (phy_addr == PXA168_ETH_PHY_ADDR_DEFAULT) {
+               /* Scan entire range */
+               start = ethernet_phy_get(pep);
+               num = 32;
+       } else {
+               /* Use phy addr specific to platform */
+               start = phy_addr & 0x1f;
+               num = 1;
+       }
+       phydev = NULL;
+       for (i = 0; i < num; i++) {
+               int addr = (start + i) & 0x1f;
+               if (bus->phy_map[addr] == NULL)
+                       mdiobus_scan(bus, addr);
+
+               if (phydev == NULL) {
+                       phydev = bus->phy_map[addr];
+                       if (phydev != NULL)
+                               ethernet_phy_set_addr(pep, addr);
+               }
+       }
+
+       return phydev;
+}
+
+/* Reset and attach the discovered PHY, then configure it either for
+ * autonegotiation (speed == 0) or for the fixed speed/duplex given by
+ * the platform data, and kick off (re)negotiation.
+ */
+static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex)
+{
+       struct phy_device *phy = pep->phy;
+       ethernet_phy_reset(pep);
+
+       /* NOTE(review): phy_attach() can return an ERR_PTR; its result
+        * is discarded here, so attach failures go unnoticed. */
+       phy_attach(pep->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_MII);
+
+       if (speed == 0) {
+               phy->autoneg = AUTONEG_ENABLE;
+               phy->speed = 0;
+               phy->duplex = 0;
+               phy->supported &= PHY_BASIC_FEATURES;
+               phy->advertising = phy->supported | ADVERTISED_Autoneg;
+       } else {
+               phy->autoneg = AUTONEG_DISABLE;
+               phy->advertising = 0;
+               phy->speed = speed;
+               phy->duplex = duplex;
+       }
+       phy_start_aneg(phy);
+}
+
+/* Run the platform init hook (if any), scan for and initialize the
+ * PHY, and seed the hash table with the device MAC address.
+ * Always returns 0.
+ */
+static int ethernet_phy_setup(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+
+       /* NOTE(review): pep->pd is dereferenced without a NULL check;
+        * boards without platform data would oops here. */
+       if (pep->pd->init)
+               pep->pd->init();
+       pep->phy = phy_scan(pep, pep->pd->phy_addr & 0x1f);
+       if (pep->phy != NULL)
+               phy_init(pep, pep->pd->speed, pep->pd->duplex);
+       update_hash_table_mac_address(pep, NULL, dev->dev_addr);
+
+       return 0;
+}
+
+/* ethtool get_settings: refresh and report link parameters from the
+ * attached PHY.
+ * NOTE(review): pep->phy is not NULL-checked here (unlike do_ioctl);
+ * a PHY-less board would oops -- confirm.
+ */
+static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       int err;
+
+       err = phy_read_status(pep->phy);
+       if (err == 0)
+               err = phy_ethtool_gset(pep->phy, cmd);
+
+       return err;
+}
+
+/* ethtool set_settings: delegate directly to phylib. */
+static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+
+       return phy_ethtool_sset(pep->phy, cmd);
+}
+
+/* ethtool get_drvinfo: report static driver identification strings. */
+static void pxa168_get_drvinfo(struct net_device *dev,
+                              struct ethtool_drvinfo *info)
+{
+       strncpy(info->driver, DRIVER_NAME, 32);
+       strncpy(info->version, DRIVER_VERSION, 32);
+       strncpy(info->fw_version, "N/A", 32);
+       strncpy(info->bus_info, "N/A", 32);
+}
+
+/* ethtool get_link: report the software carrier state as 0/1. */
+static u32 pxa168_get_link(struct net_device *dev)
+{
+       return !!netif_carrier_ok(dev);
+}
+
+/* ethtool operations exported by this driver. */
+static const struct ethtool_ops pxa168_ethtool_ops = {
+       .get_settings = pxa168_get_settings,
+       .set_settings = pxa168_set_settings,
+       .get_drvinfo = pxa168_get_drvinfo,
+       .get_link = pxa168_get_link,
+};
+
+/* net_device operations exported by this driver. */
+static const struct net_device_ops pxa168_eth_netdev_ops = {
+       .ndo_open = pxa168_eth_open,
+       .ndo_stop = pxa168_eth_stop,
+       .ndo_start_xmit = pxa168_eth_start_xmit,
+       .ndo_set_rx_mode = pxa168_eth_set_rx_mode,
+       .ndo_set_mac_address = pxa168_eth_set_mac_address,
+       .ndo_validate_addr = eth_validate_addr,
+       .ndo_do_ioctl = pxa168_eth_do_ioctl,
+       .ndo_change_mtu = pxa168_eth_change_mtu,
+       .ndo_tx_timeout = pxa168_eth_tx_timeout,
+};
+
+/* Platform probe: enable the MFU clock, allocate the net_device, map
+ * registers, read platform data, register the private MDIO (SMI) bus,
+ * initialize the hardware and register the netdev.  All resources are
+ * released in reverse order on the error paths.
+ */
+static int pxa168_eth_probe(struct platform_device *pdev)
+{
+       struct pxa168_eth_private *pep = NULL;
+       struct net_device *dev = NULL;
+       struct resource *res;
+       struct clk *clk;
+       int err;
+
+       printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");
+
+       clk = clk_get(&pdev->dev, "MFUCLK");
+       if (IS_ERR(clk)) {
+               printk(KERN_ERR "%s: Fast Ethernet failed to get clock\n",
+                       DRIVER_NAME);
+               return -ENODEV;
+       }
+       clk_enable(clk);
+
+       dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
+       if (!dev) {
+               err = -ENOMEM;
+               goto err_clk;
+       }
+
+       platform_set_drvdata(pdev, dev);
+       pep = netdev_priv(dev);
+       pep->dev = dev;
+       pep->clk = clk;
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (res == NULL) {
+               err = -ENODEV;
+               goto err_netdev;
+       }
+       pep->base = ioremap(res->start, res->end - res->start + 1);
+       if (pep->base == NULL) {
+               err = -ENOMEM;
+               goto err_netdev;
+       }
+       res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       BUG_ON(!res);
+       dev->irq = res->start;
+       dev->netdev_ops = &pxa168_eth_netdev_ops;
+       dev->watchdog_timeo = 2 * HZ;
+       dev->base_addr = 0;
+       SET_ETHTOOL_OPS(dev, &pxa168_ethtool_ops);
+
+       INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
+
+       /* No way to read a MAC from the hardware here; use a random one. */
+       printk(KERN_INFO "%s:Using random mac address\n", DRIVER_NAME);
+       random_ether_addr(dev->dev_addr);
+
+       /* NOTE(review): pep->pd may be NULL if no platform data was
+        * supplied; the dereferences below would oops -- confirm. */
+       pep->pd = pdev->dev.platform_data;
+       pep->rx_ring_size = NUM_RX_DESCS;
+       if (pep->pd->rx_queue_size)
+               pep->rx_ring_size = pep->pd->rx_queue_size;
+
+       pep->tx_ring_size = NUM_TX_DESCS;
+       if (pep->pd->tx_queue_size)
+               pep->tx_ring_size = pep->pd->tx_queue_size;
+
+       pep->port_num = pep->pd->port_number;
+       /* Hardware supports only 3 ports */
+       BUG_ON(pep->port_num > 2);
+       netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);
+
+       /* Timer used to retry RX-ring refill after allocation failures. */
+       memset(&pep->timeout, 0, sizeof(struct timer_list));
+       init_timer(&pep->timeout);
+       pep->timeout.function = rxq_refill_timer_wrapper;
+       pep->timeout.data = (unsigned long)pep;
+
+       pep->smi_bus = mdiobus_alloc();
+       if (pep->smi_bus == NULL) {
+               err = -ENOMEM;
+               goto err_base;
+       }
+       pep->smi_bus->priv = pep;
+       pep->smi_bus->name = "pxa168_eth smi";
+       pep->smi_bus->read = pxa168_smi_read;
+       pep->smi_bus->write = pxa168_smi_write;
+       snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
+       pep->smi_bus->parent = &pdev->dev;
+       pep->smi_bus->phy_mask = 0xffffffff;
+       err = mdiobus_register(pep->smi_bus);
+       if (err)
+               goto err_free_mdio;
+
+       pxa168_init_hw(pep);
+       err = ethernet_phy_setup(dev);
+       if (err)
+               goto err_mdiobus;
+       SET_NETDEV_DEV(dev, &pdev->dev);
+       err = register_netdev(dev);
+       if (err)
+               goto err_mdiobus;
+       return 0;
+
+err_mdiobus:
+       mdiobus_unregister(pep->smi_bus);
+err_free_mdio:
+       mdiobus_free(pep->smi_bus);
+err_base:
+       iounmap(pep->base);
+err_netdev:
+       free_netdev(dev);
+err_clk:
+       clk_disable(clk);
+       clk_put(clk);
+       return err;
+}
+
+/* Platform remove: free the hash table, release the clock, detach the
+ * PHY, unmap registers and unregister/free the netdev.
+ * NOTE(review): resources (clock, PHY, register mapping) are released
+ * BEFORE unregister_netdev(); the device is still reachable from
+ * userspace at that point -- the conventional order is to unregister
+ * first.  Confirm against current mainline.
+ */
+static int pxa168_eth_remove(struct platform_device *pdev)
+{
+       struct net_device *dev = platform_get_drvdata(pdev);
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+
+       if (pep->htpr) {
+               dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
+                                 pep->htpr, pep->htpr_dma);
+               pep->htpr = NULL;
+       }
+       if (pep->clk) {
+               clk_disable(pep->clk);
+               clk_put(pep->clk);
+               pep->clk = NULL;
+       }
+       if (pep->phy != NULL)
+               phy_detach(pep->phy);
+
+       iounmap(pep->base);
+       pep->base = NULL;
+       unregister_netdev(dev);
+       flush_scheduled_work();
+       free_netdev(dev);
+       platform_set_drvdata(pdev, NULL);
+       return 0;
+}
+
+/* Platform shutdown: quiesce the port so DMA stops before reboot. */
+static void pxa168_eth_shutdown(struct platform_device *pdev)
+{
+       struct net_device *dev = platform_get_drvdata(pdev);
+       eth_port_reset(dev);
+}
+
+/* Suspend/resume are not implemented: with CONFIG_PM the callbacks
+ * exist but report -ENOSYS; without it they are NULL.
+ */
+#ifdef CONFIG_PM
+static int pxa168_eth_resume(struct platform_device *pdev)
+{
+       return -ENOSYS;
+}
+
+static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state)
+{
+       return -ENOSYS;
+}
+
+#else
+#define pxa168_eth_resume NULL
+#define pxa168_eth_suspend NULL
+#endif
+
+/* Platform driver glue binding on DRIVER_NAME. */
+static struct platform_driver pxa168_eth_driver = {
+       .probe = pxa168_eth_probe,
+       .remove = pxa168_eth_remove,
+       .shutdown = pxa168_eth_shutdown,
+       .resume = pxa168_eth_resume,
+       .suspend = pxa168_eth_suspend,
+       .driver = {
+                  .name = DRIVER_NAME,
+                  },
+};
+
+/* Module entry point: register the platform driver. */
+static int __init pxa168_init_module(void)
+{
+       return platform_driver_register(&pxa168_eth_driver);
+}
+
+/* Module exit point: unregister the platform driver. */
+static void __exit pxa168_cleanup_module(void)
+{
+       platform_driver_unregister(&pxa168_eth_driver);
+}
+
+module_init(pxa168_init_module);
+module_exit(pxa168_cleanup_module);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
+MODULE_ALIAS("platform:pxa168_eth");
index b9615bd745ea5d9175e6d6153d5c924084d550d2..66eea59720209f85278cf223b6d723f879337aac 100644 (file)
@@ -473,48 +473,58 @@ qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
 static int
 qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
 {
-       struct qlcnic_pci_info pci_info[QLCNIC_MAX_PCI_FUNC];
+       struct qlcnic_pci_info *pci_info;
        int i, ret = 0, err;
        u8 pfn;
 
-       if (!adapter->npars)
-               adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
-                               QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
-       if (!adapter->npars)
+       pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+       if (!pci_info)
                return -ENOMEM;
 
-       if (!adapter->eswitch)
-               adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
+       adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
+                               QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
+       if (!adapter->npars) {
+               err = -ENOMEM;
+               goto err_pci_info;
+       }
+
+       adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
                                QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
        if (!adapter->eswitch) {
                err = -ENOMEM;
-               goto err_eswitch;
+               goto err_npars;
        }
 
        ret = qlcnic_get_pci_info(adapter, pci_info);
-       if (!ret) {
-               for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
-                       pfn = pci_info[i].id;
-                       if (pfn > QLCNIC_MAX_PCI_FUNC)
-                               return QL_STATUS_INVALID_PARAM;
-                       adapter->npars[pfn].active = pci_info[i].active;
-                       adapter->npars[pfn].type = pci_info[i].type;
-                       adapter->npars[pfn].phy_port = pci_info[i].default_port;
-                       adapter->npars[pfn].mac_learning = DEFAULT_MAC_LEARN;
-                       adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
-                       adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
-               }
-
-               for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
-                       adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
+       if (ret)
+               goto err_eswitch;
 
-               return ret;
+       for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+               pfn = pci_info[i].id;
+               if (pfn > QLCNIC_MAX_PCI_FUNC)
+                       return QL_STATUS_INVALID_PARAM;
+               adapter->npars[pfn].active = pci_info[i].active;
+               adapter->npars[pfn].type = pci_info[i].type;
+               adapter->npars[pfn].phy_port = pci_info[i].default_port;
+               adapter->npars[pfn].mac_learning = DEFAULT_MAC_LEARN;
+               adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
+               adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
        }
 
+       for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
+               adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
+
+       kfree(pci_info);
+       return 0;
+
+err_eswitch:
        kfree(adapter->eswitch);
        adapter->eswitch = NULL;
-err_eswitch:
+err_npars:
        kfree(adapter->npars);
+       adapter->npars = NULL;
+err_pci_info:
+       kfree(pci_info);
 
        return ret;
 }
@@ -1973,8 +1983,6 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct net_device_stats *stats = &netdev->stats;
 
-       memset(stats, 0, sizeof(*stats));
-
        stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
        stats->tx_packets = adapter->stats.xmitfinished;
        stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
@@ -2180,9 +2188,16 @@ static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void qlcnic_poll_controller(struct net_device *netdev)
 {
+       int ring;
+       struct qlcnic_host_sds_ring *sds_ring;
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
        disable_irq(adapter->irq);
-       qlcnic_intr(adapter->irq, adapter);
+       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+               sds_ring = &recv_ctx->sds_rings[ring];
+               qlcnic_intr(adapter->irq, sds_ring);
+       }
        enable_irq(adapter->irq);
 }
 #endif
@@ -3361,15 +3376,21 @@ qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
        struct device *dev = container_of(kobj, struct device, kobj);
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
        struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
-       struct qlcnic_pci_info  pci_info[QLCNIC_MAX_PCI_FUNC];
+       struct qlcnic_pci_info *pci_info;
        int i, ret;
 
        if (size != sizeof(pci_cfg))
                return QL_STATUS_INVALID_PARAM;
 
+       pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+       if (!pci_info)
+               return -ENOMEM;
+
        ret = qlcnic_get_pci_info(adapter, pci_info);
-       if (ret)
+       if (ret) {
+               kfree(pci_info);
                return ret;
+       }
 
        for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
                pci_cfg[i].pci_func = pci_info[i].id;
@@ -3380,8 +3401,8 @@ qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
                memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
        }
        memcpy(buf, &pci_cfg, size);
+       kfree(pci_info);
        return size;
-
 }
 static struct bin_attribute bin_attr_npar_config = {
        .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
index 8d63f69b27d912de7dca94d980276963f25c7fe7..5f89e83501f4f5c1892ac15a84681a76ade08c19 100644 (file)
@@ -3919,12 +3919,12 @@ static int ql_adapter_down(struct ql_adapter *qdev)
        for (i = 0; i < qdev->rss_ring_count; i++)
                netif_napi_del(&qdev->rx_ring[i].napi);
 
-       ql_free_rx_buffers(qdev);
-
        status = ql_adapter_reset(qdev);
        if (status)
                netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
                          qdev->func);
+       ql_free_rx_buffers(qdev);
+
        return status;
 }
 
index f5a9eb1df59332f0d105d7ee83380471a0ff48f1..79fd02bc69fd0854f14e0fdac452980cd7fed74b 100644 (file)
@@ -1437,7 +1437,7 @@ static const struct net_device_ops sh_eth_netdev_ops = {
 
 static int sh_eth_drv_probe(struct platform_device *pdev)
 {
-       int ret, i, devno = 0;
+       int ret, devno = 0;
        struct resource *res;
        struct net_device *ndev = NULL;
        struct sh_eth_private *mdp;
index 7f62e2dea28f7423bd7539253e5698ab2094f049..ca7fc9df1ccf900533d56c35326b360316283af2 100644 (file)
@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
 
 static void rx_complete (struct urb *urb);
 
-static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
+static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
 {
        struct sk_buff          *skb;
        struct skb_data         *entry;
@@ -327,7 +327,7 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
                netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
                usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
                usb_free_urb (urb);
-               return;
+               return -ENOMEM;
        }
        skb_reserve (skb, NET_IP_ALIGN);
 
@@ -357,6 +357,9 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
                        netif_dbg(dev, ifdown, dev->net, "device gone\n");
                        netif_device_detach (dev->net);
                        break;
+               case -EHOSTUNREACH:
+                       retval = -ENOLINK;
+                       break;
                default:
                        netif_dbg(dev, rx_err, dev->net,
                                  "rx submit, %d\n", retval);
@@ -374,6 +377,7 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
                dev_kfree_skb_any (skb);
                usb_free_urb (urb);
        }
+       return retval;
 }
 
 
@@ -912,6 +916,7 @@ fail_halt:
        /* tasklet could resubmit itself forever if memory is tight */
        if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
                struct urb      *urb = NULL;
+               int resched = 1;
 
                if (netif_running (dev->net))
                        urb = usb_alloc_urb (0, GFP_KERNEL);
@@ -922,10 +927,12 @@ fail_halt:
                        status = usb_autopm_get_interface(dev->intf);
                        if (status < 0)
                                goto fail_lowmem;
-                       rx_submit (dev, urb, GFP_KERNEL);
+                       if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
+                               resched = 0;
                        usb_autopm_put_interface(dev->intf);
 fail_lowmem:
-                       tasklet_schedule (&dev->bh);
+                       if (resched)
+                               tasklet_schedule (&dev->bh);
                }
        }
 
@@ -1175,8 +1182,11 @@ static void usbnet_bh (unsigned long param)
                        // don't refill the queue all at once
                        for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
                                urb = usb_alloc_urb (0, GFP_ATOMIC);
-                               if (urb != NULL)
-                                       rx_submit (dev, urb, GFP_ATOMIC);
+                               if (urb != NULL) {
+                                       if (rx_submit (dev, urb, GFP_ATOMIC) ==
+                                           -ENOLINK)
+                                               return;
+                               }
                        }
                        if (temp != dev->rxq.qlen)
                                netif_dbg(dev, link, dev->net,
index ad7719fe6d0a2ced65c2db0a27faeeab6550486a..e050bd65e0378cbdd6643d893c01a4462a2dea2f 100644 (file)
@@ -885,20 +885,21 @@ fst_rx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
  *      Receive a frame through the DMA
  */
 static inline void
-fst_rx_dma(struct fst_card_info *card, unsigned char *skb,
-          unsigned char *mem, int len)
+fst_rx_dma(struct fst_card_info *card, dma_addr_t skb,
+          dma_addr_t mem, int len)
 {
        /*
         * This routine will setup the DMA and start it
         */
 
-       dbg(DBG_RX, "In fst_rx_dma %p %p %d\n", skb, mem, len);
+       dbg(DBG_RX, "In fst_rx_dma %lx %lx %d\n",
+           (unsigned long) skb, (unsigned long) mem, len);
        if (card->dmarx_in_progress) {
                dbg(DBG_ASS, "In fst_rx_dma while dma in progress\n");
        }
 
-       outl((unsigned long) skb, card->pci_conf + DMAPADR0);   /* Copy to here */
-       outl((unsigned long) mem, card->pci_conf + DMALADR0);   /* from here */
+       outl(skb, card->pci_conf + DMAPADR0);   /* Copy to here */
+       outl(mem, card->pci_conf + DMALADR0);   /* from here */
        outl(len, card->pci_conf + DMASIZ0);    /* for this length */
        outl(0x00000000c, card->pci_conf + DMADPR0);    /* In this direction */
 
@@ -1309,8 +1310,8 @@ fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port)
                card->dma_port_rx = port;
                card->dma_len_rx = len;
                card->dma_rxpos = rxp;
-               fst_rx_dma(card, (char *) card->rx_dma_handle_card,
-                          (char *) BUF_OFFSET(rxBuffer[pi][rxp][0]), len);
+               fst_rx_dma(card, card->rx_dma_handle_card,
+                          BUF_OFFSET(rxBuffer[pi][rxp][0]), len);
        }
        if (rxp != port->rxpos) {
                dbg(DBG_ASS, "About to increment rxpos by more than 1\n");
index a75ed3083a6ae266d66b9630140b42db8d07266e..8e4153d740f3bf00d4a465bf5a1526ee66ad518e 100644 (file)
@@ -386,7 +386,7 @@ claw_tx(struct sk_buff *skb, struct net_device *dev)
         struct chbk *p_ch;
 
        CLAW_DBF_TEXT(4, trace, "claw_tx");
-        p_ch=&privptr->channel[WRITE];
+       p_ch = &privptr->channel[WRITE_CHANNEL];
         spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
         rc=claw_hw_tx( skb, dev, 1 );
         spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
@@ -407,7 +407,7 @@ static struct sk_buff *
 claw_pack_skb(struct claw_privbk *privptr)
 {
        struct sk_buff *new_skb,*held_skb;
-       struct chbk *p_ch = &privptr->channel[WRITE];
+       struct chbk *p_ch = &privptr->channel[WRITE_CHANNEL];
        struct claw_env  *p_env = privptr->p_env;
        int     pkt_cnt,pk_ind,so_far;
 
@@ -515,15 +515,15 @@ claw_open(struct net_device *dev)
                privptr->p_env->write_size=CLAW_FRAME_SIZE;
        }
         claw_set_busy(dev);
-       tasklet_init(&privptr->channel[READ].tasklet, claw_irq_tasklet,
-               (unsigned long) &privptr->channel[READ]);
+       tasklet_init(&privptr->channel[READ_CHANNEL].tasklet, claw_irq_tasklet,
+               (unsigned long) &privptr->channel[READ_CHANNEL]);
         for ( i = 0; i < 2;  i++) {
                CLAW_DBF_TEXT_(2, trace, "opn_ch%d", i);
                 init_waitqueue_head(&privptr->channel[i].wait);
                /* skb_queue_head_init(&p_ch->io_queue); */
-               if (i == WRITE)
+               if (i == WRITE_CHANNEL)
                        skb_queue_head_init(
-                               &privptr->channel[WRITE].collect_queue);
+                               &privptr->channel[WRITE_CHANNEL].collect_queue);
                 privptr->channel[i].flag_a = 0;
                 privptr->channel[i].IO_active = 0;
                 privptr->channel[i].flag  &= ~CLAW_TIMER;
@@ -551,12 +551,12 @@ claw_open(struct net_device *dev)
                 if((privptr->channel[i].flag & CLAW_TIMER) == 0x00)
                         del_timer(&timer);
         }
-        if ((((privptr->channel[READ].last_dstat |
-               privptr->channel[WRITE].last_dstat) &
+       if ((((privptr->channel[READ_CHANNEL].last_dstat |
+               privptr->channel[WRITE_CHANNEL].last_dstat) &
            ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) ||
-           (((privptr->channel[READ].flag |
-               privptr->channel[WRITE].flag) & CLAW_TIMER) != 0x00)) {
-               dev_info(&privptr->channel[READ].cdev->dev,
+          (((privptr->channel[READ_CHANNEL].flag |
+               privptr->channel[WRITE_CHANNEL].flag) & CLAW_TIMER) != 0x00)) {
+               dev_info(&privptr->channel[READ_CHANNEL].cdev->dev,
                        "%s: remote side is not ready\n", dev->name);
                CLAW_DBF_TEXT(2, trace, "notrdy");
 
@@ -608,8 +608,8 @@ claw_open(struct net_device *dev)
                         }
                 }
                privptr->buffs_alloc = 0;
-               privptr->channel[READ].flag= 0x00;
-               privptr->channel[WRITE].flag = 0x00;
+               privptr->channel[READ_CHANNEL].flag = 0x00;
+               privptr->channel[WRITE_CHANNEL].flag = 0x00;
                 privptr->p_buff_ccw=NULL;
                 privptr->p_buff_read=NULL;
                 privptr->p_buff_write=NULL;
@@ -652,10 +652,10 @@ claw_irq_handler(struct ccw_device *cdev,
         }
 
        /* Try to extract channel from driver data. */
-       if (privptr->channel[READ].cdev == cdev)
-               p_ch = &privptr->channel[READ];
-       else if (privptr->channel[WRITE].cdev == cdev)
-               p_ch = &privptr->channel[WRITE];
+       if (privptr->channel[READ_CHANNEL].cdev == cdev)
+               p_ch = &privptr->channel[READ_CHANNEL];
+       else if (privptr->channel[WRITE_CHANNEL].cdev == cdev)
+               p_ch = &privptr->channel[WRITE_CHANNEL];
        else {
                dev_warn(&cdev->dev, "The device is not a CLAW device\n");
                CLAW_DBF_TEXT(2, trace, "badchan");
@@ -813,7 +813,7 @@ claw_irq_handler(struct ccw_device *cdev,
                        claw_clearbit_busy(TB_TX, dev);
                        claw_clear_busy(dev);
                }
-               p_ch_r = (struct chbk *)&privptr->channel[READ];
+               p_ch_r = (struct chbk *)&privptr->channel[READ_CHANNEL];
                if (test_and_set_bit(CLAW_BH_ACTIVE,
                        (void *)&p_ch_r->flag_a) == 0)
                        tasklet_schedule(&p_ch_r->tasklet);
@@ -878,13 +878,13 @@ claw_release(struct net_device *dev)
         for ( i = 1; i >=0 ;  i--) {
                 spin_lock_irqsave(
                        get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
-             /*   del_timer(&privptr->channel[READ].timer);  */
+            /*   del_timer(&privptr->channel[READ_CHANNEL].timer);  */
                privptr->channel[i].claw_state = CLAW_STOP;
                 privptr->channel[i].IO_active = 0;
                 parm = (unsigned long) &privptr->channel[i];
-               if (i == WRITE)
+               if (i == WRITE_CHANNEL)
                        claw_purge_skb_queue(
-                               &privptr->channel[WRITE].collect_queue);
+                               &privptr->channel[WRITE_CHANNEL].collect_queue);
                 rc = ccw_device_halt (privptr->channel[i].cdev, parm);
                if (privptr->system_validate_comp==0x00)  /* never opened? */
                    init_waitqueue_head(&privptr->channel[i].wait);
@@ -971,16 +971,16 @@ claw_release(struct net_device *dev)
         privptr->mtc_skipping = 1;
         privptr->mtc_offset=0;
 
-        if (((privptr->channel[READ].last_dstat |
-               privptr->channel[WRITE].last_dstat) &
+       if (((privptr->channel[READ_CHANNEL].last_dstat |
+               privptr->channel[WRITE_CHANNEL].last_dstat) &
                ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) {
-               dev_warn(&privptr->channel[READ].cdev->dev,
+               dev_warn(&privptr->channel[READ_CHANNEL].cdev->dev,
                        "Deactivating %s completed with incorrect"
                        " subchannel status "
                        "(read %02x, write %02x)\n",
                 dev->name,
-               privptr->channel[READ].last_dstat,
-               privptr->channel[WRITE].last_dstat);
+               privptr->channel[READ_CHANNEL].last_dstat,
+               privptr->channel[WRITE_CHANNEL].last_dstat);
                 CLAW_DBF_TEXT(2, trace, "badclose");
         }
        CLAW_DBF_TEXT(4, trace, "rlsexit");
@@ -1324,7 +1324,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
 
        CLAW_DBF_TEXT(4, trace, "hw_tx");
        privptr = (struct claw_privbk *)(dev->ml_priv);
-        p_ch=(struct chbk *)&privptr->channel[WRITE];
+       p_ch = (struct chbk *)&privptr->channel[WRITE_CHANNEL];
        p_env =privptr->p_env;
        claw_free_wrt_buf(dev); /* Clean up free chain if posible */
         /*  scan the write queue to free any completed write packets   */
@@ -1357,7 +1357,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
                                 claw_strt_out_IO(dev );
                                 claw_free_wrt_buf( dev );
                                 if (privptr->write_free_count==0) {
-                                       ch = &privptr->channel[WRITE];
+                                       ch = &privptr->channel[WRITE_CHANNEL];
                                        atomic_inc(&skb->users);
                                        skb_queue_tail(&ch->collect_queue, skb);
                                        goto Done;
@@ -1369,7 +1369,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
                 }
                 /*  tx lock  */
                 if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */
-                       ch = &privptr->channel[WRITE];
+                       ch = &privptr->channel[WRITE_CHANNEL];
                        atomic_inc(&skb->users);
                        skb_queue_tail(&ch->collect_queue, skb);
                         claw_strt_out_IO(dev );
@@ -1385,7 +1385,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
             privptr->p_write_free_chain == NULL ) {
 
                 claw_setbit_busy(TB_NOBUFFER,dev);
-               ch = &privptr->channel[WRITE];
+               ch = &privptr->channel[WRITE_CHANNEL];
                atomic_inc(&skb->users);
                skb_queue_tail(&ch->collect_queue, skb);
                CLAW_DBF_TEXT(2, trace, "clawbusy");
@@ -1397,7 +1397,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
         while (len_of_data > 0) {
                 p_this_ccw=privptr->p_write_free_chain;  /* get a block */
                if (p_this_ccw == NULL) { /* lost the race */
-                       ch = &privptr->channel[WRITE];
+                       ch = &privptr->channel[WRITE_CHANNEL];
                        atomic_inc(&skb->users);
                        skb_queue_tail(&ch->collect_queue, skb);
                        goto Done2;
@@ -2067,7 +2067,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
                        *catch up to each other */
        privptr = dev->ml_priv;
         p_env=privptr->p_env;
-       tdev = &privptr->channel[READ].cdev->dev;
+       tdev = &privptr->channel[READ_CHANNEL].cdev->dev;
        memcpy( &temp_host_name, p_env->host_name, 8);
         memcpy( &temp_ws_name, p_env->adapter_name , 8);
        dev_info(tdev, "%s: CLAW device %.8s: "
@@ -2245,7 +2245,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
                        dev->name, temp_ws_name,
                        p_ctlbk->linkid);
                        privptr->active_link_ID = p_ctlbk->linkid;
-                       p_ch = &privptr->channel[WRITE];
+                       p_ch = &privptr->channel[WRITE_CHANNEL];
                        wake_up(&p_ch->wait);  /* wake up claw_open ( WRITE) */
                break;
        case CONNECTION_RESPONSE:
@@ -2296,7 +2296,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
                                "%s: Confirmed Now packing\n", dev->name);
                                p_env->packing = DO_PACKED;
                        }
-                       p_ch = &privptr->channel[WRITE];
+                       p_ch = &privptr->channel[WRITE_CHANNEL];
                        wake_up(&p_ch->wait);
                } else {
                        dev_warn(tdev, "Activating %s failed because of"
@@ -2556,7 +2556,7 @@ unpack_read(struct net_device *dev )
        p_packd=NULL;
        privptr = dev->ml_priv;
 
-       p_dev = &privptr->channel[READ].cdev->dev;
+       p_dev = &privptr->channel[READ_CHANNEL].cdev->dev;
        p_env = privptr->p_env;
         p_this_ccw=privptr->p_read_active_first;
        while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) {
@@ -2728,7 +2728,7 @@ claw_strt_read (struct net_device *dev, int lock )
         struct ccwbk*p_ccwbk;
         struct chbk *p_ch;
         struct clawh *p_clawh;
-        p_ch=&privptr->channel[READ];
+       p_ch = &privptr->channel[READ_CHANNEL];
 
        CLAW_DBF_TEXT(4, trace, "StRdNter");
         p_clawh=(struct clawh *)privptr->p_claw_signal_blk;
@@ -2782,7 +2782,7 @@ claw_strt_out_IO( struct net_device *dev )
                return;
        }
        privptr = (struct claw_privbk *)dev->ml_priv;
-        p_ch=&privptr->channel[WRITE];
+       p_ch = &privptr->channel[WRITE_CHANNEL];
 
        CLAW_DBF_TEXT(4, trace, "strt_io");
         p_first_ccw=privptr->p_write_active_first;
@@ -2875,7 +2875,7 @@ claw_free_netdevice(struct net_device * dev, int free_dev)
        if (dev->flags & IFF_RUNNING)
                claw_release(dev);
        if (privptr) {
-               privptr->channel[READ].ndev = NULL;  /* say it's free */
+               privptr->channel[READ_CHANNEL].ndev = NULL;  /* say it's free */
        }
        dev->ml_priv = NULL;
 #ifdef MODULE
@@ -2960,18 +2960,18 @@ claw_new_device(struct ccwgroup_device *cgdev)
        struct ccw_dev_id dev_id;
 
        dev_info(&cgdev->dev, "add for %s\n",
-                dev_name(&cgdev->cdev[READ]->dev));
+                dev_name(&cgdev->cdev[READ_CHANNEL]->dev));
        CLAW_DBF_TEXT(2, setup, "new_dev");
        privptr = dev_get_drvdata(&cgdev->dev);
-       dev_set_drvdata(&cgdev->cdev[READ]->dev, privptr);
-       dev_set_drvdata(&cgdev->cdev[WRITE]->dev, privptr);
+       dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
+       dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
        if (!privptr)
                return -ENODEV;
        p_env = privptr->p_env;
-       ccw_device_get_id(cgdev->cdev[READ], &dev_id);
-       p_env->devno[READ] = dev_id.devno;
-       ccw_device_get_id(cgdev->cdev[WRITE], &dev_id);
-       p_env->devno[WRITE] = dev_id.devno;
+       ccw_device_get_id(cgdev->cdev[READ_CHANNEL], &dev_id);
+       p_env->devno[READ_CHANNEL] = dev_id.devno;
+       ccw_device_get_id(cgdev->cdev[WRITE_CHANNEL], &dev_id);
+       p_env->devno[WRITE_CHANNEL] = dev_id.devno;
        ret = add_channel(cgdev->cdev[0],0,privptr);
        if (ret == 0)
                ret = add_channel(cgdev->cdev[1],1,privptr);
@@ -2980,14 +2980,14 @@ claw_new_device(struct ccwgroup_device *cgdev)
                        " failed with error code %d\n", ret);
                goto out;
        }
-       ret = ccw_device_set_online(cgdev->cdev[READ]);
+       ret = ccw_device_set_online(cgdev->cdev[READ_CHANNEL]);
        if (ret != 0) {
                dev_warn(&cgdev->dev,
                        "Setting the read subchannel online"
                        " failed with error code %d\n", ret);
                goto out;
        }
-       ret = ccw_device_set_online(cgdev->cdev[WRITE]);
+       ret = ccw_device_set_online(cgdev->cdev[WRITE_CHANNEL]);
        if (ret != 0) {
                dev_warn(&cgdev->dev,
                        "Setting the write subchannel online "
@@ -3002,8 +3002,8 @@ claw_new_device(struct ccwgroup_device *cgdev)
        }
        dev->ml_priv = privptr;
        dev_set_drvdata(&cgdev->dev, privptr);
-       dev_set_drvdata(&cgdev->cdev[READ]->dev, privptr);
-       dev_set_drvdata(&cgdev->cdev[WRITE]->dev, privptr);
+       dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
+       dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
        /* sysfs magic */
         SET_NETDEV_DEV(dev, &cgdev->dev);
        if (register_netdev(dev) != 0) {
@@ -3021,16 +3021,16 @@ claw_new_device(struct ccwgroup_device *cgdev)
                        goto out;
                }
        }
-       privptr->channel[READ].ndev = dev;
-       privptr->channel[WRITE].ndev = dev;
+       privptr->channel[READ_CHANNEL].ndev = dev;
+       privptr->channel[WRITE_CHANNEL].ndev = dev;
        privptr->p_env->ndev = dev;
 
        dev_info(&cgdev->dev, "%s:readsize=%d  writesize=%d "
                "readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n",
                 dev->name, p_env->read_size,
                p_env->write_size, p_env->read_buffers,
-                p_env->write_buffers, p_env->devno[READ],
-               p_env->devno[WRITE]);
+               p_env->write_buffers, p_env->devno[READ_CHANNEL],
+               p_env->devno[WRITE_CHANNEL]);
        dev_info(&cgdev->dev, "%s:host_name:%.8s, adapter_name "
                ":%.8s api_type: %.8s\n",
                 dev->name, p_env->host_name,
@@ -3072,10 +3072,10 @@ claw_shutdown_device(struct ccwgroup_device *cgdev)
        priv = dev_get_drvdata(&cgdev->dev);
        if (!priv)
                return -ENODEV;
-       ndev = priv->channel[READ].ndev;
+       ndev = priv->channel[READ_CHANNEL].ndev;
        if (ndev) {
                /* Close the device */
-               dev_info(&cgdev->dev, "%s: shutting down \n",
+               dev_info(&cgdev->dev, "%s: shutting down\n",
                        ndev->name);
                if (ndev->flags & IFF_RUNNING)
                        ret = claw_release(ndev);
@@ -3083,8 +3083,8 @@ claw_shutdown_device(struct ccwgroup_device *cgdev)
                unregister_netdev(ndev);
                ndev->ml_priv = NULL;  /* cgdev data, not ndev's to free */
                claw_free_netdevice(ndev, 1);
-               priv->channel[READ].ndev = NULL;
-               priv->channel[WRITE].ndev = NULL;
+               priv->channel[READ_CHANNEL].ndev = NULL;
+               priv->channel[WRITE_CHANNEL].ndev = NULL;
                priv->p_env->ndev = NULL;
        }
        ccw_device_set_offline(cgdev->cdev[1]);
@@ -3115,8 +3115,8 @@ claw_remove_device(struct ccwgroup_device *cgdev)
        priv->channel[1].irb=NULL;
        kfree(priv);
        dev_set_drvdata(&cgdev->dev, NULL);
-       dev_set_drvdata(&cgdev->cdev[READ]->dev, NULL);
-       dev_set_drvdata(&cgdev->cdev[WRITE]->dev, NULL);
+       dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, NULL);
+       dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, NULL);
        put_device(&cgdev->dev);
 
        return;
index 46d59a13db12d1aabdaf87d497dbe2f3d1c02d55..1bc5904df19ff550d3094ed939faab28f5cac44c 100644 (file)
@@ -74,8 +74,8 @@
 #define MAX_ENVELOPE_SIZE       65536
 #define CLAW_DEFAULT_MTU_SIZE   4096
 #define DEF_PACK_BUFSIZE       32768
-#define READ                    0
-#define WRITE                   1
+#define READ_CHANNEL           0
+#define WRITE_CHANNEL          1
 
 #define TB_TX                   0          /* sk buffer handling in process  */
 #define TB_STOP                 1          /* network device stop in process */
index 70eb7f1384146e4138e5853534a87966cf79fb5a..8c921fc3511a5e741b187e9f32e763f004b11427 100644 (file)
@@ -454,7 +454,7 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg)
        if ((fsmstate == CTC_STATE_SETUPWAIT) &&
            (ch->protocol == CTCM_PROTO_OS390)) {
                /* OS/390 resp. z/OS */
-               if (CHANNEL_DIRECTION(ch->flags) == READ) {
+               if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
                        *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
                        fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC,
                                     CTC_EVENT_TIMER, ch);
@@ -472,14 +472,14 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg)
         * if in compatibility mode, since VM TCP delays the initial
         * frame until it has some data to send.
         */
-       if ((CHANNEL_DIRECTION(ch->flags) == WRITE) ||
+       if ((CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) ||
            (ch->protocol != CTCM_PROTO_S390))
                fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
 
        *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
        ch->ccw[1].count = 2;   /* Transfer only length */
 
-       fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
+       fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
                     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
        rc = ccw_device_start(ch->cdev, &ch->ccw[0],
                                        (unsigned long)ch, 0xff, 0);
@@ -495,7 +495,7 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg)
         * reply from VM TCP which brings up the RX channel to it's
         * final state.
         */
-       if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
+       if ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) &&
            (ch->protocol == CTCM_PROTO_S390)) {
                struct net_device *dev = ch->netdev;
                struct ctcm_priv *priv = dev->ml_priv;
@@ -600,15 +600,15 @@ static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
        int rc;
 
        CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s",
-                       CTCM_FUNTAIL, ch->id,
-                       (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
+               CTCM_FUNTAIL, ch->id,
+               (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX");
 
        if (ch->trans_skb != NULL) {
                clear_normalized_cda(&ch->ccw[1]);
                dev_kfree_skb(ch->trans_skb);
                ch->trans_skb = NULL;
        }
-       if (CHANNEL_DIRECTION(ch->flags) == READ) {
+       if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
                ch->ccw[1].cmd_code = CCW_CMD_READ;
                ch->ccw[1].flags = CCW_FLAG_SLI;
                ch->ccw[1].count = 0;
@@ -622,7 +622,8 @@ static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
                        "%s(%s): %s trans_skb alloc delayed "
                        "until first transfer",
                        CTCM_FUNTAIL, ch->id,
-                       (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
+                       (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
+                               "RX" : "TX");
        }
        ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
        ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
@@ -720,7 +721,7 @@ static void ctcm_chx_cleanup(fsm_instance *fi, int state,
 
        ch->th_seg = 0x00;
        ch->th_seq_num = 0x00;
-       if (CHANNEL_DIRECTION(ch->flags) == READ) {
+       if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
                skb_queue_purge(&ch->io_queue);
                fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
        } else {
@@ -799,7 +800,8 @@ static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
                fsm_newstate(fi, CTC_STATE_STARTRETRY);
                fsm_deltimer(&ch->timer);
                fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
-               if (!IS_MPC(ch) && (CHANNEL_DIRECTION(ch->flags) == READ)) {
+               if (!IS_MPC(ch) &&
+                   (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) {
                        int rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
                        if (rc != 0)
                                ctcm_ccw_check_rc(ch, rc,
@@ -811,10 +813,10 @@ static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
        CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
                "%s(%s) : %s error during %s channel setup state=%s\n",
                CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event],
-               (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX",
+               (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX",
                fsm_getstate_str(fi));
 
-       if (CHANNEL_DIRECTION(ch->flags) == READ) {
+       if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
                fsm_newstate(fi, CTC_STATE_RXERR);
                fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
        } else {
@@ -945,7 +947,7 @@ static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
        fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
 
        fsm_newstate(fi, CTC_STATE_DTERM);
-       ch2 = priv->channel[WRITE];
+       ch2 = priv->channel[CTCM_WRITE];
        fsm_newstate(ch2->fsm, CTC_STATE_DTERM);
 
        ccw_device_halt(ch->cdev, (unsigned long)ch);
@@ -1074,13 +1076,13 @@ static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg)
        fsm_deltimer(&ch->timer);
        CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
                "%s: %s: %s unrecoverable channel error",
-                       CTCM_FUNTAIL, ch->id, rd == READ ? "RX" : "TX");
+                       CTCM_FUNTAIL, ch->id, rd == CTCM_READ ? "RX" : "TX");
 
        if (IS_MPC(ch)) {
                priv->stats.tx_dropped++;
                priv->stats.tx_errors++;
        }
-       if (rd == READ) {
+       if (rd == CTCM_READ) {
                fsm_newstate(fi, CTC_STATE_RXERR);
                fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
        } else {
@@ -1503,7 +1505,7 @@ static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
        switch (fsm_getstate(fi)) {
        case CTC_STATE_STARTRETRY:
        case CTC_STATE_SETUPWAIT:
-               if (CHANNEL_DIRECTION(ch->flags) == READ) {
+               if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
                        ctcmpc_chx_rxidle(fi, event, arg);
                } else {
                        fsm_newstate(fi, CTC_STATE_TXIDLE);
@@ -1514,7 +1516,7 @@ static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
                break;
        };
 
-       fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
+       fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
                     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
 
 done:
@@ -1753,8 +1755,8 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
        struct net_device *dev = ach->netdev;
        struct ctcm_priv *priv = dev->ml_priv;
        struct mpc_group *grp = priv->mpcg;
-       struct channel *wch = priv->channel[WRITE];
-       struct channel *rch = priv->channel[READ];
+       struct channel *wch = priv->channel[CTCM_WRITE];
+       struct channel *rch = priv->channel[CTCM_READ];
        struct sk_buff *skb;
        struct th_sweep *header;
        int rc = 0;
@@ -2070,7 +2072,7 @@ static void dev_action_start(fsm_instance *fi, int event, void *arg)
        fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
        if (IS_MPC(priv))
                priv->mpcg->channels_terminating = 0;
-       for (direction = READ; direction <= WRITE; direction++) {
+       for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
                struct channel *ch = priv->channel[direction];
                fsm_event(ch->fsm, CTC_EVENT_START, ch);
        }
@@ -2092,7 +2094,7 @@ static void dev_action_stop(fsm_instance *fi, int event, void *arg)
        CTCMY_DBF_DEV_NAME(SETUP, dev, "");
 
        fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
-       for (direction = READ; direction <= WRITE; direction++) {
+       for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
                struct channel *ch = priv->channel[direction];
                fsm_event(ch->fsm, CTC_EVENT_STOP, ch);
                ch->th_seq_num = 0x00;
@@ -2183,11 +2185,11 @@ static void dev_action_chup(fsm_instance *fi, int event, void *arg)
 
        if (IS_MPC(priv)) {
                if (event == DEV_EVENT_RXUP)
-                       mpc_channel_action(priv->channel[READ],
-                               READ, MPC_CHANNEL_ADD);
+                       mpc_channel_action(priv->channel[CTCM_READ],
+                               CTCM_READ, MPC_CHANNEL_ADD);
                else
-                       mpc_channel_action(priv->channel[WRITE],
-                               WRITE, MPC_CHANNEL_ADD);
+                       mpc_channel_action(priv->channel[CTCM_WRITE],
+                               CTCM_WRITE, MPC_CHANNEL_ADD);
        }
 }
 
@@ -2239,11 +2241,11 @@ static void dev_action_chdown(fsm_instance *fi, int event, void *arg)
        }
        if (IS_MPC(priv)) {
                if (event == DEV_EVENT_RXDOWN)
-                       mpc_channel_action(priv->channel[READ],
-                               READ, MPC_CHANNEL_REMOVE);
+                       mpc_channel_action(priv->channel[CTCM_READ],
+                               CTCM_READ, MPC_CHANNEL_REMOVE);
                else
-                       mpc_channel_action(priv->channel[WRITE],
-                               WRITE, MPC_CHANNEL_REMOVE);
+                       mpc_channel_action(priv->channel[CTCM_WRITE],
+                               CTCM_WRITE, MPC_CHANNEL_REMOVE);
        }
 }
 
index 4ecafbf91211b72775315a95e20a322e0e11e834..6edf20b62de5bae28214275931f0db9fd1fcd1f2 100644 (file)
@@ -267,7 +267,7 @@ static struct channel *channel_get(enum ctcm_channel_types type,
                else {
                        ch->flags |= CHANNEL_FLAGS_INUSE;
                        ch->flags &= ~CHANNEL_FLAGS_RWMASK;
-                       ch->flags |= (direction == WRITE)
+                       ch->flags |= (direction == CTCM_WRITE)
                            ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
                        fsm_newstate(ch->fsm, CTC_STATE_STOPPED);
                }
@@ -388,7 +388,8 @@ int ctcm_ch_alloc_buffer(struct channel *ch)
                CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
                        "%s(%s): %s trans_skb allocation error",
                        CTCM_FUNTAIL, ch->id,
-                       (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
+                       (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
+                               "RX" : "TX");
                return -ENOMEM;
        }
 
@@ -399,7 +400,8 @@ int ctcm_ch_alloc_buffer(struct channel *ch)
                CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
                        "%s(%s): %s set norm_cda failed",
                        CTCM_FUNTAIL, ch->id,
-                       (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
+                       (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
+                               "RX" : "TX");
                return -ENOMEM;
        }
 
@@ -603,14 +605,14 @@ static void ctcmpc_send_sweep_req(struct channel *rch)
 
        priv = dev->ml_priv;
        grp = priv->mpcg;
-       ch = priv->channel[WRITE];
+       ch = priv->channel[CTCM_WRITE];
 
        /* sweep processing is not complete until response and request */
        /* has completed for all read channels in group                */
        if (grp->in_sweep == 0) {
                grp->in_sweep = 1;
-               grp->sweep_rsp_pend_num = grp->active_channels[READ];
-               grp->sweep_req_pend_num = grp->active_channels[READ];
+               grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ];
+               grp->sweep_req_pend_num = grp->active_channels[CTCM_READ];
        }
 
        sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA);
@@ -911,7 +913,7 @@ static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_BUSY;
 
        dev->trans_start = jiffies;
-       if (ctcm_transmit_skb(priv->channel[WRITE], skb) != 0)
+       if (ctcm_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0)
                return NETDEV_TX_BUSY;
        return NETDEV_TX_OK;
 }
@@ -994,7 +996,7 @@ static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
        }
 
        dev->trans_start = jiffies;
-       if (ctcmpc_transmit_skb(priv->channel[WRITE], skb) != 0) {
+       if (ctcmpc_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0) {
                CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
                        "%s(%s): device error - dropped",
                                        CTCM_FUNTAIL, dev->name);
@@ -1035,7 +1037,7 @@ static int ctcm_change_mtu(struct net_device *dev, int new_mtu)
                return -EINVAL;
 
        priv = dev->ml_priv;
-       max_bufsize = priv->channel[READ]->max_bufsize;
+       max_bufsize = priv->channel[CTCM_READ]->max_bufsize;
 
        if (IS_MPC(priv)) {
                if (new_mtu > max_bufsize - TH_HEADER_LENGTH)
@@ -1226,10 +1228,10 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
        priv = dev_get_drvdata(&cgdev->dev);
 
        /* Try to extract channel from driver data. */
-       if (priv->channel[READ]->cdev == cdev)
-               ch = priv->channel[READ];
-       else if (priv->channel[WRITE]->cdev == cdev)
-               ch = priv->channel[WRITE];
+       if (priv->channel[CTCM_READ]->cdev == cdev)
+               ch = priv->channel[CTCM_READ];
+       else if (priv->channel[CTCM_WRITE]->cdev == cdev)
+               ch = priv->channel[CTCM_WRITE];
        else {
                dev_err(&cdev->dev,
                        "%s: Internal error: Can't determine channel for "
@@ -1587,13 +1589,13 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
                goto out_ccw2;
        }
 
-       for (direction = READ; direction <= WRITE; direction++) {
+       for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
                priv->channel[direction] =
-                   channel_get(type, direction == READ ? read_id : write_id,
-                               direction);
+                       channel_get(type, direction == CTCM_READ ?
+                               read_id : write_id, direction);
                if (priv->channel[direction] == NULL) {
-                       if (direction == WRITE)
-                               channel_free(priv->channel[READ]);
+                       if (direction == CTCM_WRITE)
+                               channel_free(priv->channel[CTCM_READ]);
                        goto out_dev;
                }
                priv->channel[direction]->netdev = dev;
@@ -1617,13 +1619,13 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
 
        dev_info(&dev->dev,
                "setup OK : r/w = %s/%s, protocol : %d\n",
-                       priv->channel[READ]->id,
-                       priv->channel[WRITE]->id, priv->protocol);
+                       priv->channel[CTCM_READ]->id,
+                       priv->channel[CTCM_WRITE]->id, priv->protocol);
 
        CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
                "setup(%s) OK : r/w = %s/%s, protocol : %d", dev->name,
-                       priv->channel[READ]->id,
-                       priv->channel[WRITE]->id, priv->protocol);
+                       priv->channel[CTCM_READ]->id,
+                       priv->channel[CTCM_WRITE]->id, priv->protocol);
 
        return 0;
 out_unregister:
@@ -1635,10 +1637,10 @@ out_ccw2:
 out_ccw1:
        ccw_device_set_offline(cgdev->cdev[0]);
 out_remove_channel2:
-       readc = channel_get(type, read_id, READ);
+       readc = channel_get(type, read_id, CTCM_READ);
        channel_remove(readc);
 out_remove_channel1:
-       writec = channel_get(type, write_id, WRITE);
+       writec = channel_get(type, write_id, CTCM_WRITE);
        channel_remove(writec);
 out_err_result:
        return result;
@@ -1660,19 +1662,19 @@ static int ctcm_shutdown_device(struct ccwgroup_device *cgdev)
        if (!priv)
                return -ENODEV;
 
-       if (priv->channel[READ]) {
-               dev = priv->channel[READ]->netdev;
+       if (priv->channel[CTCM_READ]) {
+               dev = priv->channel[CTCM_READ]->netdev;
                CTCM_DBF_DEV(SETUP, dev, "");
                /* Close the device */
                ctcm_close(dev);
                dev->flags &= ~IFF_RUNNING;
                ctcm_remove_attributes(&cgdev->dev);
-               channel_free(priv->channel[READ]);
+               channel_free(priv->channel[CTCM_READ]);
        } else
                dev = NULL;
 
-       if (priv->channel[WRITE])
-               channel_free(priv->channel[WRITE]);
+       if (priv->channel[CTCM_WRITE])
+               channel_free(priv->channel[CTCM_WRITE]);
 
        if (dev) {
                unregister_netdev(dev);
@@ -1685,11 +1687,11 @@ static int ctcm_shutdown_device(struct ccwgroup_device *cgdev)
        ccw_device_set_offline(cgdev->cdev[1]);
        ccw_device_set_offline(cgdev->cdev[0]);
 
-       if (priv->channel[READ])
-               channel_remove(priv->channel[READ]);
-       if (priv->channel[WRITE])
-               channel_remove(priv->channel[WRITE]);
-       priv->channel[READ] = priv->channel[WRITE] = NULL;
+       if (priv->channel[CTCM_READ])
+               channel_remove(priv->channel[CTCM_READ]);
+       if (priv->channel[CTCM_WRITE])
+               channel_remove(priv->channel[CTCM_WRITE]);
+       priv->channel[CTCM_READ] = priv->channel[CTCM_WRITE] = NULL;
 
        return 0;
 
@@ -1720,11 +1722,11 @@ static int ctcm_pm_suspend(struct ccwgroup_device *gdev)
 
        if (gdev->state == CCWGROUP_OFFLINE)
                return 0;
-       netif_device_detach(priv->channel[READ]->netdev);
-       ctcm_close(priv->channel[READ]->netdev);
+       netif_device_detach(priv->channel[CTCM_READ]->netdev);
+       ctcm_close(priv->channel[CTCM_READ]->netdev);
        if (!wait_event_timeout(priv->fsm->wait_q,
            fsm_getstate(priv->fsm) == DEV_STATE_STOPPED, CTCM_TIME_5_SEC)) {
-               netif_device_attach(priv->channel[READ]->netdev);
+               netif_device_attach(priv->channel[CTCM_READ]->netdev);
                return -EBUSY;
        }
        ccw_device_set_offline(gdev->cdev[1]);
@@ -1745,9 +1747,9 @@ static int ctcm_pm_resume(struct ccwgroup_device *gdev)
        rc = ccw_device_set_online(gdev->cdev[0]);
        if (rc)
                goto err_out;
-       ctcm_open(priv->channel[READ]->netdev);
+       ctcm_open(priv->channel[CTCM_READ]->netdev);
 err_out:
-       netif_device_attach(priv->channel[READ]->netdev);
+       netif_device_attach(priv->channel[CTCM_READ]->netdev);
        return rc;
 }
 
index d34fa14f44e767c7ddf866b2d325406c48607ce2..24d5215eb0c40af4816115579bde366149eb159f 100644 (file)
@@ -111,8 +111,8 @@ enum ctcm_channel_types {
 
 #define CTCM_INITIAL_BLOCKLEN  2
 
-#define READ                   0
-#define WRITE                  1
+#define CTCM_READ              0
+#define CTCM_WRITE             1
 
 #define CTCM_ID_SIZE           20+3
 
index 87c24d2936d6e34d1de09da7ebe27aff148d8f61..2861e78773cb5f0e4ee319e4cc0928a00953c717 100644 (file)
@@ -419,8 +419,8 @@ void ctc_mpc_establish_connectivity(int port_num,
                return;
        priv = dev->ml_priv;
        grp = priv->mpcg;
-       rch = priv->channel[READ];
-       wch = priv->channel[WRITE];
+       rch = priv->channel[CTCM_READ];
+       wch = priv->channel[CTCM_WRITE];
 
        CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO,
                        "%s(%s): state=%s",
@@ -578,7 +578,7 @@ void ctc_mpc_flow_control(int port_num, int flowc)
                        "%s: %s: flowc = %d",
                                CTCM_FUNTAIL, dev->name, flowc);
 
-       rch = priv->channel[READ];
+       rch = priv->channel[CTCM_READ];
 
        mpcg_state = fsm_getstate(grp->fsm);
        switch (flowc) {
@@ -622,7 +622,7 @@ static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo)
        struct net_device *dev = rch->netdev;
        struct ctcm_priv   *priv = dev->ml_priv;
        struct mpc_group  *grp = priv->mpcg;
-       struct channel    *ch = priv->channel[WRITE];
+       struct channel    *ch = priv->channel[CTCM_WRITE];
 
        CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, ch, ch->id);
        CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH);
@@ -656,7 +656,7 @@ static void ctcmpc_send_sweep_resp(struct channel *rch)
        int rc = 0;
        struct th_sweep *header;
        struct sk_buff *sweep_skb;
-       struct channel *ch  = priv->channel[WRITE];
+       struct channel *ch  = priv->channel[CTCM_WRITE];
 
        CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, rch, rch->id);
 
@@ -712,7 +712,7 @@ static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo)
        struct net_device *dev     = rch->netdev;
        struct ctcm_priv  *priv = dev->ml_priv;
        struct mpc_group  *grp  = priv->mpcg;
-       struct channel    *ch      = priv->channel[WRITE];
+       struct channel    *ch      = priv->channel[CTCM_WRITE];
 
        if (do_debug)
                CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
@@ -721,8 +721,8 @@ static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo)
        if (grp->in_sweep == 0) {
                grp->in_sweep = 1;
                ctcm_test_and_set_busy(dev);
-               grp->sweep_req_pend_num = grp->active_channels[READ];
-               grp->sweep_rsp_pend_num = grp->active_channels[READ];
+               grp->sweep_req_pend_num = grp->active_channels[CTCM_READ];
+               grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ];
        }
 
        CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH);
@@ -906,14 +906,14 @@ void mpc_group_ready(unsigned long adev)
        fsm_newstate(grp->fsm, MPCG_STATE_READY);
 
        /* Put up a read on the channel */
-       ch = priv->channel[READ];
+       ch = priv->channel[CTCM_READ];
        ch->pdu_seq = 0;
        CTCM_PR_DBGDATA("ctcmpc: %s() ToDCM_pdu_seq= %08x\n" ,
                        __func__, ch->pdu_seq);
 
        ctcmpc_chx_rxidle(ch->fsm, CTC_EVENT_START, ch);
        /* Put the write channel in idle state */
-       ch = priv->channel[WRITE];
+       ch = priv->channel[CTCM_WRITE];
        if (ch->collect_len > 0) {
                spin_lock(&ch->collect_lock);
                ctcm_purge_skb_queue(&ch->collect_queue);
@@ -960,7 +960,8 @@ void mpc_channel_action(struct channel *ch, int direction, int action)
                "%s: %i / Grp:%s total_channels=%i, active_channels: "
                "read=%i, write=%i\n", __func__, action,
                fsm_getstate_str(grp->fsm), grp->num_channel_paths,
-               grp->active_channels[READ], grp->active_channels[WRITE]);
+               grp->active_channels[CTCM_READ],
+               grp->active_channels[CTCM_WRITE]);
 
        if ((action == MPC_CHANNEL_ADD) && (ch->in_mpcgroup == 0)) {
                grp->num_channel_paths++;
@@ -994,10 +995,11 @@ void mpc_channel_action(struct channel *ch, int direction, int action)
                                grp->xid_skb->data,
                                grp->xid_skb->len);
 
-               ch->xid->xid2_dlc_type = ((CHANNEL_DIRECTION(ch->flags) == READ)
+               ch->xid->xid2_dlc_type =
+                       ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
                                ? XID2_READ_SIDE : XID2_WRITE_SIDE);
 
-               if (CHANNEL_DIRECTION(ch->flags) == WRITE)
+               if (CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE)
                        ch->xid->xid2_buf_len = 0x00;
 
                ch->xid_skb->data = ch->xid_skb_data;
@@ -1006,8 +1008,8 @@ void mpc_channel_action(struct channel *ch, int direction, int action)
 
                fsm_newstate(ch->fsm, CH_XID0_PENDING);
 
-               if ((grp->active_channels[READ]  > 0) &&
-                   (grp->active_channels[WRITE] > 0) &&
+               if ((grp->active_channels[CTCM_READ] > 0) &&
+                   (grp->active_channels[CTCM_WRITE] > 0) &&
                        (fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) {
                        fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
                        CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE,
@@ -1027,10 +1029,10 @@ void mpc_channel_action(struct channel *ch, int direction, int action)
                if (grp->channels_terminating)
                                        goto done;
 
-               if (((grp->active_channels[READ] == 0) &&
-                                       (grp->active_channels[WRITE] > 0))
-                       || ((grp->active_channels[WRITE] == 0) &&
-                                       (grp->active_channels[READ] > 0)))
+               if (((grp->active_channels[CTCM_READ] == 0) &&
+                                       (grp->active_channels[CTCM_WRITE] > 0))
+                       || ((grp->active_channels[CTCM_WRITE] == 0) &&
+                                       (grp->active_channels[CTCM_READ] > 0)))
                        fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
        }
 done:
@@ -1038,7 +1040,8 @@ done:
                "exit %s: %i / Grp:%s total_channels=%i, active_channels: "
                "read=%i, write=%i\n", __func__, action,
                fsm_getstate_str(grp->fsm), grp->num_channel_paths,
-               grp->active_channels[READ], grp->active_channels[WRITE]);
+               grp->active_channels[CTCM_READ],
+               grp->active_channels[CTCM_WRITE]);
 
        CTCM_PR_DEBUG("exit %s: ch=0x%p id=%s\n", __func__, ch, ch->id);
 }
@@ -1392,8 +1395,8 @@ static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg)
                (grp->port_persist == 0))
                fsm_deltimer(&priv->restart_timer);
 
-       wch = priv->channel[WRITE];
-       rch = priv->channel[READ];
+       wch = priv->channel[CTCM_WRITE];
+       rch = priv->channel[CTCM_READ];
 
        switch (grp->saved_state) {
        case MPCG_STATE_RESET:
@@ -1480,8 +1483,8 @@ static void mpc_action_timeout(fsm_instance *fi, int event, void *arg)
 
        priv = dev->ml_priv;
        grp = priv->mpcg;
-       wch = priv->channel[WRITE];
-       rch = priv->channel[READ];
+       wch = priv->channel[CTCM_WRITE];
+       rch = priv->channel[CTCM_READ];
 
        switch (fsm_getstate(grp->fsm)) {
        case MPCG_STATE_XID2INITW:
@@ -1586,7 +1589,7 @@ static int mpc_validate_xid(struct mpcg_info *mpcginfo)
        CTCM_D3_DUMP((char *)xid, XID2_LENGTH);
 
        /*the received direction should be the opposite of ours  */
-       if (((CHANNEL_DIRECTION(ch->flags) == READ) ? XID2_WRITE_SIDE :
+       if (((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? XID2_WRITE_SIDE :
                                XID2_READ_SIDE) != xid->xid2_dlc_type) {
                rc = 2;
                /* XID REJECTED: r/w channel pairing mismatch */
@@ -1912,7 +1915,7 @@ static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg)
        if (grp == NULL)
                return;
 
-       for (direction = READ; direction <= WRITE; direction++) {
+       for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
                struct channel *ch = priv->channel[direction];
                struct xid2 *thisxid = ch->xid;
                ch->xid_skb->data = ch->xid_skb_data;
@@ -2152,14 +2155,15 @@ static int mpc_send_qllc_discontact(struct net_device *dev)
                        return -ENOMEM;
                }
 
-               *((__u32 *)skb_push(skb, 4)) = priv->channel[READ]->pdu_seq;
-               priv->channel[READ]->pdu_seq++;
+               *((__u32 *)skb_push(skb, 4)) =
+                       priv->channel[CTCM_READ]->pdu_seq;
+               priv->channel[CTCM_READ]->pdu_seq++;
                CTCM_PR_DBGDATA("ctcmpc: %s ToDCM_pdu_seq= %08x\n",
-                               __func__, priv->channel[READ]->pdu_seq);
+                               __func__, priv->channel[CTCM_READ]->pdu_seq);
 
                /* receipt of CC03 resets anticipated sequence number on
                      receiving side */
-               priv->channel[READ]->pdu_seq = 0x00;
+               priv->channel[CTCM_READ]->pdu_seq = 0x00;
                skb_reset_mac_header(skb);
                skb->dev = dev;
                skb->protocol = htons(ETH_P_SNAP);
index 2b24550e865e672e9ef6761b667beba2231b235e..8305319b2a846c31328f5b773e8e0177cf6d41d0 100644 (file)
@@ -38,8 +38,8 @@ static ssize_t ctcm_buffer_write(struct device *dev,
        int bs1;
        struct ctcm_priv *priv = dev_get_drvdata(dev);
 
-       if (!(priv && priv->channel[READ] &&
-                       (ndev = priv->channel[READ]->netdev))) {
+       if (!(priv && priv->channel[CTCM_READ] &&
+                       (ndev = priv->channel[CTCM_READ]->netdev))) {
                CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev");
                return -ENODEV;
        }
@@ -55,12 +55,12 @@ static ssize_t ctcm_buffer_write(struct device *dev,
            (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
                                        goto einval;
 
-       priv->channel[READ]->max_bufsize = bs1;
-       priv->channel[WRITE]->max_bufsize = bs1;
+       priv->channel[CTCM_READ]->max_bufsize = bs1;
+       priv->channel[CTCM_WRITE]->max_bufsize = bs1;
        if (!(ndev->flags & IFF_RUNNING))
                ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
-       priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
-       priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
+       priv->channel[CTCM_READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
+       priv->channel[CTCM_WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
 
        CTCM_DBF_DEV(SETUP, ndev, buf);
        return count;
@@ -85,9 +85,9 @@ static void ctcm_print_statistics(struct ctcm_priv *priv)
        p += sprintf(p, "  Device FSM state: %s\n",
                     fsm_getstate_str(priv->fsm));
        p += sprintf(p, "  RX channel FSM state: %s\n",
-                    fsm_getstate_str(priv->channel[READ]->fsm));
+                    fsm_getstate_str(priv->channel[CTCM_READ]->fsm));
        p += sprintf(p, "  TX channel FSM state: %s\n",
-                    fsm_getstate_str(priv->channel[WRITE]->fsm));
+                    fsm_getstate_str(priv->channel[CTCM_WRITE]->fsm));
        p += sprintf(p, "  Max. TX buffer used: %ld\n",
-                    priv->channel[WRITE]->prof.maxmulti);
+                    priv->channel[CTCM_WRITE]->prof.maxmulti);
         p += sprintf(p, "  Max. chained SKBs: %ld\n",
-                    priv->channel[WRITE]->prof.maxcqueue);
+                    priv->channel[CTCM_WRITE]->prof.maxcqueue);
                     priv->channel[WRITE]->prof.tx_time);
 
        printk(KERN_INFO "Statistics for %s:\n%s",
-                               priv->channel[WRITE]->netdev->name, sbuf);
+                               priv->channel[CTCM_WRITE]->netdev->name, sbuf);
        kfree(sbuf);
        return;
 }
@@ -125,7 +125,7 @@ static ssize_t stats_write(struct device *dev, struct device_attribute *attr,
                return -ENODEV;
        /* Reset statistics */
-       memset(&priv->channel[WRITE]->prof, 0,
+       memset(&priv->channel[CTCM_WRITE]->prof, 0,
-                               sizeof(priv->channel[WRITE]->prof));
+                               sizeof(priv->channel[CTCM_WRITE]->prof));
        return count;
 }
 
index 848480bc2bf93846b14e43c0c94493cb2109f889..2308fbb4523a399abce0809860f6f398c8844ed5 100644 (file)
@@ -129,7 +129,7 @@ static inline void random_ether_addr(u8 *addr)
 /**
  * dev_hw_addr_random - Create random MAC and set device flag
  * @dev: pointer to net_device structure
- * @addr: Pointer to a six-byte array containing the Ethernet address
+ * @hwaddr: Pointer to a six-byte array containing the Ethernet address
  *
  * Generate random MAC to be used by a device and set addr_assign_type
  * so the state can be read by sysfs and be used by udev.
index c831467774d0e71963ccf774d310bd4f6dd91b8e..bed7a4682b90734935e3dd6680d5e646b0b895c0 100644 (file)
@@ -119,7 +119,7 @@ struct ethhdr {
        unsigned char   h_dest[ETH_ALEN];       /* destination eth addr */
        unsigned char   h_source[ETH_ALEN];     /* source ether addr    */
        __be16          h_proto;                /* packet type ID field */
-} __packed;
+} __attribute__((packed));
 
 #ifdef __KERNEL__
 #include <linux/skbuff.h>
index 9947c39e62f6fa49ea95505d0afc8053a27fbebb..e6dc11e7f9a54613f3700af8fe9d6557d8bd6135 100644 (file)
@@ -67,7 +67,7 @@ struct fddi_8022_1_hdr {
        __u8    dsap;                                   /* destination service access point */
        __u8    ssap;                                   /* source service access point */
        __u8    ctrl;                                   /* control byte #1 */
-} __packed;
+} __attribute__((packed));
 
 /* Define 802.2 Type 2 header */
 struct fddi_8022_2_hdr {
@@ -75,7 +75,7 @@ struct fddi_8022_2_hdr {
        __u8    ssap;                                   /* source service access point */
        __u8    ctrl_1;                                 /* control byte #1 */
        __u8    ctrl_2;                                 /* control byte #2 */
-} __packed;
+} __attribute__((packed));
 
 /* Define 802.2 SNAP header */
 #define FDDI_K_OUI_LEN 3
@@ -85,7 +85,7 @@ struct fddi_snap_hdr {
        __u8    ctrl;                                   /* always 0x03 */
        __u8    oui[FDDI_K_OUI_LEN];    /* organizational universal id */
        __be16  ethertype;                              /* packet type ID field */
-} __packed;
+} __attribute__((packed));
 
 /* Define FDDI LLC frame header */
 struct fddihdr {
@@ -98,7 +98,7 @@ struct fddihdr {
                struct fddi_8022_2_hdr          llc_8022_2;
                struct fddi_snap_hdr            llc_snap;
                } hdr;
-} __packed;
+} __attribute__((packed));
 
 #ifdef __KERNEL__
 #include <linux/netdevice.h>
index 5fe5f307c6f560f424f16252ad61a1a9bf4b7b45..cdc049f1829a8ca6ccc9aabb2103b1aacd3fb990 100644 (file)
@@ -104,7 +104,7 @@ struct hippi_fp_hdr {
        __be32          fixed;
 #endif
        __be32          d2_size;
-} __packed;
+} __attribute__((packed));
 
 struct hippi_le_hdr {
 #if defined (__BIG_ENDIAN_BITFIELD)
@@ -129,7 +129,7 @@ struct hippi_le_hdr {
        __u8            daddr[HIPPI_ALEN];
        __u16           locally_administered;
        __u8            saddr[HIPPI_ALEN];
-} __packed;
+} __attribute__((packed));
 
 #define HIPPI_OUI_LEN  3
 /*
@@ -142,12 +142,12 @@ struct hippi_snap_hdr {
        __u8    ctrl;                   /* always 0x03 */
        __u8    oui[HIPPI_OUI_LEN];     /* organizational universal id (zero)*/
        __be16  ethertype;              /* packet type ID field */
-} __packed;
+} __attribute__((packed));
 
 struct hippi_hdr {
        struct hippi_fp_hdr     fp;
        struct hippi_le_hdr     le;
        struct hippi_snap_hdr   snap;
-} __packed;
+} __attribute__((packed));
 
 #endif /* _LINUX_IF_HIPPI_H */
index 1925e0c3f1623e0d515a22ac6cbc01074fd83e68..27741e05446f97dfad3e5f35e83f328d02520588 100644 (file)
@@ -59,7 +59,7 @@ struct sockaddr_pppox {
        union{ 
                struct pppoe_addr       pppoe; 
        }sa_addr; 
-} __packed;
+} __attribute__((packed));
 
 /* The use of the above union isn't viable because the size of this
  * struct must stay fixed over time -- applications use sizeof(struct
@@ -70,7 +70,7 @@ struct sockaddr_pppol2tp {
        sa_family_t     sa_family;      /* address family, AF_PPPOX */
        unsigned int    sa_protocol;    /* protocol identifier */
        struct pppol2tp_addr pppol2tp;
-} __packed;
+} __attribute__((packed));
 
 /* The L2TPv3 protocol changes tunnel and session ids from 16 to 32
  * bits. So we need a different sockaddr structure.
@@ -79,7 +79,7 @@ struct sockaddr_pppol2tpv3 {
        sa_family_t     sa_family;      /* address family, AF_PPPOX */
        unsigned int    sa_protocol;    /* protocol identifier */
        struct pppol2tpv3_addr pppol2tp;
-} __packed;
+} __attribute__((packed));
 
 /*********************************************************************
  *
@@ -101,7 +101,7 @@ struct pppoe_tag {
        __be16 tag_type;
        __be16 tag_len;
        char tag_data[0];
-} __attribute ((packed));
+} __attribute__ ((packed));
 
 /* Tag identifiers */
 #define PTT_EOL                __cpu_to_be16(0x0000)
@@ -129,7 +129,7 @@ struct pppoe_hdr {
        __be16 sid;
        __be16 length;
        struct pppoe_tag tag[0];
-} __packed;
+} __attribute__((packed));
 
 /* Length of entire PPPoE + PPP header */
 #define PPPOE_SES_HLEN 8
index ab9e9e89e4074318405a595c1147642e344255c0..e62683ba88e6824e72b3e8c81c7315998868f606 100644 (file)
@@ -58,7 +58,7 @@ struct ipv6_opt_hdr {
        /* 
         * TLV encoded option data follows.
         */
-} __packed;    /* required for some archs */
+} __attribute__((packed));     /* required for some archs */
 
 #define ipv6_destopt_hdr ipv6_opt_hdr
 #define ipv6_hopopt_hdr  ipv6_opt_hdr
@@ -99,7 +99,7 @@ struct ipv6_destopt_hao {
        __u8                    type;
        __u8                    length;
        struct in6_addr         addr;
-} __packed;
+} __attribute__((packed));
 
 /*
  *     IPv6 fixed header
index bb58854a806196d8dea6ab9599daa244012ef5d8..d146ca10c0f52449dd528e9fec61be47e0c85221 100644 (file)
@@ -88,7 +88,7 @@ struct nbd_request {
        char handle[8];
        __be64 from;
        __be32 len;
-} __packed;
+} __attribute__((packed));
 
 /*
  * This is the reply packet that nbd-server sends back to the client after
index 3ace8370e61e9855cfda5d7237362c1538c701a6..99f0adeeb3f348e58c65312133217055a6ccf3c0 100644 (file)
@@ -27,7 +27,7 @@ struct ncp_request_header {
        __u8 conn_high;
        __u8 function;
        __u8 data[0];
-} __packed;
+} __attribute__((packed));
 
 #define NCP_REPLY                (0x3333)
 #define NCP_WATCHDOG            (0x3E3E)
@@ -42,7 +42,7 @@ struct ncp_reply_header {
        __u8 completion_code;
        __u8 connection_state;
        __u8 data[0];
-} __packed;
+} __attribute__((packed));
 
 #define NCP_VOLNAME_LEN (16)
 #define NCP_NUMBER_OF_VOLUMES (256)
@@ -158,7 +158,7 @@ struct nw_info_struct {
 #ifdef __KERNEL__
        struct nw_nfs_info nfs;
 #endif
-} __packed;
+} __attribute__((packed));
 
 /* modify mask - use with MODIFY_DOS_INFO structure */
 #define DM_ATTRIBUTES            (cpu_to_le32(0x02))
@@ -190,12 +190,12 @@ struct nw_modify_dos_info {
        __u16 inheritanceGrantMask;
        __u16 inheritanceRevokeMask;
        __u32 maximumSpace;
-} __packed;
+} __attribute__((packed));
 
 struct nw_search_sequence {
        __u8 volNumber;
        __u32 dirBase;
        __u32 sequence;
-} __packed;
+} __attribute__((packed));
 
 #endif                         /* _LINUX_NCP_H */
index 3e1aa1be942ef2297637d0bf4a778eec605d7040..208ae938733143ce0ba2117378423d443a3d8312 100644 (file)
@@ -39,7 +39,7 @@ struct idletimer_tg_info {
        char label[MAX_IDLETIMER_LABEL_SIZE];
 
        /* for kernel module internal use only */
-       struct idletimer_tg *timer __attribute((aligned(8)));
+       struct idletimer_tg *timer __attribute__((aligned(8)));
 };
 
 #endif
index 1167aeb7a34793ff288aff2b7190a966378e5080..eff34ac1880883f70d88282a8ac881a1e8bff8a5 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _XT_IPVS_H
 #define _XT_IPVS_H
 
+#include <linux/types.h>
+
 enum {
        XT_IPVS_IPVS_PROPERTY = 1 << 0, /* all other options imply this one */
        XT_IPVS_PROTO =         1 << 1,
index 413742c92d14efce0e98743a101a67fb3e8c2f97..791d5109f34c12207de65f06cca05fa4b35b44b8 100644 (file)
@@ -122,7 +122,7 @@ static inline int netpoll_tx_running(struct net_device *dev)
 }
 
 #else
-static inline int netpoll_rx(struct sk_buff *skb)
+static inline bool netpoll_rx(struct sk_buff *skb)
 {
        return 0;
 }
index 24426c3d6b5ab34a463cf3ee650b497d9e545522..76edadf046d3d3b68215c23ef2a30ce18a86992a 100644 (file)
@@ -56,7 +56,7 @@ struct phonethdr {
        __be16  pn_length;
        __u8    pn_robj;
        __u8    pn_sobj;
-} __packed;
+} __attribute__((packed));
 
 /* Common Phonet payload header */
 struct phonetmsg {
@@ -98,7 +98,7 @@ struct sockaddr_pn {
        __u8 spn_dev;
        __u8 spn_resource;
        __u8 spn_zero[sizeof(struct sockaddr) - sizeof(sa_family_t) - 3];
-} __packed;
+} __attribute__((packed));
 
 /* Well known address */
 #define PN_DEV_PC      0x10
diff --git a/include/linux/pxa168_eth.h b/include/linux/pxa168_eth.h
new file mode 100644 (file)
index 0000000..18d75e7
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ *pxa168 ethernet platform device data definition file.
+ */
+#ifndef __LINUX_PXA168_ETH_H
+#define __LINUX_PXA168_ETH_H
+
+struct pxa168_eth_platform_data {
+       int     port_number;
+       int     phy_addr;
+
+       /*
+        * If speed is 0, then speed and duplex are autonegotiated.
+        */
+       int     speed;          /* 0, SPEED_10, SPEED_100 */
+       int     duplex;         /* DUPLEX_HALF or DUPLEX_FULL */
+
+       /*
+        * Override default RX/TX queue sizes if nonzero.
+        */
+       int     rx_queue_size;
+       int     tx_queue_size;
+
+       /*
+        * init callback is used for board specific initialization
+        * e.g on Aspenite its used to initialize the PHY transceiver.
+        */
+       int (*init)(void);
+};
+
+#endif /* __LINUX_PXA168_ETH_H */
index 4f82326eb2945f2cd275bbcff64151a06ca6ccb4..08c32e4f261aca004ac06d895a1a8bbd73cfd887 100644 (file)
@@ -81,7 +81,7 @@ struct rfkill_event {
        __u8  type;
        __u8  op;
        __u8  soft, hard;
-} __packed;
+} __attribute__((packed));
 
 /*
  * We are planning to be backward and forward compatible with changes
index a441c9cdd62540cd7100189c5fa7aea2f6b0b22b..ac53bfbdfe16b57038cf6c0b7f88cc88f5221594 100644 (file)
@@ -195,7 +195,8 @@ struct sock_common {
   *    @sk_priority: %SO_PRIORITY setting
   *    @sk_type: socket type (%SOCK_STREAM, etc)
   *    @sk_protocol: which protocol this socket belongs in this network family
-  *    @sk_peercred: %SO_PEERCRED setting
+  *    @sk_peer_pid: &struct pid for this socket's peer
+  *    @sk_peer_cred: %SO_PEERCRED setting
   *    @sk_rcvlowat: %SO_RCVLOWAT setting
   *    @sk_rcvtimeo: %SO_RCVTIMEO setting
   *    @sk_sndtimeo: %SO_SNDTIMEO setting
@@ -211,6 +212,7 @@ struct sock_common {
   *    @sk_send_head: front of stuff to transmit
   *    @sk_security: used by security modules
   *    @sk_mark: generic packet mark
+  *    @sk_classid: this socket's cgroup classid
   *    @sk_write_pending: a write to stream socket waits to start
   *    @sk_state_change: callback to indicate change in the state of the sock
   *    @sk_data_ready: callback to indicate there is data to be processed
index df6a2eb20193f935bc72204ca22c7f2f21b5070f..eaa9582779d029a9362c32b3816fed8c157e4d3e 100644 (file)
@@ -268,11 +268,21 @@ static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
        return seq3 - seq2 >= seq1 - seq2;
 }
 
-static inline int tcp_too_many_orphans(struct sock *sk, int num)
+static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
 {
-       return (num > sysctl_tcp_max_orphans) ||
-               (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
-                atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]);
+       struct percpu_counter *ocp = sk->sk_prot->orphan_count;
+       int orphans = percpu_counter_read_positive(ocp);
+
+       if (orphans << shift > sysctl_tcp_max_orphans) {
+               orphans = percpu_counter_sum_positive(ocp);
+               if (orphans << shift > sysctl_tcp_max_orphans)
+                       return true;
+       }
+
+       if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
+           atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
+               return true;
+       return false;
 }
 
 /* syncookies: remember time of last synqueue overflow */
index 3d59c9bf8febf5064150ff50d22db9d569453131..3bccdd12a2642a06e1c5078b5c4065419a57428a 100644 (file)
@@ -510,7 +510,8 @@ static int vlan_dev_open(struct net_device *dev)
        if (vlan->flags & VLAN_FLAG_GVRP)
                vlan_gvrp_request_join(dev);
 
-       netif_carrier_on(dev);
+       if (netif_carrier_ok(real_dev))
+               netif_carrier_on(dev);
        return 0;
 
 clear_allmulti:
index 2ce79df00680eee3d9d290fd1156ac1d2ab74557..c7d81436213d13e24e67edf2630930e5c4ed08c0 100644 (file)
@@ -112,8 +112,8 @@ void ax25_ds_heartbeat_expiry(ax25_cb *ax25)
                        if (sk) {
                                sock_hold(sk);
                                ax25_destroy_socket(ax25);
-                               sock_put(sk);
                                bh_unlock_sock(sk);
+                               sock_put(sk);
                        } else
                                ax25_destroy_socket(ax25);
                        return;
index 2c911c0759c27bb6e4867b3bce9af0af3946d288..5ed00bd7009f55d1bdb5b8697889594442848e3f 100644 (file)
@@ -162,8 +162,8 @@ static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
                if (tmp) {
                        memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info));
                        atomic_set(&tmp->use, 1);
-                       nf_bridge_put(nf_bridge);
                }
+               nf_bridge_put(nf_bridge);
                nf_bridge = tmp;
        }
        return nf_bridge;
index 01f238ff23466912ad1c243b6ca88b8a0c998f8b..c49a6695793ac8361b072077aefa4e2b863e8f5c 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/hardirq.h>
 #include <net/caif/cfpkt.h>
 
-#define PKT_PREFIX  16
+#define PKT_PREFIX  48
 #define PKT_POSTFIX 2
 #define PKT_LEN_WHEN_EXTENDING 128
 #define PKT_ERROR(pkt, errmsg) do {       \
index eb1602022ac0643af4e4ad655c3de4ae42a200f4..9a699242d104be7fbef70ed4560d6ca3a63ebda3 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/stddef.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
-#include <linux/unaligned/le_byteshift.h>
+#include <asm/unaligned.h>
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfsrvl.h>
 #include <net/caif/cfpkt.h>
index 9c65e9deb9c3ff6d6f958f4b52ee7e85d6299935..08ffe9e4be20aa49ab2bba82c36c5e988a4880fd 100644 (file)
 #include <net/sock.h>
 #include <net/net_namespace.h>
 
+/*
+ * To send multiple CAN frame content within TX_SETUP or to filter
+ * CAN messages with multiplex index within RX_SETUP, the number of
+ * different filters is limited to 256 due to the one byte index value.
+ */
+#define MAX_NFRAMES 256
+
 /* use of last_frames[index].can_dlc */
 #define RX_RECV    0x40 /* received data for this element */
 #define RX_THR     0x80 /* element not been sent due to throttle feature */
@@ -89,16 +96,16 @@ struct bcm_op {
        struct list_head list;
        int ifindex;
        canid_t can_id;
-       int flags;
+       u32 flags;
        unsigned long frames_abs, frames_filtered;
        struct timeval ival1, ival2;
        struct hrtimer timer, thrtimer;
        struct tasklet_struct tsklet, thrtsklet;
        ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
        int rx_ifindex;
-       int count;
-       int nframes;
-       int currframe;
+       u32 count;
+       u32 nframes;
+       u32 currframe;
        struct can_frame *frames;
        struct can_frame *last_frames;
        struct can_frame sframe;
@@ -175,7 +182,7 @@ static int bcm_proc_show(struct seq_file *m, void *v)
 
                seq_printf(m, "rx_op: %03X %-5s ",
                                op->can_id, bcm_proc_getifname(ifname, op->ifindex));
-               seq_printf(m, "[%d]%c ", op->nframes,
+               seq_printf(m, "[%u]%c ", op->nframes,
                                (op->flags & RX_CHECK_DLC)?'d':' ');
                if (op->kt_ival1.tv64)
                        seq_printf(m, "timeo=%lld ",
@@ -198,7 +205,7 @@ static int bcm_proc_show(struct seq_file *m, void *v)
 
        list_for_each_entry(op, &bo->tx_ops, list) {
 
-               seq_printf(m, "tx_op: %03X %s [%d] ",
+               seq_printf(m, "tx_op: %03X %s [%u] ",
                                op->can_id,
                                bcm_proc_getifname(ifname, op->ifindex),
                                op->nframes);
@@ -283,7 +290,7 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
        struct can_frame *firstframe;
        struct sockaddr_can *addr;
        struct sock *sk = op->sk;
-       int datalen = head->nframes * CFSIZ;
+       unsigned int datalen = head->nframes * CFSIZ;
        int err;
 
        skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
@@ -468,7 +475,7 @@ rx_changed_settime:
  * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
  *                       received data stored in op->last_frames[]
  */
-static void bcm_rx_cmp_to_index(struct bcm_op *op, int index,
+static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
                                const struct can_frame *rxdata)
 {
        /*
@@ -554,7 +561,8 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
 /*
  * bcm_rx_do_flush - helper for bcm_rx_thr_flush
  */
-static inline int bcm_rx_do_flush(struct bcm_op *op, int update, int index)
+static inline int bcm_rx_do_flush(struct bcm_op *op, int update,
+                                 unsigned int index)
 {
        if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
                if (update)
@@ -575,7 +583,7 @@ static int bcm_rx_thr_flush(struct bcm_op *op, int update)
        int updated = 0;
 
        if (op->nframes > 1) {
-               int i;
+               unsigned int i;
 
                /* for MUX filter we start at index 1 */
                for (i = 1; i < op->nframes; i++)
@@ -624,7 +632,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
 {
        struct bcm_op *op = (struct bcm_op *)data;
        const struct can_frame *rxframe = (struct can_frame *)skb->data;
-       int i;
+       unsigned int i;
 
        /* disable timeout */
        hrtimer_cancel(&op->timer);
@@ -822,14 +830,15 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 {
        struct bcm_sock *bo = bcm_sk(sk);
        struct bcm_op *op;
-       int i, err;
+       unsigned int i;
+       int err;
 
        /* we need a real device to send frames */
        if (!ifindex)
                return -ENODEV;
 
-       /* we need at least one can_frame */
-       if (msg_head->nframes < 1)
+       /* check nframes boundaries - we need at least one can_frame */
+       if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
                return -EINVAL;
 
        /* check the given can_id */
@@ -993,6 +1002,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
                msg_head->nframes = 0;
        }
 
+       /* the first element contains the mux-mask => MAX_NFRAMES + 1  */
+       if (msg_head->nframes > MAX_NFRAMES + 1)
+               return -EINVAL;
+
        if ((msg_head->flags & RX_RTR_FRAME) &&
            ((msg_head->nframes != 1) ||
             (!(msg_head->can_id & CAN_RTR_FLAG))))
index 1ae654391442049083fae7040d5d1e5568f2b424..3721fbb9a83c3c7761c05ae39d8acab21b6f6b66 100644 (file)
@@ -3143,7 +3143,7 @@ pull:
                        put_page(skb_shinfo(skb)->frags[0].page);
                        memmove(skb_shinfo(skb)->frags,
                                skb_shinfo(skb)->frags + 1,
-                               --skb_shinfo(skb)->nr_frags);
+                               --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
                }
        }
 
index 11201784d29a32d6fdddd972a8348d3202de0ec9..87bb5f4de0e84601817a0f63733a725ebb478b03 100644 (file)
@@ -1,7 +1,7 @@
 menuconfig NET_DSA
        bool "Distributed Switch Architecture support"
        default n
-       depends on EXPERIMENTAL && NET_ETHERNET && !S390
+       depends on EXPERIMENTAL && NETDEVICES && !S390
        select PHYLIB
        ---help---
          This allows you to use hardware switch chips that use
index 6bccba31d13208d03f042002f5808c3396c1f37f..e8f4f9a57f1258fb1a589c87a956797019c67504 100644 (file)
@@ -735,6 +735,7 @@ static void get_counters(const struct xt_table_info *t,
                if (cpu == curcpu)
                        continue;
                i = 0;
+               local_bh_disable();
                xt_info_wrlock(cpu);
                xt_entry_foreach(iter, t->entries[cpu], t->size) {
                        ADD_COUNTER(counters[i], iter->counters.bcnt,
@@ -742,6 +743,7 @@ static void get_counters(const struct xt_table_info *t,
                        ++i;
                }
                xt_info_wrunlock(cpu);
+               local_bh_enable();
        }
        put_cpu();
 }
@@ -1418,6 +1420,9 @@ static int translate_compat_table(const char *name,
                if (ret != 0)
                        break;
                ++i;
+               if (strcmp(arpt_get_target(iter1)->u.user.name,
+                   XT_ERROR_TARGET) == 0)
+                       ++newinfo->stacksize;
        }
        if (ret) {
                /*
index c439721b165a6369acd1bd2ea1d64d4b0580b1bb..d163f2e3b2e99e5f18ae9997d3c74867b3e79354 100644 (file)
@@ -909,6 +909,7 @@ get_counters(const struct xt_table_info *t,
                if (cpu == curcpu)
                        continue;
                i = 0;
+               local_bh_disable();
                xt_info_wrlock(cpu);
                xt_entry_foreach(iter, t->entries[cpu], t->size) {
                        ADD_COUNTER(counters[i], iter->counters.bcnt,
@@ -916,6 +917,7 @@ get_counters(const struct xt_table_info *t,
                        ++i; /* macro does multi eval of i */
                }
                xt_info_wrunlock(cpu);
+               local_bh_enable();
        }
        put_cpu();
 }
@@ -1749,6 +1751,9 @@ translate_compat_table(struct net *net,
                if (ret != 0)
                        break;
                ++i;
+               if (strcmp(ipt_get_target(iter1)->u.user.name,
+                   XT_ERROR_TARGET) == 0)
+                       ++newinfo->stacksize;
        }
        if (ret) {
                /*
index 176e11aaea771795b21c0be6b1453b46c6349f0c..3fb1428e526eedb521057a49624fa28dde8b41cd 100644 (file)
@@ -451,7 +451,8 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
                                if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
                                        mask |= POLLOUT | POLLWRNORM;
                        }
-               }
+               } else
+                       mask |= POLLOUT | POLLWRNORM;
 
                if (tp->urg_data & TCP_URG_VALID)
                        mask |= POLLPRI;
@@ -2011,11 +2012,8 @@ adjudge_to_death:
                }
        }
        if (sk->sk_state != TCP_CLOSE) {
-               int orphan_count = percpu_counter_read_positive(
-                                               sk->sk_prot->orphan_count);
-
                sk_mem_reclaim(sk);
-               if (tcp_too_many_orphans(sk, orphan_count)) {
+               if (tcp_too_many_orphans(sk, 0)) {
                        if (net_ratelimit())
                                printk(KERN_INFO "TCP: too many of orphaned "
                                       "sockets\n");
@@ -3212,7 +3210,7 @@ void __init tcp_init(void)
 {
        struct sk_buff *skb = NULL;
        unsigned long nr_pages, limit;
-       int order, i, max_share;
+       int i, max_share, cnt;
        unsigned long jiffy = jiffies;
 
        BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
@@ -3261,22 +3259,12 @@ void __init tcp_init(void)
                INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
        }
 
-       /* Try to be a bit smarter and adjust defaults depending
-        * on available memory.
-        */
-       for (order = 0; ((1 << order) << PAGE_SHIFT) <
-                       (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
-                       order++)
-               ;
-       if (order >= 4) {
-               tcp_death_row.sysctl_max_tw_buckets = 180000;
-               sysctl_tcp_max_orphans = 4096 << (order - 4);
-               sysctl_max_syn_backlog = 1024;
-       } else if (order < 3) {
-               tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
-               sysctl_tcp_max_orphans >>= (3 - order);
-               sysctl_max_syn_backlog = 128;
-       }
+
+       cnt = tcp_hashinfo.ehash_mask + 1;
+
+       tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
+       sysctl_tcp_max_orphans = cnt / 2;
+       sysctl_max_syn_backlog = max(128, cnt / 256);
 
        /* Set the pressure threshold to be a fraction of global memory that
         * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
index 0ec9bd0ae94f2ef37024ac11c5e411f0179c816a..850c737e08e2a4a9185c64c8776711d83b8c7b08 100644 (file)
@@ -196,10 +196,10 @@ void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
 int tcp_set_allowed_congestion_control(char *val)
 {
        struct tcp_congestion_ops *ca;
-       char *clone, *name;
+       char *saved_clone, *clone, *name;
        int ret = 0;
 
-       clone = kstrdup(val, GFP_USER);
+       saved_clone = clone = kstrdup(val, GFP_USER);
        if (!clone)
                return -ENOMEM;
 
@@ -226,6 +226,7 @@ int tcp_set_allowed_congestion_control(char *val)
        }
 out:
        spin_unlock(&tcp_cong_list_lock);
+       kfree(saved_clone);
 
        return ret;
 }
index 808bb920c9f5e67249f6747c90cdeb7dddd9ddea..c35b469e851c298814d69583bd593ec1c580dde5 100644 (file)
@@ -66,18 +66,18 @@ static void tcp_write_err(struct sock *sk)
 static int tcp_out_of_resources(struct sock *sk, int do_reset)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       int orphans = percpu_counter_read_positive(&tcp_orphan_count);
+       int shift = 0;
 
        /* If peer does not open window for long time, or did not transmit
         * anything for long time, penalize it. */
        if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
-               orphans <<= 1;
+               shift++;
 
        /* If some dubious ICMP arrived, penalize even more. */
        if (sk->sk_err_soft)
-               orphans <<= 1;
+               shift++;
 
-       if (tcp_too_many_orphans(sk, orphans)) {
+       if (tcp_too_many_orphans(sk, shift)) {
                if (net_ratelimit())
                        printk(KERN_INFO "Out of socket memory\n");
 
index 5359ef4daac5230e4c691c2100d8016dcb69e102..8e754be92c2450e7142a958466b493dc077f5772 100644 (file)
@@ -922,6 +922,7 @@ get_counters(const struct xt_table_info *t,
                if (cpu == curcpu)
                        continue;
                i = 0;
+               local_bh_disable();
                xt_info_wrlock(cpu);
                xt_entry_foreach(iter, t->entries[cpu], t->size) {
                        ADD_COUNTER(counters[i], iter->counters.bcnt,
@@ -929,6 +930,7 @@ get_counters(const struct xt_table_info *t,
                        ++i;
                }
                xt_info_wrunlock(cpu);
+               local_bh_enable();
        }
        put_cpu();
 }
@@ -1764,6 +1766,9 @@ translate_compat_table(struct net *net,
                if (ret != 0)
                        break;
                ++i;
+               if (strcmp(ip6t_get_target(iter1)->u.user.name,
+                   XT_ERROR_TARGET) == 0)
+                       ++newinfo->stacksize;
        }
        if (ret) {
                /*
index 8f2d0400cf8ae616ad4283a6d846d8fa4b684d40..d126365ac0463bc075d62446562a8d9f07d548a2 100644 (file)
@@ -2580,7 +2580,7 @@ ctl_table ipv6_route_table_template[] = {
                .data           =       &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
                .maxlen         =       sizeof(int),
                .mode           =       0644,
-               .proc_handler   =       proc_dointvec_jiffies,
+               .proc_handler   =       proc_dointvec,
        },
        {
                .procname       =       "mtu_expires",
@@ -2594,7 +2594,7 @@ ctl_table ipv6_route_table_template[] = {
                .data           =       &init_net.ipv6.sysctl.ip6_rt_min_advmss,
                .maxlen         =       sizeof(int),
                .mode           =       0644,
-               .proc_handler   =       proc_dointvec_jiffies,
+               .proc_handler   =       proc_dointvec,
        },
        {
                .procname       =       "gc_min_interval_ms",
index 79986a674f6ea23329fb5a3960f1d3ece82b9a9d..fd55b5135de5aad91f547281a9c7a07c44f676b0 100644 (file)
@@ -824,8 +824,8 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 
        err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name);
        if (err < 0) {
-               kfree(self->ias_obj->name);
-               kfree(self->ias_obj);
+               irias_delete_object(self->ias_obj);
+               self->ias_obj = NULL;
                goto out;
        }
 
index 9616c32d1076dda982fff4c6da5c6e1057cf39d1..5bb8353105cca7c761647b94c346cc1da82c42e9 100644 (file)
@@ -169,6 +169,7 @@ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
 {
        struct irlan_cb *self = netdev_priv(dev);
        int ret;
+       unsigned int len;
 
        /* skb headroom large enough to contain all IrDA-headers? */
        if ((skb_headroom(skb) < self->max_header_size) || (skb_shared(skb))) {
@@ -188,6 +189,7 @@ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
 
        dev->trans_start = jiffies;
 
+       len = skb->len;
        /* Now queue the packet in the transport layer */
        if (self->use_udata)
                ret = irttp_udata_request(self->tsap_data, skb);
@@ -209,7 +211,7 @@ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
                self->stats.tx_dropped++;
        } else {
                self->stats.tx_packets++;
-               self->stats.tx_bytes += skb->len;
+               self->stats.tx_bytes += len;
        }
 
        return NETDEV_TX_OK;
index 58c6c4cda73b576cd65d3121a9abdf67bc6f7668..1ae697681bc735f3f4ed8cf98f9f63745157be36 100644 (file)
@@ -132,7 +132,7 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
                printk("\n");
        }
 
-       if (data_len < ETH_HLEN)
+       if (!pskb_may_pull(skb, sizeof(ETH_HLEN)))
                goto error;
 
        secpath_reset(skb);
index 2cbf380377d5e009fa972d85af18ded9971a5248..cd96ed3ccee4602a9fee20464e4a54d3fb0783b2 100644 (file)
@@ -1406,7 +1406,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
        struct netlink_sock *nlk = nlk_sk(sk);
        int noblock = flags&MSG_DONTWAIT;
        size_t copied;
-       struct sk_buff *skb;
+       struct sk_buff *skb, *data_skb;
        int err;
 
        if (flags&MSG_OOB)
@@ -1418,59 +1418,35 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
        if (skb == NULL)
                goto out;
 
+       data_skb = skb;
+
 #ifdef CONFIG_COMPAT_NETLINK_MESSAGES
        if (unlikely(skb_shinfo(skb)->frag_list)) {
-               bool need_compat = !!(flags & MSG_CMSG_COMPAT);
-
                /*
-                * If this skb has a frag_list, then here that means that
-                * we will have to use the frag_list skb for compat tasks
-                * and the regular skb for non-compat tasks.
+                * If this skb has a frag_list, then here that means that we
+                * will have to use the frag_list skb's data for compat tasks
+                * and the regular skb's data for normal (non-compat) tasks.
                 *
-                * The skb might (and likely will) be cloned, so we can't
-                * just reset frag_list and go on with things -- we need to
-                * keep that. For the compat case that's easy -- simply get
-                * a reference to the compat skb and free the regular one
-                * including the frag. For the non-compat case, we need to
-                * avoid sending the frag to the user -- so assign NULL but
-                * restore it below before freeing the skb.
+                * If we need to send the compat skb, assign it to the
+                * 'data_skb' variable so that it will be used below for data
+                * copying. We keep 'skb' for everything else, including
+                * freeing both later.
                 */
-               if (need_compat) {
-                       struct sk_buff *compskb = skb_shinfo(skb)->frag_list;
-                       skb_get(compskb);
-                       kfree_skb(skb);
-                       skb = compskb;
-               } else {
-                       /*
-                        * Before setting frag_list to NULL, we must get a
-                        * private copy of skb if shared (because of MSG_PEEK)
-                        */
-                       if (skb_shared(skb)) {
-                               struct sk_buff *nskb;
-
-                               nskb = pskb_copy(skb, GFP_KERNEL);
-                               kfree_skb(skb);
-                               skb = nskb;
-                               err = -ENOMEM;
-                               if (!skb)
-                                       goto out;
-                       }
-                       kfree_skb(skb_shinfo(skb)->frag_list);
-                       skb_shinfo(skb)->frag_list = NULL;
-               }
+               if (flags & MSG_CMSG_COMPAT)
+                       data_skb = skb_shinfo(skb)->frag_list;
        }
 #endif
 
        msg->msg_namelen = 0;
 
-       copied = skb->len;
+       copied = data_skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }
 
-       skb_reset_transport_header(skb);
-       err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+       skb_reset_transport_header(data_skb);
+       err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied);
 
        if (msg->msg_name) {
                struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
@@ -1490,7 +1466,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
        }
        siocb->scm->creds = *NETLINK_CREDS(skb);
        if (flags & MSG_TRUNC)
-               copied = skb->len;
+               copied = data_skb->len;
 
        skb_free_datagram(sk, skb);
 
@@ -2126,6 +2102,26 @@ static void __net_exit netlink_net_exit(struct net *net)
 #endif
 }
 
+static void __init netlink_add_usersock_entry(void)
+{
+       unsigned long *listeners;
+       int groups = 32;
+
+       listeners = kzalloc(NLGRPSZ(groups) + sizeof(struct listeners_rcu_head),
+                           GFP_KERNEL);
+       if (!listeners)
+               panic("netlink_add_usersock_entry: Cannot allocate listneres\n");
+
+       netlink_table_grab();
+
+       nl_table[NETLINK_USERSOCK].groups = groups;
+       nl_table[NETLINK_USERSOCK].listeners = listeners;
+       nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
+       nl_table[NETLINK_USERSOCK].registered = 1;
+
+       netlink_table_ungrab();
+}
+
 static struct pernet_operations __net_initdata netlink_net_ops = {
        .init = netlink_net_init,
        .exit = netlink_net_exit,
@@ -2174,6 +2170,8 @@ static int __init netlink_proto_init(void)
                hash->rehash_time = jiffies;
        }
 
+       netlink_add_usersock_entry();
+
        sock_register(&netlink_family_ops);
        register_pernet_subsys(&netlink_net_ops);
        /* The netlink device handler may be needed early. */
index 795a00b7f2cb7aa6a539adb3c86a2cad180575f1..c93588c2d553cf6b162ab500cf1fd72dbbc9c26c 100644 (file)
@@ -297,7 +297,7 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
 int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
 {
        struct rds_notifier *notifier;
-       struct rds_rdma_notify cmsg;
+       struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */
        unsigned int count = 0, max_messages = ~0U;
        unsigned long flags;
        LIST_HEAD(copy);
index 8406c66549909c763a6b2ae3d7594ea5aeceb361..c2ed90a4c0b428a984c7329e1af0f8d3957a0ae4 100644 (file)
@@ -152,21 +152,24 @@ static int tcf_gact(struct sk_buff *skb, struct tc_action *a, struct tcf_result
 static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 {
        unsigned char *b = skb_tail_pointer(skb);
-       struct tc_gact opt;
        struct tcf_gact *gact = a->priv;
+       struct tc_gact opt = {
+               .index   = gact->tcf_index,
+               .refcnt  = gact->tcf_refcnt - ref,
+               .bindcnt = gact->tcf_bindcnt - bind,
+               .action  = gact->tcf_action,
+       };
        struct tcf_t t;
 
-       opt.index = gact->tcf_index;
-       opt.refcnt = gact->tcf_refcnt - ref;
-       opt.bindcnt = gact->tcf_bindcnt - bind;
-       opt.action = gact->tcf_action;
        NLA_PUT(skb, TCA_GACT_PARMS, sizeof(opt), &opt);
 #ifdef CONFIG_GACT_PROB
        if (gact->tcfg_ptype) {
-               struct tc_gact_p p_opt;
-               p_opt.paction = gact->tcfg_paction;
-               p_opt.pval = gact->tcfg_pval;
-               p_opt.ptype = gact->tcfg_ptype;
+               struct tc_gact_p p_opt = {
+                       .paction = gact->tcfg_paction,
+                       .pval    = gact->tcfg_pval,
+                       .ptype   = gact->tcfg_ptype,
+               };
+
                NLA_PUT(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt);
        }
 #endif
index 11f195af2da0732aaf362380928e298f7f35a199..0c311be9282798ea6b27d1109f482bfdb20c7dac 100644 (file)
@@ -219,15 +219,16 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, i
 {
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_mirred *m = a->priv;
-       struct tc_mirred opt;
+       struct tc_mirred opt = {
+               .index   = m->tcf_index,
+               .action  = m->tcf_action,
+               .refcnt  = m->tcf_refcnt - ref,
+               .bindcnt = m->tcf_bindcnt - bind,
+               .eaction = m->tcfm_eaction,
+               .ifindex = m->tcfm_ifindex,
+       };
        struct tcf_t t;
 
-       opt.index = m->tcf_index;
-       opt.action = m->tcf_action;
-       opt.refcnt = m->tcf_refcnt - ref;
-       opt.bindcnt = m->tcf_bindcnt - bind;
-       opt.eaction = m->tcfm_eaction;
-       opt.ifindex = m->tcfm_ifindex;
        NLA_PUT(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt);
        t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install);
        t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse);
index 509a2d53a99d483980d33ab4b71460d209cac0dc..186eb837e600da750dd3347f96f033478d131b76 100644 (file)
@@ -272,19 +272,19 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
 {
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_nat *p = a->priv;
-       struct tc_nat opt;
+       struct tc_nat opt = {
+               .old_addr = p->old_addr,
+               .new_addr = p->new_addr,
+               .mask     = p->mask,
+               .flags    = p->flags,
+
+               .index    = p->tcf_index,
+               .action   = p->tcf_action,
+               .refcnt   = p->tcf_refcnt - ref,
+               .bindcnt  = p->tcf_bindcnt - bind,
+       };
        struct tcf_t t;
 
-       opt.old_addr = p->old_addr;
-       opt.new_addr = p->new_addr;
-       opt.mask = p->mask;
-       opt.flags = p->flags;
-
-       opt.index = p->tcf_index;
-       opt.action = p->tcf_action;
-       opt.refcnt = p->tcf_refcnt - ref;
-       opt.bindcnt = p->tcf_bindcnt - bind;
-
        NLA_PUT(skb, TCA_NAT_PARMS, sizeof(opt), &opt);
        t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
        t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
index 4a1d640b0cf16d842a26e3de807df2d937752cc8..97e84f3ee77563fb14aa7b8e0faf2db476a81501 100644 (file)
@@ -164,13 +164,14 @@ static inline int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
 {
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_defact *d = a->priv;
-       struct tc_defact opt;
+       struct tc_defact opt = {
+               .index   = d->tcf_index,
+               .refcnt  = d->tcf_refcnt - ref,
+               .bindcnt = d->tcf_bindcnt - bind,
+               .action  = d->tcf_action,
+       };
        struct tcf_t t;
 
-       opt.index = d->tcf_index;
-       opt.refcnt = d->tcf_refcnt - ref;
-       opt.bindcnt = d->tcf_bindcnt - bind;
-       opt.action = d->tcf_action;
        NLA_PUT(skb, TCA_DEF_PARMS, sizeof(opt), &opt);
        NLA_PUT_STRING(skb, TCA_DEF_DATA, d->tcfd_defdata);
        t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
index e9607fe55b58006af76880c8e5364a16c3cebdbd..66cbf4eb8855452477ec0f459d69cd6db2559ce7 100644 (file)
@@ -159,13 +159,14 @@ static inline int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
 {
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_skbedit *d = a->priv;
-       struct tc_skbedit opt;
+       struct tc_skbedit opt = {
+               .index   = d->tcf_index,
+               .refcnt  = d->tcf_refcnt - ref,
+               .bindcnt = d->tcf_bindcnt - bind,
+               .action  = d->tcf_action,
+       };
        struct tcf_t t;
 
-       opt.index = d->tcf_index;
-       opt.refcnt = d->tcf_refcnt - ref;
-       opt.bindcnt = d->tcf_bindcnt - bind;
-       opt.action = d->tcf_action;
        NLA_PUT(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt);
        if (d->flags & SKBEDIT_F_PRIORITY)
                NLA_PUT(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority),
index b9e8c3b7d406aacd9cc2319ddbffe0440cc6c311..408eea7086aace341d1ed9812d956db35cfdfb86 100644 (file)
@@ -150,22 +150,34 @@ int register_qdisc(struct Qdisc_ops *qops)
        if (qops->enqueue == NULL)
                qops->enqueue = noop_qdisc_ops.enqueue;
        if (qops->peek == NULL) {
-               if (qops->dequeue == NULL) {
+               if (qops->dequeue == NULL)
                        qops->peek = noop_qdisc_ops.peek;
-               } else {
-                       rc = -EINVAL;
-                       goto out;
-               }
+               else
+                       goto out_einval;
        }
        if (qops->dequeue == NULL)
                qops->dequeue = noop_qdisc_ops.dequeue;
 
+       if (qops->cl_ops) {
+               const struct Qdisc_class_ops *cops = qops->cl_ops;
+
+               if (!(cops->get && cops->put && cops->walk && cops->leaf))
+                       goto out_einval;
+
+               if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
+                       goto out_einval;
+       }
+
        qops->next = NULL;
        *qp = qops;
        rc = 0;
 out:
        write_unlock(&qdisc_mod_lock);
        return rc;
+
+out_einval:
+       rc = -EINVAL;
+       goto out;
 }
 EXPORT_SYMBOL(register_qdisc);
 
index e114f23d5eaeb189428fbf4d307669da80e4f255..3406627895298324fdd9d27186ad8c9c8d9a9964 100644 (file)
@@ -418,7 +418,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        }
 
        ret = qdisc_enqueue(skb, flow->q);
-       if (ret != 0) {
+       if (ret != NET_XMIT_SUCCESS) {
 drop: __maybe_unused
                if (net_xmit_drop_count(ret)) {
                        sch->qstats.drops++;
@@ -442,7 +442,7 @@ drop: __maybe_unused
         */
        if (flow == &p->link) {
                sch->q.qlen++;
-               return 0;
+               return NET_XMIT_SUCCESS;
        }
        tasklet_schedule(&p->task);
        return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
index 534f33231c17d83593f124fa62d110d630c2fc0d..201cbac2b32ce0ccd8375b066005b6ba69759ddd 100644 (file)
@@ -334,7 +334,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        if (++sch->q.qlen <= q->limit) {
                sch->bstats.bytes += qdisc_pkt_len(skb);
                sch->bstats.packets++;
-               return 0;
+               return NET_XMIT_SUCCESS;
        }
 
        sfq_drop(sch);
@@ -508,6 +508,11 @@ nla_put_failure:
        return -1;
 }
 
+static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
+{
+       return NULL;
+}
+
 static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
 {
        return 0;
@@ -519,6 +524,10 @@ static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
        return 0;
 }
 
+static void sfq_put(struct Qdisc *q, unsigned long cl)
+{
+}
+
 static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
 {
        struct sfq_sched_data *q = qdisc_priv(sch);
@@ -571,9 +580,12 @@ static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 }
 
 static const struct Qdisc_class_ops sfq_class_ops = {
+       .leaf           =       sfq_leaf,
        .get            =       sfq_get,
+       .put            =       sfq_put,
        .tcf_chain      =       sfq_find_tcf,
        .bind_tcf       =       sfq_bind,
+       .unbind_tcf     =       sfq_put,
        .dump           =       sfq_dump_class,
        .dump_stats     =       sfq_dump_class_stats,
        .walk           =       sfq_walk,
index 0991c640cd3e8f3e5ae836c44b16b9e27f743c7e..641a30d646356867b808ac86d7bac59901fe2729 100644 (file)
@@ -127,7 +127,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
                return qdisc_reshape_fail(skb, sch);
 
        ret = qdisc_enqueue(skb, q->qdisc);
-       if (ret != 0) {
+       if (ret != NET_XMIT_SUCCESS) {
                if (net_xmit_drop_count(ret))
                        sch->qstats.drops++;
                return ret;
@@ -136,7 +136,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
        sch->q.qlen++;
        sch->bstats.bytes += qdisc_pkt_len(skb);
        sch->bstats.packets++;
-       return 0;
+       return NET_XMIT_SUCCESS;
 }
 
 static unsigned int tbf_drop(struct Qdisc* sch)
index 807643bdcbac30817edecfb4185dd45826b750eb..feaabc103ce6a061e350ddb07e1a2faf5d1ee4bd 100644 (file)
@@ -85,7 +85,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
                __skb_queue_tail(&q->q, skb);
                sch->bstats.bytes += qdisc_pkt_len(skb);
                sch->bstats.packets++;
-               return 0;
+               return NET_XMIT_SUCCESS;
        }
 
        kfree_skb(skb);
index ba59983aaffee6e3d7f7a0afb241f92d787b152d..b14ed4b1f27c3bd70f5837c2f032baf11a88e9a1 100644 (file)
@@ -2504,7 +2504,7 @@ static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
        if (p->dir > XFRM_POLICY_OUT)
                return NULL;
 
-       xp = xfrm_policy_alloc(net, GFP_KERNEL);
+       xp = xfrm_policy_alloc(net, GFP_ATOMIC);
        if (xp == NULL) {
                *dir = -ENOBUFS;
                return NULL;