bbs.cooldavid.org Git - net-next-2.6.git/commitdiff
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
author David S. Miller <davem@davemloft.net>
Sun, 14 Nov 2010 19:57:05 +0000 (11:57 -0800)
committer David S. Miller <davem@davemloft.net>
Sun, 14 Nov 2010 19:57:05 +0000 (11:57 -0800)
61 files changed:
Documentation/networking/stmmac.txt
drivers/block/aoe/aoecmd.c
drivers/infiniband/core/addr.c
drivers/net/3c507.c
drivers/net/3c515.c
drivers/net/82596.c
drivers/net/Kconfig
drivers/net/arm/w90p910_ether.c
drivers/net/at1700.c
drivers/net/atarilance.c
drivers/net/bonding/bond_main.c
drivers/net/can/mscan/mscan.c
drivers/net/eepro.c
drivers/net/ks8851.c
drivers/net/lance.c
drivers/net/lib82596.c
drivers/net/qlge/qlge.h
drivers/net/qlge/qlge_dbg.c
drivers/net/qlge/qlge_ethtool.c
drivers/net/qlge/qlge_mpi.c
drivers/net/usb/ipheth.c
drivers/net/vxge/vxge-config.c
drivers/net/vxge/vxge-config.h
drivers/net/vxge/vxge-ethtool.c
drivers/net/vxge/vxge-main.c
drivers/net/vxge/vxge-main.h
drivers/net/vxge/vxge-reg.h
drivers/net/vxge/vxge-traffic.h
drivers/net/vxge/vxge-version.h
drivers/net/znet.c
include/linux/igmp.h
include/linux/inetdevice.h
include/linux/netdevice.h
include/net/dn_dev.h
include/net/dn_route.h
include/net/dst.h
include/net/inet_sock.h
include/net/neighbour.h
include/net/route.h
net/core/dev.c
net/dccp/ackvec.c
net/dccp/ackvec.h
net/dccp/ccids/ccid2.c
net/dccp/dccp.h
net/dccp/input.c
net/dccp/options.c
net/decnet/af_decnet.c
net/decnet/dn_dev.c
net/decnet/dn_fib.c
net/decnet/dn_neigh.c
net/decnet/dn_route.c
net/ipv4/icmp.c
net/ipv4/igmp.c
net/ipv4/ip_gre.c
net/ipv4/ipmr.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/xfrm4_policy.c
net/netfilter/ipvs/ip_vs_xmit.c
net/socket.c
net/unix/af_unix.c

index 7ee770b5ef5fc0664d75d3b62a9798af0c06a10d..80a7a34549022147a68ddb4821e16be4f9e589ea 100644 (file)
@@ -7,7 +7,7 @@ This is the driver for the MAC 10/100/1000 on-chip Ethernet controllers
 (Synopsys IP blocks); it has been fully tested on STLinux platforms.
 
 Currently this network device driver is for all STM embedded MAC/GMAC
-(7xxx SoCs).
+(7xxx SoCs). Other platforms start using it i.e. ARM SPEAr.
 
 DWC Ether MAC 10/100/1000 Universal version 3.41a and DWC Ether MAC 10/100
 Universal version 4.0 have been used for developing the first code
@@ -95,9 +95,14 @@ Several information came from the platform; please refer to the
 driver's Header file in include/linux directory.
 
 struct plat_stmmacenet_data {
-        int bus_id;
-        int pbl;
-        int has_gmac;
+       int bus_id;
+       int pbl;
+       int clk_csr;
+       int has_gmac;
+       int enh_desc;
+       int tx_coe;
+       int bugged_jumbo;
+       int pmt;
         void (*fix_mac_speed)(void *priv, unsigned int speed);
         void (*bus_setup)(unsigned long ioaddr);
 #ifdef CONFIG_STM_DRIVERS
@@ -114,6 +119,12 @@ Where:
   registers (on STM platforms);
 - has_gmac: GMAC core is on board (get it at run-time in the next step);
 - bus_id: bus identifier.
+- tx_coe: core is able to perform the tx csum in HW.
+- enh_desc: if sets the MAC will use the enhanced descriptor structure.
+- clk_csr: CSR Clock range selection.
+- bugged_jumbo: some HWs are not able to perform the csum in HW for
+  over-sized frames due to limited buffer sizes. Setting this
+  flag the csum will be done in SW on JUMBO frames.
 
 struct plat_stmmacphy_data {
         int bus_id;
@@ -131,13 +142,28 @@ Where:
 - interface: physical MII interface mode;
 - phy_reset: hook to reset HW function.
 
+SOURCES:
+- Kconfig
+- Makefile
+- stmmac_main.c: main network device driver;
+- stmmac_mdio.c: mdio functions;
+- stmmac_ethtool.c: ethtool support;
+- stmmac_timer.[ch]: timer code used for mitigating the driver dma interrupts
+  Only tested on ST40 platforms based.
+- stmmac.h: private driver structure;
+- common.h: common definitions and VFTs;
+- descs.h: descriptor structure definitions;
+- dwmac1000_core.c: GMAC core functions;
+- dwmac1000_dma.c:  dma functions for the GMAC chip;
+- dwmac1000.h: specific header file for the GMAC;
+- dwmac100_core: MAC 100 core and dma code;
+- dwmac100_dma.c: dma funtions for the MAC chip;
+- dwmac1000.h: specific header file for the MAC;
+- dwmac_lib.c: generic DMA functions shared among chips
+- enh_desc.c: functions for handling enhanced descriptors
+- norm_desc.c: functions for handling normal descriptors
+
 TODO:
-- Continue to make the driver more generic and suitable for other Synopsys
-  Ethernet controllers used on other architectures (i.e. ARM).
-- 10G controllers are not supported.
-- MAC uses Normal descriptors and GMAC uses enhanced ones.
-  This is a limit that should be reviewed. MAC could want to
-  use the enhanced structure.
-- Checksumming: Rx/Tx csum is done in HW in case of GMAC only.
+- XGMAC controller is not supported.
 - Review the timer optimisation code to use an embedded device that seems to be
   available in new chip generations.
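For illustration only, a minimal platform-data sketch built from the fields documented in the stmmac.txt hunk above; the instance name and the chosen values are hypothetical and are not part of this commit:

static struct plat_stmmacenet_data example_stmmac_pdata = {
	.bus_id       = 0,
	.pbl          = 32,	/* DMA programmable burst length */
	.clk_csr      = 2,	/* CSR clock range selection */
	.has_gmac     = 1,	/* GMAC core is on board */
	.enh_desc     = 1,	/* use the enhanced descriptor structure */
	.tx_coe       = 1,	/* core can do the TX checksum in HW */
	.bugged_jumbo = 0,	/* no SW csum fallback needed for jumbo frames */
	.pmt          = 1,
};

A board file would hand a structure like this to the stmmac platform device as its platform_data.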
index 5674bd01d96dffc86a8818f6a62a72207092c71e..de0435e63b02cbd349c5dcc282682359f6f85934 100644 (file)
@@ -297,8 +297,8 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu
        struct sk_buff *skb;
        struct net_device *ifp;
 
-       read_lock(&dev_base_lock);
-       for_each_netdev(&init_net, ifp) {
+       rcu_read_lock();
+       for_each_netdev_rcu(&init_net, ifp) {
                dev_hold(ifp);
                if (!is_aoe_netif(ifp))
                        goto cont;
@@ -325,7 +325,7 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu
 cont:
                dev_put(ifp);
        }
-       read_unlock(&dev_base_lock);
+       rcu_read_unlock();
 }
 
 static void
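The aoecmd_cfg_pkts() change above replaces the dev_base_lock reader with an RCU read-side section. A stripped-down sketch of that iteration pattern (the real loop also builds and queues AoE config frames):

	struct net_device *ifp;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ifp) {
		dev_hold(ifp);		/* hold a reference while using the device */
		/* ... per-device work ... */
		dev_put(ifp);
	}
	rcu_read_unlock();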
index a5ea1bce9689601edb299eab667ea0be35290cd2..c15fd2ea56c1ae169946bd6a7323f6e625add339 100644 (file)
@@ -200,7 +200,7 @@ static int addr4_resolve(struct sockaddr_in *src_in,
        src_in->sin_family = AF_INET;
        src_in->sin_addr.s_addr = rt->rt_src;
 
-       if (rt->idev->dev->flags & IFF_LOOPBACK) {
+       if (rt->dst.dev->flags & IFF_LOOPBACK) {
                ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
                if (!ret)
                        memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
@@ -208,12 +208,12 @@ static int addr4_resolve(struct sockaddr_in *src_in,
        }
 
        /* If the device does ARP internally, return 'done' */
-       if (rt->idev->dev->flags & IFF_NOARP) {
-               rdma_copy_addr(addr, rt->idev->dev, NULL);
+       if (rt->dst.dev->flags & IFF_NOARP) {
+               rdma_copy_addr(addr, rt->dst.dev, NULL);
                goto put;
        }
 
-       neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->idev->dev);
+       neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev);
        if (!neigh || !(neigh->nud_state & NUD_VALID)) {
                neigh_event_send(rt->dst.neighbour, NULL);
                ret = -ENODATA;
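The addr4_resolve() hunk above reflects the removal of the rtable idev cache: the output device is now taken straight from the route's dst entry. A hypothetical fragment showing the access pattern (not copied from the patch):

	/* assuming rt was returned by a successful route lookup,
	 * e.g. ip_route_output_key() */
	struct net_device *out_dev = rt->dst.dev;	/* previously rt->idev->dev */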
index ea9b7a098c9bd75e2023fe7e5164f4baf7a86fee..475a66d95b3402a77addb60ceb2be1f0143ad187 100644 (file)
@@ -201,7 +201,7 @@ struct net_local {
 #define RX_BUF_SIZE    (1518+14+18)    /* packet+header+RBD */
 #define RX_BUF_END             (dev->mem_end - dev->mem_start)
 
-#define TX_TIMEOUT 5
+#define TX_TIMEOUT (HZ/20)
 
 /*
   That's it: only 86 bytes to set up the beast, including every extra
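This hunk and the similar ones below convert TX_TIMEOUT from a raw jiffy count (which implicitly assumed HZ=100) to an HZ-relative value. A hypothetical fragment showing why the units matter; watchdog_timeo is expressed in jiffies and is what the core transmit watchdog compares against:

	#define TX_TIMEOUT	(HZ/20)		/* 50 ms regardless of CONFIG_HZ */

	dev->watchdog_timeo = TX_TIMEOUT;	/* net core arms the TX watchdog with this */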
index cdf7226a7c43a775b0505b09ed1a0d3ba21a623e..d2bb4b254c57ddf0c4e51bd0eebd33960732892b 100644 (file)
@@ -98,7 +98,7 @@ static int rx_nocopy, rx_copy, queued_packet;
 #define WAIT_TX_AVAIL 200
 
 /* Operational parameter that usually are not changed. */
-#define TX_TIMEOUT  40         /* Time in jiffies before concluding Tx hung */
+#define TX_TIMEOUT  ((4*HZ)/10)        /* Time in jiffies before concluding Tx hung */
 
 /* The size here is somewhat misleading: the Corkscrew also uses the ISA
    aliased registers at <base>+0x400.
index e2c9c5b949f97f5f67ef481fd32021969889e6f9..be1f1970c8422610d7ef37ebc85692a4372a9fbe 100644 (file)
@@ -191,7 +191,7 @@ enum commands {
 #define         RX_SUSPEND     0x0030
 #define         RX_ABORT       0x0040
 
-#define TX_TIMEOUT     5
+#define TX_TIMEOUT     (HZ/20)
 
 
 struct i596_reg {
index f6668cdaac85487b3c621b881af0c9afd975595b..0a7e6cea0082afb3c688cd645aede34b9c3cf9e9 100644 (file)
@@ -1533,7 +1533,7 @@ config E100
 
          <http://support.intel.com/support/network/adapter/pro100/21397.htm>
 
-          to identify the adapter.
+         to identify the adapter.
 
          For the latest Intel PRO/100 network driver for Linux, see:
 
@@ -1786,17 +1786,17 @@ config KS8842
        tristate "Micrel KSZ8841/42 with generic bus interface"
        depends on HAS_IOMEM && DMA_ENGINE
        help
-        This platform driver is for KSZ8841(1-port) / KS8842(2-port)
-        ethernet switch chip (managed, VLAN, QoS) from Micrel or
-        Timberdale(FPGA).
+         This platform driver is for KSZ8841(1-port) / KS8842(2-port)
+         ethernet switch chip (managed, VLAN, QoS) from Micrel or
+         Timberdale(FPGA).
 
 config KS8851
-       tristate "Micrel KS8851 SPI"
-       depends on SPI
-       select MII
+       tristate "Micrel KS8851 SPI"
+       depends on SPI
+       select MII
        select CRC32
-       help
-         SPI driver for Micrel KS8851 SPI attached network chip.
+       help
+         SPI driver for Micrel KS8851 SPI attached network chip.
 
 config KS8851_MLL
        tristate "Micrel KS8851 MLL"
@@ -2133,25 +2133,25 @@ config IP1000
          will be called ipg.  This is recommended.
 
 config IGB
-       tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
-       depends on PCI
-       ---help---
-         This driver supports Intel(R) 82575/82576 gigabit ethernet family of
-         adapters.  For more information on how to identify your adapter, go
-         to the Adapter & Driver ID Guide at:
+       tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
+       depends on PCI
+       ---help---
+         This driver supports Intel(R) 82575/82576 gigabit ethernet family of
+         adapters.  For more information on how to identify your adapter, go
+         to the Adapter & Driver ID Guide at:
 
-         <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+         <http://support.intel.com/support/network/adapter/pro100/21397.htm>
 
-         For general information and support, go to the Intel support
-         website at:
+         For general information and support, go to the Intel support
+         website at:
 
-         <http://support.intel.com>
+         <http://support.intel.com>
 
-         More specific information on configuring the driver is in
-         <file:Documentation/networking/e1000.txt>.
+         More specific information on configuring the driver is in
+         <file:Documentation/networking/e1000.txt>.
 
-         To compile this driver as a module, choose M here. The module
-         will be called igb.
+         To compile this driver as a module, choose M here. The module
+         will be called igb.
 
 config IGB_DCA
        bool "Direct Cache Access (DCA) Support"
@@ -2163,25 +2163,25 @@ config IGB_DCA
          is used, with the intent of lessening the impact of cache misses.
 
 config IGBVF
-       tristate "Intel(R) 82576 Virtual Function Ethernet support"
-       depends on PCI
-       ---help---
-         This driver supports Intel(R) 82576 virtual functions.  For more
-         information on how to identify your adapter, go to the Adapter &
-         Driver ID Guide at:
+       tristate "Intel(R) 82576 Virtual Function Ethernet support"
+       depends on PCI
+       ---help---
+         This driver supports Intel(R) 82576 virtual functions.  For more
+         information on how to identify your adapter, go to the Adapter &
+         Driver ID Guide at:
 
-         <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+         <http://support.intel.com/support/network/adapter/pro100/21397.htm>
 
-         For general information and support, go to the Intel support
-         website at:
+         For general information and support, go to the Intel support
+         website at:
 
-         <http://support.intel.com>
+         <http://support.intel.com>
 
-         More specific information on configuring the driver is in
-         <file:Documentation/networking/e1000.txt>.
+         More specific information on configuring the driver is in
+         <file:Documentation/networking/e1000.txt>.
 
-         To compile this driver as a module, choose M here. The module
-         will be called igbvf.
+         To compile this driver as a module, choose M here. The module
+         will be called igbvf.
 
 source "drivers/net/ixp2000/Kconfig"
 
@@ -2300,14 +2300,14 @@ config SKGE
          will be called skge.  This is recommended.
 
 config SKGE_DEBUG
-       bool "Debugging interface"
-       depends on SKGE && DEBUG_FS
-       help
-        This option adds the ability to dump driver state for debugging.
-        The file /sys/kernel/debug/skge/ethX displays the state of the internal
-        transmit and receive rings.
+       bool "Debugging interface"
+       depends on SKGE && DEBUG_FS
+       help
+         This option adds the ability to dump driver state for debugging.
+         The file /sys/kernel/debug/skge/ethX displays the state of the internal
+         transmit and receive rings.
 
-        If unsure, say N.
+         If unsure, say N.
 
 config SKY2
        tristate "SysKonnect Yukon2 support"
@@ -2326,14 +2326,14 @@ config SKY2
          will be called sky2.  This is recommended.
 
 config SKY2_DEBUG
-       bool "Debugging interface"
-       depends on SKY2 && DEBUG_FS
-       help
-        This option adds the ability to dump driver state for debugging.
-        The file /sys/kernel/debug/sky2/ethX displays the state of the internal
-        transmit and receive rings.
+       bool "Debugging interface"
+       depends on SKY2 && DEBUG_FS
+       help
+         This option adds the ability to dump driver state for debugging.
+         The file /sys/kernel/debug/sky2/ethX displays the state of the internal
+         transmit and receive rings.
 
-        If unsure, say N.
+         If unsure, say N.
 
 config VIA_VELOCITY
        tristate "VIA Velocity support"
@@ -2573,32 +2573,32 @@ config MDIO
        tristate
 
 config CHELSIO_T1
-        tristate "Chelsio 10Gb Ethernet support"
-        depends on PCI
+       tristate "Chelsio 10Gb Ethernet support"
+       depends on PCI
        select CRC32
        select MDIO
-        help
-          This driver supports Chelsio gigabit and 10-gigabit
-          Ethernet cards. More information about adapter features and
+       help
+         This driver supports Chelsio gigabit and 10-gigabit
+         Ethernet cards. More information about adapter features and
          performance tuning is in <file:Documentation/networking/cxgb.txt>.
 
-          For general information about Chelsio and our products, visit
-          our website at <http://www.chelsio.com>.
+         For general information about Chelsio and our products, visit
+         our website at <http://www.chelsio.com>.
 
-          For customer support, please visit our customer support page at
-          <http://www.chelsio.com/support.html>.
+         For customer support, please visit our customer support page at
+         <http://www.chelsio.com/support.html>.
 
-          Please send feedback to <linux-bugs@chelsio.com>.
+         Please send feedback to <linux-bugs@chelsio.com>.
 
-          To compile this driver as a module, choose M here: the module
-          will be called cxgb.
+         To compile this driver as a module, choose M here: the module
+         will be called cxgb.
 
 config CHELSIO_T1_1G
-        bool "Chelsio gigabit Ethernet support"
-        depends on CHELSIO_T1
-        help
-          Enables support for Chelsio's gigabit Ethernet PCI cards.  If you
-          are using only 10G cards say 'N' here.
+       bool "Chelsio gigabit Ethernet support"
+       depends on CHELSIO_T1
+       help
+         Enables support for Chelsio's gigabit Ethernet PCI cards.  If you
+         are using only 10G cards say 'N' here.
 
 config CHELSIO_T3_DEPENDS
        tristate
@@ -2728,26 +2728,26 @@ config IXGBE_DCB
          If unsure, say N.
 
 config IXGBEVF
-       tristate "Intel(R) 82599 Virtual Function Ethernet support"
-       depends on PCI_MSI
-       ---help---
-         This driver supports Intel(R) 82599 virtual functions.  For more
-         information on how to identify your adapter, go to the Adapter &
-         Driver ID Guide at:
+       tristate "Intel(R) 82599 Virtual Function Ethernet support"
+       depends on PCI_MSI
+       ---help---
+         This driver supports Intel(R) 82599 virtual functions.  For more
+         information on how to identify your adapter, go to the Adapter &
+         Driver ID Guide at:
 
-         <http://support.intel.com/support/network/sb/CS-008441.htm>
+         <http://support.intel.com/support/network/sb/CS-008441.htm>
 
-         For general information and support, go to the Intel support
-         website at:
+         For general information and support, go to the Intel support
+         website at:
 
-         <http://support.intel.com>
+         <http://support.intel.com>
 
-         More specific information on configuring the driver is in
-         <file:Documentation/networking/ixgbevf.txt>.
+         More specific information on configuring the driver is in
+         <file:Documentation/networking/ixgbevf.txt>.
 
-         To compile this driver as a module, choose M here. The module
-         will be called ixgbevf.  MSI-X interrupt support is required
-         for this driver to work correctly.
+         To compile this driver as a module, choose M here. The module
+         will be called ixgbevf.  MSI-X interrupt support is required
+         for this driver to work correctly.
 
 config IXGB
        tristate "Intel(R) PRO/10GbE support"
@@ -2772,29 +2772,38 @@ config IXGB
          will be called ixgb.
 
 config S2IO
-       tristate "S2IO 10Gbe XFrame NIC"
+       tristate "Exar Xframe 10Gb Ethernet Adapter"
        depends on PCI
        ---help---
-         This driver supports the 10Gbe XFrame NIC of S2IO. 
+         This driver supports Exar Corp's Xframe Series 10Gb Ethernet Adapters.
+
          More specific information on configuring the driver is in 
          <file:Documentation/networking/s2io.txt>.
 
+         To compile this driver as a module, choose M here. The module
+         will be called s2io.
+
 config VXGE
-       tristate "Neterion X3100 Series 10GbE PCIe Server Adapter"
+       tristate "Exar X3100 Series 10GbE PCIe Server Adapter"
        depends on PCI && INET
        ---help---
-         This driver supports Neterion Inc's X3100 Series 10 GbE PCIe
+         This driver supports Exar Corp's X3100 Series 10 GbE PCIe
          I/O Virtualized Server Adapter.
+
          More specific information on configuring the driver is in
          <file:Documentation/networking/vxge.txt>.
 
+         To compile this driver as a module, choose M here. The module
+         will be called vxge.
+
 config VXGE_DEBUG_TRACE_ALL
        bool "Enabling All Debug trace statments in driver"
        default n
        depends on VXGE
        ---help---
          Say Y here if you want to enabling all the debug trace statements in
-         driver. By  default only few debug trace statements are enabled.
+         the vxge driver. By default only few debug trace statements are
+         enabled.
 
 config MYRI10GE
        tristate "Myricom Myri-10G Ethernet support"
@@ -2906,18 +2915,18 @@ config QLGE
          will be called qlge.
 
 config BNA
-        tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
-        depends on PCI
-        ---help---
-          This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet
-          cards.
-          To compile this driver as a module, choose M here: the module
-          will be called bna.
+       tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
+       depends on PCI
+       ---help---
+         This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet
+         cards.
+         To compile this driver as a module, choose M here: the module
+         will be called bna.
 
-          For general information and support, go to the Brocade support
-          website at:
+         For general information and support, go to the Brocade support
+         website at:
 
-          <http://support.brocade.com>
+         <http://support.brocade.com>
 
 source "drivers/net/sfc/Kconfig"
 
@@ -3227,18 +3236,18 @@ config PPP_BSDCOMP
          modules once you have said "make modules". If unsure, say N.
 
 config PPP_MPPE
-       tristate "PPP MPPE compression (encryption) (EXPERIMENTAL)"
-       depends on PPP && EXPERIMENTAL
-       select CRYPTO
-       select CRYPTO_SHA1
-       select CRYPTO_ARC4
-       select CRYPTO_ECB
-       ---help---
-         Support for the MPPE Encryption protocol, as employed by the
-        Microsoft Point-to-Point Tunneling Protocol.
-
-        See http://pptpclient.sourceforge.net/ for information on
-        configuring PPTP clients and servers to utilize this method.
+       tristate "PPP MPPE compression (encryption) (EXPERIMENTAL)"
+       depends on PPP && EXPERIMENTAL
+       select CRYPTO
+       select CRYPTO_SHA1
+       select CRYPTO_ARC4
+       select CRYPTO_ECB
+       ---help---
+         Support for the MPPE Encryption protocol, as employed by the
+         Microsoft Point-to-Point Tunneling Protocol.
+
+         See http://pptpclient.sourceforge.net/ for information on
+         configuring PPTP clients and servers to utilize this method.
 
 config PPPOE
        tristate "PPP over Ethernet (EXPERIMENTAL)"
@@ -3397,14 +3406,14 @@ config VIRTIO_NET
        depends on EXPERIMENTAL && VIRTIO
        ---help---
          This is the virtual network driver for virtio.  It can be used with
-          lguest or QEMU based VMMs (like KVM or Xen).  Say Y or M.
+         lguest or QEMU based VMMs (like KVM or Xen).  Say Y or M.
 
 config VMXNET3
-       tristate "VMware VMXNET3 ethernet driver"
-       depends on PCI && INET
-       help
-         This driver supports VMware's vmxnet3 virtual ethernet NIC.
-         To compile this driver as a module, choose M here: the
-         module will be called vmxnet3.
+       tristate "VMware VMXNET3 ethernet driver"
+       depends on PCI && INET
+       help
+         This driver supports VMware's vmxnet3 virtual ethernet NIC.
+         To compile this driver as a module, choose M here: the
+         module will be called vmxnet3.
 
 endif # NETDEVICES
index 4545d5a06c24f9e9cea64a70db01327a1d52ad21..bfea499a351309f2292998208e9b302ff3a58c07 100644 (file)
 #define TX_DESC_SIZE           10
 #define MAX_RBUFF_SZ           0x600
 #define MAX_TBUFF_SZ           0x600
-#define TX_TIMEOUT             50
+#define TX_TIMEOUT             (HZ/2)
 #define DELAY                  1000
 #define CAM0                   0x0
 
index 89876897a6fed5244358740795e98207507799a6..871b1633f543c2d515a88261317cf526a87484bb 100644 (file)
@@ -150,7 +150,7 @@ struct net_local {
 #define PORT_OFFSET(o) (o)
 
 
-#define TX_TIMEOUT             10
+#define TX_TIMEOUT             (HZ/10)
 
 
 /* Index to functions, as function prototypes. */
index 8cb27cb7bca1b347c0b072d25e98ef5b58acdb5a..ce0091eb06f580a2b63bcc35f9f019a078d3acbb 100644 (file)
@@ -116,7 +116,7 @@ MODULE_LICENSE("GPL");
 #define RX_RING_LEN_BITS               (RX_LOG_RING_SIZE << 5)
 #define        RX_RING_MOD_MASK                (RX_RING_SIZE - 1)
 
-#define TX_TIMEOUT     20
+#define TX_TIMEOUT     (HZ/5)
 
 /* The LANCE Rx and Tx ring descriptors. */
 struct lance_rx_head {
index bdb68a600382bdcfbe8a9e2cf70f8360821e2567..518844852f061861be791205cdbdcd73ddb9cf1a 100644 (file)
@@ -3209,7 +3209,7 @@ out:
 #ifdef CONFIG_PROC_FS
 
 static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
-       __acquires(&dev_base_lock)
+       __acquires(RCU)
        __acquires(&bond->lock)
 {
        struct bonding *bond = seq->private;
@@ -3218,7 +3218,7 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
        int i;
 
        /* make sure the bond won't be taken away */
-       read_lock(&dev_base_lock);
+       rcu_read_lock();
        read_lock(&bond->lock);
 
        if (*pos == 0)
@@ -3248,12 +3248,12 @@ static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 
 static void bond_info_seq_stop(struct seq_file *seq, void *v)
        __releases(&bond->lock)
-       __releases(&dev_base_lock)
+       __releases(RCU)
 {
        struct bonding *bond = seq->private;
 
        read_unlock(&bond->lock);
-       read_unlock(&dev_base_lock);
+       rcu_read_unlock();
 }
 
 static void bond_info_show_master(struct seq_file *seq)
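The __acquires(RCU)/__releases(RCU) markers above are sparse lock annotations documenting that the seq_file start/stop pair now brackets an RCU read-side critical section instead of dev_base_lock. A generic, hypothetical seq_file sketch of that pairing (not bonding-specific):

	static void *foo_seq_start(struct seq_file *seq, loff_t *pos)
		__acquires(RCU)
	{
		rcu_read_lock();		/* released in foo_seq_stop() */
		return SEQ_START_TOKEN;		/* record lookup omitted in this sketch */
	}

	static void foo_seq_stop(struct seq_file *seq, void *v)
		__releases(RCU)
	{
		rcu_read_unlock();
	}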
index 64c378cd0c34125471f6cb3fcae10482ae1e0656..74cd880c7e0676c2387f300a9a79b0b3f3e63ae0 100644 (file)
@@ -182,7 +182,7 @@ static int mscan_restart(struct net_device *dev)
 
                priv->can.state = CAN_STATE_ERROR_ACTIVE;
                WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD),
-                    "bus-off state expected");
+                    "bus-off state expected\n");
                out_8(&regs->canmisc, MSCAN_BOHOLD);
                /* Re-enable receive interrupts. */
                out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
index 7c826319ee5a153111a59a1c517992dadcd5a84e..9e19fbc2f1764921d7b3a9a6f4b5ddf11b069910 100644 (file)
@@ -302,7 +302,7 @@ struct eepro_local {
 #define ee_id_eepro10p0 0x10   /* ID for eepro/10+ */
 #define ee_id_eepro10p1 0x31
 
-#define TX_TIMEOUT 40
+#define TX_TIMEOUT ((4*HZ)/10)
 
 /* Index to functions, as function prototypes. */
 
index 51919fcd50c26e2c0c6b8c23ba393887eca50254..0fa4a9887ba2668e42cd328ad150148af0fd88fb 100644 (file)
@@ -1545,6 +1545,37 @@ static int ks8851_read_selftest(struct ks8851_net *ks)
 
 /* driver bus management functions */
 
+#ifdef CONFIG_PM
+static int ks8851_suspend(struct spi_device *spi, pm_message_t state)
+{
+       struct ks8851_net *ks = dev_get_drvdata(&spi->dev);
+       struct net_device *dev = ks->netdev;
+
+       if (netif_running(dev)) {
+               netif_device_detach(dev);
+               ks8851_net_stop(dev);
+       }
+
+       return 0;
+}
+
+static int ks8851_resume(struct spi_device *spi)
+{
+       struct ks8851_net *ks = dev_get_drvdata(&spi->dev);
+       struct net_device *dev = ks->netdev;
+
+       if (netif_running(dev)) {
+               ks8851_net_open(dev);
+               netif_device_attach(dev);
+       }
+
+       return 0;
+}
+#else
+#define ks8851_suspend NULL
+#define ks8851_resume NULL
+#endif
+
 static int __devinit ks8851_probe(struct spi_device *spi)
 {
        struct net_device *ndev;
@@ -1679,6 +1710,8 @@ static struct spi_driver ks8851_driver = {
        },
        .probe = ks8851_probe,
        .remove = __devexit_p(ks8851_remove),
+       .suspend = ks8851_suspend,
+       .resume = ks8851_resume,
 };
 
 static int __init ks8851_init(void)
index f06296bfe293fd8ddcce2b4906c897d45472f92b..02336edce748db5b4e959d8b83e637d48c20aac4 100644 (file)
@@ -207,7 +207,7 @@ tx_full and tbusy flags.
 #define LANCE_BUS_IF 0x16
 #define LANCE_TOTAL_SIZE 0x18
 
-#define TX_TIMEOUT     20
+#define TX_TIMEOUT     (HZ/5)
 
 /* The LANCE Rx and Tx ring descriptors. */
 struct lance_rx_head {
index c27f4291b350422978ab9fbf55bd6318ec0953ae..9e042894479b6c5e74d9ef28c4540c2370bad0a9 100644 (file)
@@ -161,7 +161,7 @@ enum commands {
 #define         RX_SUSPEND     0x0030
 #define         RX_ABORT       0x0040
 
-#define TX_TIMEOUT     5
+#define TX_TIMEOUT     (HZ/20)
 
 
 struct i596_reg {
index 22821398fc63765054eff056ab2529beedfb30fc..bdb8fe868539d68f8b1388887e560bf2626b7d42 100644 (file)
@@ -16,7 +16,7 @@
  */
 #define DRV_NAME       "qlge"
 #define DRV_STRING     "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION    "v1.00.00.25.00.00-01"
+#define DRV_VERSION    "v1.00.00.27.00.00-01"
 
 #define WQ_ADDR_ALIGN  0x3     /* 4 byte alignment */
 
@@ -2221,6 +2221,7 @@ int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
 int ql_unpause_mpi_risc(struct ql_adapter *qdev);
 int ql_pause_mpi_risc(struct ql_adapter *qdev);
 int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
+int ql_soft_reset_mpi_risc(struct ql_adapter *qdev);
 int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
                u32 ram_addr, int word_count);
 int ql_core_dump(struct ql_adapter *qdev,
@@ -2236,6 +2237,7 @@ int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control);
 int ql_mb_get_port_cfg(struct ql_adapter *qdev);
 int ql_mb_set_port_cfg(struct ql_adapter *qdev);
 int ql_wait_fifo_empty(struct ql_adapter *qdev);
+void ql_get_dump(struct ql_adapter *qdev, void *buff);
 void ql_gen_reg_dump(struct ql_adapter *qdev,
                        struct ql_reg_dump *mpi_coredump);
 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
index 4747492935ef8bf190cc575fe7146e3072417623..fca804f36d61a2e6f2d79140945adee3732a2ec1 100644 (file)
@@ -1317,9 +1317,28 @@ void ql_gen_reg_dump(struct ql_adapter *qdev,
        status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
        if (status)
                return;
+}
+
+void ql_get_dump(struct ql_adapter *qdev, void *buff)
+{
+       /*
+        * If the dump has already been taken and is stored
+        * in our internal buffer and if force dump is set then
+        * just start the spool to dump it to the log file
+        * and also, take a snapshot of the general regs to
+        * to the user's buffer or else take complete dump
+        * to the user's buffer if force is not set.
+        */
 
-       if (test_bit(QL_FRC_COREDUMP, &qdev->flags))
+       if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
+               if (!ql_core_dump(qdev, buff))
+                       ql_soft_reset_mpi_risc(qdev);
+               else
+                       netif_err(qdev, drv, qdev->ndev, "coredump failed!\n");
+       } else {
+               ql_gen_reg_dump(qdev, buff);
                ql_get_core_dump(qdev);
+       }
 }
 
 /* Coredump to messages log file using separate worker thread */
index 4892d64f4e054b2a630a13f2249f6ca1be7f83cb..8149cc9de4ca05a0bfaca39f396359eae1eac070 100644 (file)
@@ -375,7 +375,10 @@ static void ql_get_drvinfo(struct net_device *ndev,
        strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
        drvinfo->n_stats = 0;
        drvinfo->testinfo_len = 0;
-       drvinfo->regdump_len = 0;
+       if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+               drvinfo->regdump_len = sizeof(struct ql_mpi_coredump);
+       else
+               drvinfo->regdump_len = sizeof(struct ql_reg_dump);
        drvinfo->eedump_len = 0;
 }
 
@@ -547,7 +550,12 @@ static void ql_self_test(struct net_device *ndev,
 
 static int ql_get_regs_len(struct net_device *ndev)
 {
-       return sizeof(struct ql_reg_dump);
+       struct ql_adapter *qdev = netdev_priv(ndev);
+
+       if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+               return sizeof(struct ql_mpi_coredump);
+       else
+               return sizeof(struct ql_reg_dump);
 }
 
 static void ql_get_regs(struct net_device *ndev,
@@ -555,7 +563,12 @@ static void ql_get_regs(struct net_device *ndev,
 {
        struct ql_adapter *qdev = netdev_priv(ndev);
 
-       ql_gen_reg_dump(qdev, p);
+       ql_get_dump(qdev, p);
+       qdev->core_is_dumped = 0;
+       if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+               regs->len = sizeof(struct ql_mpi_coredump);
+       else
+               regs->len = sizeof(struct ql_reg_dump);
 }
 
 static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
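A usage-level note on the two ethtool hunks above: the ethtool core calls .get_regs_len first to size the user buffer and then .get_regs to fill it, so both must agree on whether a full MPI coredump or only the register dump is returned. A hypothetical wiring sketch (the member names are standard struct ethtool_ops fields; the instance name is illustrative, not taken from qlge):

	static const struct ethtool_ops ql_ethtool_ops_sketch = {
		.get_regs_len	= ql_get_regs_len,	/* coredump size vs. plain reg-dump size */
		.get_regs	= ql_get_regs,		/* fills the buffer and sets regs->len */
	};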
index 0e7c7c7ee1647006baf561ffd90308290fbd2399..100a462cc9163e578ed48c5ce505c69a41cbbea6 100644 (file)
@@ -87,7 +87,7 @@ exit:
        return status;
 }
 
-static int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
+int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
 {
        int status;
        status = ql_write_mpi_reg(qdev, 0x00001010, 1);
index b2bcf99e6f087ab1dfca2aef092233eea9aa5fe6..7d42f9a2c06868027ac77c6265c76f9ce2288c27 100644 (file)
@@ -363,7 +363,7 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
 
        /* Paranoid */
        if (skb->len > IPHETH_BUF_SIZE) {
-               WARN(1, "%s: skb too large: %d bytes", __func__, skb->len);
+               WARN(1, "%s: skb too large: %d bytes\n", __func__, skb->len);
                dev->net->stats.tx_dropped++;
                dev_kfree_skb_irq(skb);
                return NETDEV_TX_OK;
index 906a3ca3676b94c3aa6d8442bd7e56b9d0fa3eca..409c2e6053d03efe21e36ef14c4cae105b671ada 100644 (file)
 
 #include "vxge-traffic.h"
 #include "vxge-config.h"
-
-static enum vxge_hw_status
-__vxge_hw_fifo_create(
-       struct __vxge_hw_vpath_handle *vpath_handle,
-       struct vxge_hw_fifo_attr *attr);
-
-static enum vxge_hw_status
-__vxge_hw_fifo_abort(
-       struct __vxge_hw_fifo *fifoh);
-
-static enum vxge_hw_status
-__vxge_hw_fifo_reset(
-       struct __vxge_hw_fifo *ringh);
+#include "vxge-main.h"
 
 static enum vxge_hw_status
 __vxge_hw_fifo_delete(
@@ -71,53 +59,15 @@ __vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
                        u32 size,
                        struct vxge_hw_mempool_dma *dma_object);
 
-
-static struct __vxge_hw_channel*
-__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
-                       enum __vxge_hw_channel_type type, u32 length,
-                       u32 per_dtr_space, void *userdata);
-
 static void
 __vxge_hw_channel_free(
        struct __vxge_hw_channel *channel);
 
-static enum vxge_hw_status
-__vxge_hw_channel_initialize(
-       struct __vxge_hw_channel *channel);
-
-static enum vxge_hw_status
-__vxge_hw_channel_reset(
-       struct __vxge_hw_channel *channel);
-
 static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp);
 
-static enum vxge_hw_status
-__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
-
 static enum vxge_hw_status
 __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
 
-static void
-__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
-
-static void
-__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
-
-static enum vxge_hw_status
-__vxge_hw_vpath_card_info_get(
-       u32 vp_id,
-       struct vxge_hw_vpath_reg __iomem *vpath_reg,
-       struct vxge_hw_device_hw_info *hw_info);
-
-static enum vxge_hw_status
-__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
-
-static void
-__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
-
-static enum vxge_hw_status
-__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
-
 static enum vxge_hw_status
 __vxge_hw_device_register_poll(
        void __iomem    *reg,
@@ -138,9 +88,10 @@ __vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
 
 static struct vxge_hw_mempool*
 __vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size,
-                        u32 item_size, u32 private_size, u32 items_initial,
-                        u32 items_max, struct vxge_hw_mempool_cbs *mp_callback,
-                        void *userdata);
+                       u32 item_size, u32 private_size, u32 items_initial,
+                       u32 items_max, struct vxge_hw_mempool_cbs *mp_callback,
+                       void *userdata);
+
 static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool);
 
 static enum vxge_hw_status
@@ -153,52 +104,353 @@ vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vpath_handle);
 static enum vxge_hw_status
 __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
 
-static u64
-__vxge_hw_vpath_pci_func_mode_get(u32  vp_id,
-                                 struct vxge_hw_vpath_reg __iomem *vpath_reg);
-
-static u32
-__vxge_hw_vpath_func_id_get(u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
+static void
+__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id);
 
 static enum vxge_hw_status
-__vxge_hw_vpath_addr_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
-                        u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]);
+__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
+                                 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
 
 static enum vxge_hw_status
-__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
+__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
+                                 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
 
+static void
+vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
+{
+       u64 val64;
 
-static enum vxge_hw_status
-__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *devh, u32 vp_id);
+       val64 = readq(&vp_reg->rxmac_vcfg0);
+       val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
+       writeq(val64, &vp_reg->rxmac_vcfg0);
+       val64 = readq(&vp_reg->rxmac_vcfg0);
 
-static enum vxge_hw_status
-__vxge_hw_vpath_fw_ver_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
-                          struct vxge_hw_device_hw_info *hw_info);
+       return;
+}
 
-static enum vxge_hw_status
-__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *devh, u32 vp_id);
+/*
+ * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
+ */
+int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
+{
+       struct vxge_hw_vpath_reg __iomem *vp_reg;
+       struct __vxge_hw_virtualpath *vpath;
+       u64 val64, rxd_count, rxd_spat;
+       int count = 0, total_count = 0;
 
-static void
-__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id);
+       vpath = &hldev->virtual_paths[vp_id];
+       vp_reg = vpath->vp_reg;
 
-static enum vxge_hw_status
-__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
-                            u32 operation, u32 offset, u64 *stat);
+       vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);
 
-static enum vxge_hw_status
-__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
-                                 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
+       /* Check that the ring controller for this vpath has enough free RxDs
+        * to send frames to the host.  This is done by reading the
+        * PRC_RXD_DOORBELL_VPn register and comparing the read value to the
+        * RXD_SPAT value for the vpath.
+        */
+       val64 = readq(&vp_reg->prc_cfg6);
+       rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
+       /* Use a factor of 2 when comparing rxd_count against rxd_spat for some
+        * leg room.
+        */
+       rxd_spat *= 2;
+
+       do {
+               mdelay(1);
+
+               rxd_count = readq(&vp_reg->prc_rxd_doorbell);
+
+               /* Check that the ring controller for this vpath does
+                * not have any frame in its pipeline.
+                */
+               val64 = readq(&vp_reg->frm_in_progress_cnt);
+               if ((rxd_count <= rxd_spat) || (val64 > 0))
+                       count = 0;
+               else
+                       count++;
+               total_count++;
+       } while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
+                       (total_count < VXGE_HW_MAX_POLLING_COUNT));
+
+       if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
+               printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n",
+                       __func__);
+
+       return total_count;
+}
+
+/* vxge_hw_device_wait_receive_idle - This function waits until all frames
+ * stored in the frame buffer for each vpath assigned to the given
+ * function (hldev) have been sent to the host.
+ */
+void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
+{
+       int i, total_count = 0;
+
+       for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+               if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
+                       continue;
+
+               total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
+               if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
+                       break;
+       }
+}
 
 static enum vxge_hw_status
-__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
-                                 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
+vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
+                    u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
+                    u64 *steer_ctrl)
+{
+       struct vxge_hw_vpath_reg __iomem *vp_reg;
+       enum vxge_hw_status status;
+       u64 val64;
+       u32 retry = 0, max_retry = 100;
+
+       vp_reg = vpath->vp_reg;
+
+       if (vpath->vp_open) {
+               max_retry = 3;
+               spin_lock(&vpath->lock);
+       }
+
+       writeq(*data0, &vp_reg->rts_access_steer_data0);
+       writeq(*data1, &vp_reg->rts_access_steer_data1);
+       wmb();
+
+       val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
+               VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
+               VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
+               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
+               *steer_ctrl;
+
+       status = __vxge_hw_pio_mem_write64(val64,
+                                          &vp_reg->rts_access_steer_ctrl,
+                                          VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
+                                          VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+
+       /* The __vxge_hw_device_register_poll can udelay for a significant
+        * amount of time, blocking other proccess from the CPU.  If it delays
+        * for ~5secs, a NMI error can occur.  A way around this is to give up
+        * the processor via msleep, but this is not allowed is under lock.
+        * So, only allow it to sleep for ~4secs if open.  Otherwise, delay for
+        * 1sec and sleep for 10ms until the firmware operation has completed
+        * or timed-out.
+        */
+       while ((status != VXGE_HW_OK) && retry++ < max_retry) {
+               if (!vpath->vp_open)
+                       msleep(20);
+               status = __vxge_hw_device_register_poll(
+                                       &vp_reg->rts_access_steer_ctrl,
+                                       VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
+                                       VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+       }
+
+       if (status != VXGE_HW_OK)
+               goto out;
+
+       val64 = readq(&vp_reg->rts_access_steer_ctrl);
+       if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
+               *data0 = readq(&vp_reg->rts_access_steer_data0);
+               *data1 = readq(&vp_reg->rts_access_steer_data1);
+               *steer_ctrl = val64;
+       } else
+               status = VXGE_HW_FAIL;
+
+out:
+       if (vpath->vp_open)
+               spin_unlock(&vpath->lock);
+       return status;
+}
+
+enum vxge_hw_status
+vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
+                            u32 *minor, u32 *build)
+{
+       u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+       struct __vxge_hw_virtualpath *vpath;
+       enum vxge_hw_status status;
+
+       vpath = &hldev->virtual_paths[hldev->first_vp_id];
+
+       status = vxge_hw_vpath_fw_api(vpath,
+                                     VXGE_HW_FW_UPGRADE_ACTION,
+                                     VXGE_HW_FW_UPGRADE_MEMO,
+                                     VXGE_HW_FW_UPGRADE_OFFSET_READ,
+                                     &data0, &data1, &steer_ctrl);
+       if (status != VXGE_HW_OK)
+               return status;
+
+       *major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
+       *minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
+       *build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
+
+       return status;
+}
+
+enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev)
+{
+       u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+       struct __vxge_hw_virtualpath *vpath;
+       enum vxge_hw_status status;
+       u32 ret;
+
+       vpath = &hldev->virtual_paths[hldev->first_vp_id];
+
+       status = vxge_hw_vpath_fw_api(vpath,
+                                     VXGE_HW_FW_UPGRADE_ACTION,
+                                     VXGE_HW_FW_UPGRADE_MEMO,
+                                     VXGE_HW_FW_UPGRADE_OFFSET_COMMIT,
+                                     &data0, &data1, &steer_ctrl);
+       if (status != VXGE_HW_OK) {
+               vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__);
+               goto exit;
+       }
+
+       ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F;
+       if (ret != 1) {
+               vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d",
+                               __func__, ret);
+               status = VXGE_HW_FAIL;
+       }
+
+exit:
+       return status;
+}
+
+enum vxge_hw_status
+vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size)
+{
+       u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+       struct __vxge_hw_virtualpath *vpath;
+       enum vxge_hw_status status;
+       int ret_code, sec_code;
+
+       vpath = &hldev->virtual_paths[hldev->first_vp_id];
+
+       /* send upgrade start command */
+       status = vxge_hw_vpath_fw_api(vpath,
+                                     VXGE_HW_FW_UPGRADE_ACTION,
+                                     VXGE_HW_FW_UPGRADE_MEMO,
+                                     VXGE_HW_FW_UPGRADE_OFFSET_START,
+                                     &data0, &data1, &steer_ctrl);
+       if (status != VXGE_HW_OK) {
+               vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed",
+                               __func__);
+               return status;
+       }
+
+       /* Transfer fw image to adapter 16 bytes at a time */
+       for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) {
+               steer_ctrl = 0;
+
+               /* The next 128bits of fwdata to be loaded onto the adapter */
+               data0 = *((u64 *)fwdata);
+               data1 = *((u64 *)fwdata + 1);
+
+               status = vxge_hw_vpath_fw_api(vpath,
+                                             VXGE_HW_FW_UPGRADE_ACTION,
+                                             VXGE_HW_FW_UPGRADE_MEMO,
+                                             VXGE_HW_FW_UPGRADE_OFFSET_SEND,
+                                             &data0, &data1, &steer_ctrl);
+               if (status != VXGE_HW_OK) {
+                       vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed",
+                                       __func__);
+                       goto out;
+               }
+
+               ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0);
+               switch (ret_code) {
+               case VXGE_HW_FW_UPGRADE_OK:
+                       /* All OK, send next 16 bytes. */
+                       break;
+               case VXGE_FW_UPGRADE_BYTES2SKIP:
+                       /* skip bytes in the stream */
+                       fwdata += (data0 >> 8) & 0xFFFFFFFF;
+                       break;
+               case VXGE_HW_FW_UPGRADE_DONE:
+                       goto out;
+               case VXGE_HW_FW_UPGRADE_ERR:
+                       sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0);
+                       switch (sec_code) {
+                       case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1:
+                       case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7:
+                               printk(KERN_ERR
+                                      "corrupted data from .ncf file\n");
+                               break;
+                       case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3:
+                       case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4:
+                       case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5:
+                       case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6:
+                       case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8:
+                               printk(KERN_ERR "invalid .ncf file\n");
+                               break;
+                       case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW:
+                               printk(KERN_ERR "buffer overflow\n");
+                               break;
+                       case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH:
+                               printk(KERN_ERR "failed to flash the image\n");
+                               break;
+                       case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN:
+                               printk(KERN_ERR
+                                      "generic error. Unknown error type\n");
+                               break;
+                       default:
+                               printk(KERN_ERR "Unknown error of type %d\n",
+                                      sec_code);
+                               break;
+                       }
+                       status = VXGE_HW_FAIL;
+                       goto out;
+               default:
+                       printk(KERN_ERR "Unknown FW error: %d\n", ret_code);
+                       status = VXGE_HW_FAIL;
+                       goto out;
+               }
+               /* point to next 16 bytes */
+               fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE;
+       }
+out:
+       return status;
+}
+
+enum vxge_hw_status
+vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
+                               struct eprom_image *img)
+{
+       u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+       struct __vxge_hw_virtualpath *vpath;
+       enum vxge_hw_status status;
+       int i;
+
+       vpath = &hldev->virtual_paths[hldev->first_vp_id];
+
+       for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
+               data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i);
+               data1 = steer_ctrl = 0;
+
+               status = vxge_hw_vpath_fw_api(vpath,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+                       VXGE_HW_FW_API_GET_EPROM_REV,
+                       0, &data0, &data1, &steer_ctrl);
+               if (status != VXGE_HW_OK)
+                       break;
+
+               img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0);
+               img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0);
+               img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0);
+               img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0);
+       }
+
+       return status;
+}
 
 /*
  * __vxge_hw_channel_allocate - Allocate memory for channel
  * This function allocates required memory for the channel and various arrays
  * in the channel
  */
-struct __vxge_hw_channel*
+static struct __vxge_hw_channel *
 __vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
                           enum __vxge_hw_channel_type type,
        u32 length, u32 per_dtr_space, void *userdata)
@@ -269,7 +521,7 @@ exit0:
  * This function deallocates memory from the channel and various arrays
  * in the channel
  */
-void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
+static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
 {
        kfree(channel->work_arr);
        kfree(channel->free_arr);
@@ -283,7 +535,7 @@ void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
  * This function initializes a channel by properly setting the
  * various references
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
 {
        u32 i;
@@ -318,7 +570,7 @@ __vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
  * __vxge_hw_channel_reset - Resets a channel
  * This function resets a channel by properly setting the various references
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
 {
        u32 i;
@@ -345,8 +597,7 @@ __vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
  * Initialize certain PCI/PCI-X configuration registers
  * with recommended values. Save config space for future hw resets.
  */
-void
-__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
+static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
 {
        u16 cmd = 0;
 
@@ -390,7 +641,7 @@ __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
        return ret;
 }
 
- /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
+/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
  * in progress
  * This routine checks the vpath reset in progress register is turned zero
  */
@@ -435,7 +686,7 @@ exit:
  * register location pointers in the device object. It waits until the ric is
  * completed initializing registers.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
 {
        u64 val64;
@@ -495,26 +746,6 @@ exit:
        return status;
 }
 
-/*
- * __vxge_hw_device_id_get
- * This routine returns sets the device id and revision numbers into the device
- * structure
- */
-void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
-{
-       u64 val64;
-
-       val64 = readq(&hldev->common_reg->titan_asic_id);
-       hldev->device_id =
-               (u16)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(val64);
-
-       hldev->major_revision =
-               (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(val64);
-
-       hldev->minor_revision =
-               (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);
-}
-
 /*
  * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
  * This routine returns the Access Rights of the driver
@@ -567,11 +798,26 @@ __vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
                return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
 }
 
+/*
+ * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
+ * Returns the function number of the vpath.
+ */
+static u32
+__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
+{
+       u64 val64;
+
+       val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
+
+       return
+        (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
+}
+
 /*
  * __vxge_hw_device_host_info_get
  * This routine returns the host type assignments
  */
-void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
+static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
 {
        u64 val64;
        u32 i;
@@ -584,16 +830,18 @@ void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
        hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
 
        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
                if (!(hldev->vpath_assignments & vxge_mBIT(i)))
                        continue;
 
                hldev->func_id =
-                       __vxge_hw_vpath_func_id_get(i, hldev->vpmgmt_reg[i]);
+                       __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);
 
                hldev->access_rights = __vxge_hw_device_access_rights_get(
                        hldev->host_type, hldev->func_id);
 
+               hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN;
+               hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i];
+
                hldev->first_vp_id = i;
                break;
        }
@@ -627,25 +875,216 @@ __vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
                return VXGE_HW_ERR_INVALID_PCI_INFO;
        }
 
-       return VXGE_HW_OK;
+       return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_device_initialize
+ * Initialize Titan-V hardware.
+ */
+static enum vxge_hw_status
+__vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
+{
+       enum vxge_hw_status status = VXGE_HW_OK;
+
+       if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
+                               hldev->func_id)) {
+               /* Validate the pci-e link width and speed */
+               status = __vxge_hw_verify_pci_e_info(hldev);
+               if (status != VXGE_HW_OK)
+                       goto exit;
+       }
+
+exit:
+       return status;
+}
+
+/*
+ * __vxge_hw_vpath_fw_ver_get - Get the fw version
+ * Returns FW Version
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
+                          struct vxge_hw_device_hw_info *hw_info)
+{
+       struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
+       struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
+       struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
+       struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
+       u64 data0, data1 = 0, steer_ctrl = 0;
+       enum vxge_hw_status status;
+
+       status = vxge_hw_vpath_fw_api(vpath,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+                       0, &data0, &data1, &steer_ctrl);
+       if (status != VXGE_HW_OK)
+               goto exit;
+
+       fw_date->day =
+           (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0);
+       fw_date->month =
+           (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0);
+       fw_date->year =
+           (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0);
+
+       snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
+                fw_date->month, fw_date->day, fw_date->year);
+
+       fw_version->major =
+           (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
+       fw_version->minor =
+           (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
+       fw_version->build =
+           (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
+
+       snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
+                fw_version->major, fw_version->minor, fw_version->build);
+
+       flash_date->day =
+           (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1);
+       flash_date->month =
+           (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1);
+       flash_date->year =
+           (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1);
+
+       snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
+                flash_date->month, flash_date->day, flash_date->year);
+
+       flash_version->major =
+           (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1);
+       flash_version->minor =
+           (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1);
+       flash_version->build =
+           (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1);
+
+       snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
+                flash_version->major, flash_version->minor,
+                flash_version->build);
+
+exit:
+       return status;
+}
+
+/*
+ * __vxge_hw_vpath_card_info_get - Get the serial numbers,
+ * part number and product description.
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath,
+                             struct vxge_hw_device_hw_info *hw_info)
+{
+       enum vxge_hw_status status;
+       u64 data0, data1 = 0, steer_ctrl = 0;
+       u8 *serial_number = hw_info->serial_number;
+       u8 *part_number = hw_info->part_number;
+       u8 *product_desc = hw_info->product_desc;
+       u32 i, j = 0;
+
+       data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER;
+
+       status = vxge_hw_vpath_fw_api(vpath,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+                       0, &data0, &data1, &steer_ctrl);
+       if (status != VXGE_HW_OK)
+               return status;
+
+       ((u64 *)serial_number)[0] = be64_to_cpu(data0);
+       ((u64 *)serial_number)[1] = be64_to_cpu(data1);
+
+       data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER;
+       data1 = steer_ctrl = 0;
+
+       status = vxge_hw_vpath_fw_api(vpath,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+                       0, &data0, &data1, &steer_ctrl);
+       if (status != VXGE_HW_OK)
+               return status;
+
+       ((u64 *)part_number)[0] = be64_to_cpu(data0);
+       ((u64 *)part_number)[1] = be64_to_cpu(data1);
+
+       for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
+            i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
+               data0 = i;
+               data1 = steer_ctrl = 0;
+
+               status = vxge_hw_vpath_fw_api(vpath,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+                       0, &data0, &data1, &steer_ctrl);
+               if (status != VXGE_HW_OK)
+                       return status;
+
+               ((u64 *)product_desc)[j++] = be64_to_cpu(data0);
+               ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
+       }
+
+       return status;
+}
+
+/*
+ * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
+ * Returns pci function mode
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath,
+                                 struct vxge_hw_device_hw_info *hw_info)
+{
+       u64 data0, data1 = 0, steer_ctrl = 0;
+       enum vxge_hw_status status;
+
+       data0 = 0;
+
+       status = vxge_hw_vpath_fw_api(vpath,
+                       VXGE_HW_FW_API_GET_FUNC_MODE,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+                       0, &data0, &data1, &steer_ctrl);
+       if (status != VXGE_HW_OK)
+               return status;
+
+       hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0);
+       return status;
 }
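The helpers above now funnel every firmware/RTS steering access through the single vxge_hw_vpath_fw_api() wrapper instead of open-coding the rts_access_steer_* register sequence. A minimal caller sketch, assuming only the constants and signature visible in the calls above (the name example_read_fw_memo is hypothetical):

static enum vxge_hw_status
example_read_fw_memo(struct __vxge_hw_virtualpath *vpath)
{
	/* data0/data1 receive the two 64-bit result words, steer_ctrl the
	 * raw control word; all three are passed by reference */
	u64 data0 = 0, data1 = 0, steer_ctrl = 0;

	return vxge_hw_vpath_fw_api(vpath,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
			0, &data0, &data1, &steer_ctrl);
}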
 
 /*
- * __vxge_hw_device_initialize
- * Initialize Titan-V hardware.
+ * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
+ *               from MAC address table.
  */
-enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
+static enum vxge_hw_status
+__vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath,
+                        u8 *macaddr, u8 *macaddr_mask)
 {
-       enum vxge_hw_status status = VXGE_HW_OK;
+       u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
+           data0 = 0, data1 = 0, steer_ctrl = 0;
+       enum vxge_hw_status status;
+       int i;
 
-       if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
-                               hldev->func_id)) {
-               /* Validate the pci-e link width and speed */
-               status = __vxge_hw_verify_pci_e_info(hldev);
+       do {
+               status = vxge_hw_vpath_fw_api(vpath, action,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
+                       0, &data0, &data1, &steer_ctrl);
                if (status != VXGE_HW_OK)
                        goto exit;
-       }
 
+               data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
+               data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
+                                                                       data1);
+
+               for (i = ETH_ALEN; i > 0; i--) {
+                       macaddr[i - 1] = (u8) (data0 & 0xFF);
+                       data0 >>= 8;
+
+                       macaddr_mask[i - 1] = (u8) (data1 & 0xFF);
+                       data1 >>= 8;
+               }
+
+               action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
+               data0 = 0, data1 = 0, steer_ctrl = 0;
+
+       } while (!is_valid_ether_addr(macaddr));
 exit:
        return status;
 }
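__vxge_hw_vpath_addr_get() keeps iterating the DA table until a valid unicast address turns up; the address sits in the low 48 bits of data0 with the first octet in the most significant of those bits, hence the backwards fill loop. The same unpacking as a standalone sketch (example_unpack_mac is a hypothetical name, not part of the patch):

static void example_unpack_mac(u64 reg, u8 mac[ETH_ALEN])
{
	int i;

	/* the lowest byte of the register holds the last octet */
	for (i = ETH_ALEN; i > 0; i--) {
		mac[i - 1] = (u8)(reg & 0xFF);
		reg >>= 8;
	}
}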
@@ -665,9 +1104,9 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
        struct vxge_hw_toc_reg __iomem *toc;
        struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
        struct vxge_hw_common_reg __iomem *common_reg;
-       struct vxge_hw_vpath_reg __iomem *vpath_reg;
        struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
        enum vxge_hw_status status;
+       struct __vxge_hw_virtualpath vpath;
 
        memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));
 
@@ -702,7 +1141,7 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
                vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
                                (bar0 + val64);
 
-               hw_info->func_id = __vxge_hw_vpath_func_id_get(i, vpmgmt_reg);
+               hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
                if (__vxge_hw_device_access_rights_get(hw_info->host_type,
                        hw_info->func_id) &
                        VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {
@@ -718,16 +1157,19 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
 
                val64 = readq(&toc->toc_vpath_pointer[i]);
 
-               vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);
+               vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
+                              (bar0 + val64);
+               vpath.vp_open = 0;
 
-               hw_info->function_mode =
-                       __vxge_hw_vpath_pci_func_mode_get(i, vpath_reg);
+               status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
+               if (status != VXGE_HW_OK)
+                       goto exit;
 
-               status = __vxge_hw_vpath_fw_ver_get(i, vpath_reg, hw_info);
+               status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);
                if (status != VXGE_HW_OK)
                        goto exit;
 
-               status = __vxge_hw_vpath_card_info_get(i, vpath_reg, hw_info);
+               status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);
                if (status != VXGE_HW_OK)
                        goto exit;
 
@@ -735,14 +1177,15 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
        }
 
        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
                if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
                        continue;
 
                val64 = readq(&toc->toc_vpath_pointer[i]);
-               vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);
+               vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
+                              (bar0 + val64);
+               vpath.vp_open = 0;
 
-               status =  __vxge_hw_vpath_addr_get(i, vpath_reg,
+               status =  __vxge_hw_vpath_addr_get(&vpath,
                                hw_info->mac_addrs[i],
                                hw_info->mac_addr_masks[i]);
                if (status != VXGE_HW_OK)
@@ -806,7 +1249,6 @@ vxge_hw_device_initialize(
                vfree(hldev);
                goto exit;
        }
-       __vxge_hw_device_id_get(hldev);
 
        __vxge_hw_device_host_info_get(hldev);
 
@@ -814,7 +1256,6 @@ vxge_hw_device_initialize(
        nblocks++;
 
        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
                if (!(hldev->vpath_assignments & vxge_mBIT(i)))
                        continue;
 
@@ -839,7 +1280,6 @@ vxge_hw_device_initialize(
        }
 
        status = __vxge_hw_device_initialize(hldev);
-
        if (status != VXGE_HW_OK) {
                vxge_hw_device_terminate(hldev);
                goto exit;
@@ -876,7 +1316,6 @@ vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
        enum vxge_hw_status status = VXGE_HW_OK;
 
        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
                if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
                        (hldev->virtual_paths[i].vp_open ==
                                VXGE_HW_VP_NOT_OPEN))
@@ -1165,7 +1604,6 @@ exit:
  * It can be used to set or reset Pause frame generation or reception
  * support of the NIC.
  */
-
 enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
                                                 u32 port, u32 tx, u32 rx)
 {
@@ -1409,7 +1847,6 @@ exit:
 /*
  * __vxge_hw_ring_create - Create a Ring
  * This function creates Ring and initializes it.
- *
  */
 static enum vxge_hw_status
 __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
@@ -1845,7 +2282,7 @@ static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
  * __vxge_hw_device_fifo_config_check - Check fifo configuration.
  * Check the fifo configuration
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
 {
        if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
@@ -1893,7 +2330,7 @@ __vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
  * __vxge_hw_device_config_check - Check device configuration.
  * Check the device configuration
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
 {
        u32 i;
@@ -2453,7 +2890,7 @@ __vxge_hw_fifo_mempool_item_alloc(
  * __vxge_hw_fifo_create - Create a FIFO
  * This function creates FIFO and initializes it.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
                      struct vxge_hw_fifo_attr *attr)
 {
@@ -2516,454 +2953,164 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
         *
         * During "reserve" operations more memory can be allocated on demand
         * for example due to FIFO full condition.
-        *
-        * Pool of memory memblocks never shrinks except in __vxge_hw_fifo_close
-        * routine which will essentially stop the channel and free resources.
-        */
-
-       /* TxDL common private size == TxDL private  +  driver private */
-       fifo->priv_size =
-               sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
-       fifo->priv_size = ((fifo->priv_size  +  VXGE_CACHE_LINE_SIZE - 1) /
-                       VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
-
-       fifo->per_txdl_space = attr->per_txdl_space;
-
-       /* recompute txdl size to be cacheline aligned */
-       fifo->txdl_size = txdl_size;
-       fifo->txdl_per_memblock = txdl_per_memblock;
-
-       fifo->txdl_term = attr->txdl_term;
-       fifo->callback = attr->callback;
-
-       if (fifo->txdl_per_memblock == 0) {
-               __vxge_hw_fifo_delete(vp);
-               status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
-               goto exit;
-       }
-
-       fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
-
-       fifo->mempool =
-               __vxge_hw_mempool_create(vpath->hldev,
-                       fifo->config->memblock_size,
-                       fifo->txdl_size,
-                       fifo->priv_size,
-                       (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
-                       (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
-                       &fifo_mp_callback,
-                       fifo);
-
-       if (fifo->mempool == NULL) {
-               __vxge_hw_fifo_delete(vp);
-               status = VXGE_HW_ERR_OUT_OF_MEMORY;
-               goto exit;
-       }
-
-       status = __vxge_hw_channel_initialize(&fifo->channel);
-       if (status != VXGE_HW_OK) {
-               __vxge_hw_fifo_delete(vp);
-               goto exit;
-       }
-
-       vxge_assert(fifo->channel.reserve_ptr);
-exit:
-       return status;
-}
-
-/*
- * __vxge_hw_fifo_abort - Returns the TxD
- * This function terminates the TxDs of fifo
- */
-static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
-{
-       void *txdlh;
-
-       for (;;) {
-               vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
-
-               if (txdlh == NULL)
-                       break;
-
-               vxge_hw_channel_dtr_complete(&fifo->channel);
-
-               if (fifo->txdl_term) {
-                       fifo->txdl_term(txdlh,
-                       VXGE_HW_TXDL_STATE_POSTED,
-                       fifo->channel.userdata);
-               }
-
-               vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
-       }
-
-       return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_fifo_reset - Resets the fifo
- * This function resets the fifo during vpath reset operation
- */
-static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
-{
-       enum vxge_hw_status status = VXGE_HW_OK;
-
-       __vxge_hw_fifo_abort(fifo);
-       status = __vxge_hw_channel_reset(&fifo->channel);
-
-       return status;
-}
-
-/*
- * __vxge_hw_fifo_delete - Removes the FIFO
- * This function freeup the memory pool and removes the FIFO
- */
-enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
-{
-       struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
-
-       __vxge_hw_fifo_abort(fifo);
-
-       if (fifo->mempool)
-               __vxge_hw_mempool_destroy(fifo->mempool);
-
-       vp->vpath->fifoh = NULL;
-
-       __vxge_hw_channel_free(&fifo->channel);
-
-       return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_vpath_pci_read - Read the content of given address
- *                          in pci config space.
- * Read from the vpath pci config space.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
-                        u32 phy_func_0, u32 offset, u32 *val)
-{
-       u64 val64;
-       enum vxge_hw_status status = VXGE_HW_OK;
-       struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
-
-       val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);
-
-       if (phy_func_0)
-               val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;
-
-       writeq(val64, &vp_reg->pci_config_access_cfg1);
-       wmb();
-       writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
-                       &vp_reg->pci_config_access_cfg2);
-       wmb();
-
-       status = __vxge_hw_device_register_poll(
-                       &vp_reg->pci_config_access_cfg2,
-                       VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
-       if (status != VXGE_HW_OK)
-               goto exit;
-
-       val64 = readq(&vp_reg->pci_config_access_status);
-
-       if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
-               status = VXGE_HW_FAIL;
-               *val = 0;
-       } else
-               *val = (u32)vxge_bVALn(val64, 32, 32);
-exit:
-       return status;
-}
-
-/*
- * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
- * Returns the function number of the vpath.
- */
-static u32
-__vxge_hw_vpath_func_id_get(u32 vp_id,
-       struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
-{
-       u64 val64;
-
-       val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
-
-       return
-        (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
-}
-
-/*
- * __vxge_hw_read_rts_ds - Program RTS steering critieria
- */
-static inline void
-__vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
-                     u64 dta_struct_sel)
-{
-       writeq(0, &vpath_reg->rts_access_steer_ctrl);
-       wmb();
-       writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
-       writeq(0, &vpath_reg->rts_access_steer_data1);
-       wmb();
-}
-
-
-/*
- * __vxge_hw_vpath_card_info_get - Get the serial numbers,
- * part number and product description.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_card_info_get(
-       u32 vp_id,
-       struct vxge_hw_vpath_reg __iomem *vpath_reg,
-       struct vxge_hw_device_hw_info *hw_info)
-{
-       u32 i, j;
-       u64 val64;
-       u64 data1 = 0ULL;
-       u64 data2 = 0ULL;
-       enum vxge_hw_status status = VXGE_HW_OK;
-       u8 *serial_number = hw_info->serial_number;
-       u8 *part_number = hw_info->part_number;
-       u8 *product_desc = hw_info->product_desc;
-
-       __vxge_hw_read_rts_ds(vpath_reg,
-               VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER);
-
-       val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
-       status = __vxge_hw_pio_mem_write64(val64,
-                               &vpath_reg->rts_access_steer_ctrl,
-                               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
-                               VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
-       if (status != VXGE_HW_OK)
-               return status;
-
-       val64 = readq(&vpath_reg->rts_access_steer_ctrl);
-
-       if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
-               data1 = readq(&vpath_reg->rts_access_steer_data0);
-               ((u64 *)serial_number)[0] = be64_to_cpu(data1);
-
-               data2 = readq(&vpath_reg->rts_access_steer_data1);
-               ((u64 *)serial_number)[1] = be64_to_cpu(data2);
-               status = VXGE_HW_OK;
-       } else
-               *serial_number = 0;
-
-       __vxge_hw_read_rts_ds(vpath_reg,
-                       VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER);
-
-       val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
-       status = __vxge_hw_pio_mem_write64(val64,
-                               &vpath_reg->rts_access_steer_ctrl,
-                               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
-                               VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
-       if (status != VXGE_HW_OK)
-               return status;
-
-       val64 = readq(&vpath_reg->rts_access_steer_ctrl);
+        *
+        * Pool of memory memblocks never shrinks except in __vxge_hw_fifo_close
+        * routine which will essentially stop the channel and free resources.
+        */
 
-       if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
+       /* TxDL common private size == TxDL private  +  driver private */
+       fifo->priv_size =
+               sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
+       fifo->priv_size = ((fifo->priv_size  +  VXGE_CACHE_LINE_SIZE - 1) /
+                       VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
 
-               data1 = readq(&vpath_reg->rts_access_steer_data0);
-               ((u64 *)part_number)[0] = be64_to_cpu(data1);
+       fifo->per_txdl_space = attr->per_txdl_space;
 
-               data2 = readq(&vpath_reg->rts_access_steer_data1);
-               ((u64 *)part_number)[1] = be64_to_cpu(data2);
+       /* recompute txdl size to be cacheline aligned */
+       fifo->txdl_size = txdl_size;
+       fifo->txdl_per_memblock = txdl_per_memblock;
 
-               status = VXGE_HW_OK;
+       fifo->txdl_term = attr->txdl_term;
+       fifo->callback = attr->callback;
 
-       } else
-               *part_number = 0;
+       if (fifo->txdl_per_memblock == 0) {
+               __vxge_hw_fifo_delete(vp);
+               status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
+               goto exit;
+       }
 
-       j = 0;
+       fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
 
-       for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
-            i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
+       fifo->mempool =
+               __vxge_hw_mempool_create(vpath->hldev,
+                       fifo->config->memblock_size,
+                       fifo->txdl_size,
+                       fifo->priv_size,
+                       (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
+                       (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
+                       &fifo_mp_callback,
+                       fifo);
 
-               __vxge_hw_read_rts_ds(vpath_reg, i);
+       if (fifo->mempool == NULL) {
+               __vxge_hw_fifo_delete(vp);
+               status = VXGE_HW_ERR_OUT_OF_MEMORY;
+               goto exit;
+       }
 
-               val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
+       status = __vxge_hw_channel_initialize(&fifo->channel);
+       if (status != VXGE_HW_OK) {
+               __vxge_hw_fifo_delete(vp);
+               goto exit;
+       }
 
-               status = __vxge_hw_pio_mem_write64(val64,
-                               &vpath_reg->rts_access_steer_ctrl,
-                               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
-                               VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+       vxge_assert(fifo->channel.reserve_ptr);
+exit:
+       return status;
+}
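The priv_size arithmetic above is the usual round-up-to-a-cache-line idiom; an equivalent form using the kernel's ALIGN() macro is shown purely for illustration (the patch keeps the open-coded version):

	/* pad the per-TxDL private area to a whole cache line */
	fifo->priv_size = ALIGN(sizeof(struct __vxge_hw_fifo_txdl_priv) +
				attr->per_txdl_space, VXGE_CACHE_LINE_SIZE);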
 
-               if (status != VXGE_HW_OK)
-                       return status;
+/*
+ * __vxge_hw_fifo_abort - Returns the TxD
+ * This function terminates the TxDs of fifo
+ */
+static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
+{
+       void *txdlh;
 
-               val64 = readq(&vpath_reg->rts_access_steer_ctrl);
+       for (;;) {
+               vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
 
-               if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
+               if (txdlh == NULL)
+                       break;
 
-                       data1 = readq(&vpath_reg->rts_access_steer_data0);
-                       ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
+               vxge_hw_channel_dtr_complete(&fifo->channel);
 
-                       data2 = readq(&vpath_reg->rts_access_steer_data1);
-                       ((u64 *)product_desc)[j++] = be64_to_cpu(data2);
+               if (fifo->txdl_term) {
+                       fifo->txdl_term(txdlh,
+                       VXGE_HW_TXDL_STATE_POSTED,
+                       fifo->channel.userdata);
+               }
 
-                       status = VXGE_HW_OK;
-               } else
-                       *product_desc = 0;
+               vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
        }
 
-       return status;
+       return VXGE_HW_OK;
 }
 
 /*
- * __vxge_hw_vpath_fw_ver_get - Get the fw version
- * Returns FW Version
+ * __vxge_hw_fifo_reset - Resets the fifo
+ * This function resets the fifo during vpath reset operation
  */
-static enum vxge_hw_status
-__vxge_hw_vpath_fw_ver_get(
-       u32 vp_id,
-       struct vxge_hw_vpath_reg __iomem *vpath_reg,
-       struct vxge_hw_device_hw_info *hw_info)
+static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
 {
-       u64 val64;
-       u64 data1 = 0ULL;
-       u64 data2 = 0ULL;
-       struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
-       struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
-       struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
-       struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
        enum vxge_hw_status status = VXGE_HW_OK;
 
-       val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
+       __vxge_hw_fifo_abort(fifo);
+       status = __vxge_hw_channel_reset(&fifo->channel);
 
-       status = __vxge_hw_pio_mem_write64(val64,
-                               &vpath_reg->rts_access_steer_ctrl,
-                               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
-                               VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+       return status;
+}
 
-       if (status != VXGE_HW_OK)
-               goto exit;
+/*
+ * __vxge_hw_fifo_delete - Removes the FIFO
+ * This function frees up the memory pool and removes the FIFO
+ */
+static enum vxge_hw_status
+__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
+{
+       struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
 
-       val64 = readq(&vpath_reg->rts_access_steer_ctrl);
+       __vxge_hw_fifo_abort(fifo);
 
-       if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
+       if (fifo->mempool)
+               __vxge_hw_mempool_destroy(fifo->mempool);
 
-               data1 = readq(&vpath_reg->rts_access_steer_data0);
-               data2 = readq(&vpath_reg->rts_access_steer_data1);
-
-               fw_date->day =
-                       (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(
-                                               data1);
-               fw_date->month =
-                       (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(
-                                               data1);
-               fw_date->year =
-                       (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(
-                                               data1);
-
-               snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
-                       fw_date->month, fw_date->day, fw_date->year);
-
-               fw_version->major =
-                   (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1);
-               fw_version->minor =
-                   (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1);
-               fw_version->build =
-                   (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1);
-
-               snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
-                   fw_version->major, fw_version->minor, fw_version->build);
-
-               flash_date->day =
-                 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2);
-               flash_date->month =
-                (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2);
-               flash_date->year =
-                (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2);
-
-               snprintf(flash_date->date, VXGE_HW_FW_STRLEN,
-                       "%2.2d/%2.2d/%4.4d",
-                       flash_date->month, flash_date->day, flash_date->year);
-
-               flash_version->major =
-                (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2);
-               flash_version->minor =
-                (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2);
-               flash_version->build =
-                (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2);
-
-               snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
-                       flash_version->major, flash_version->minor,
-                       flash_version->build);
+       vp->vpath->fifoh = NULL;
 
-               status = VXGE_HW_OK;
+       __vxge_hw_channel_free(&fifo->channel);
 
-       } else
-               status = VXGE_HW_FAIL;
-exit:
-       return status;
+       return VXGE_HW_OK;
 }
 
 /*
- * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
- * Returns pci function mode
+ * __vxge_hw_vpath_pci_read - Read the content of given address
+ *                          in pci config space.
+ * Read from the vpath pci config space.
  */
-static u64
-__vxge_hw_vpath_pci_func_mode_get(
-       u32  vp_id,
-       struct vxge_hw_vpath_reg __iomem *vpath_reg)
+static enum vxge_hw_status
+__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
+                        u32 phy_func_0, u32 offset, u32 *val)
 {
        u64 val64;
-       u64 data1 = 0ULL;
        enum vxge_hw_status status = VXGE_HW_OK;
+       struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
 
-       __vxge_hw_read_rts_ds(vpath_reg,
-               VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE);
+       val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);
 
-       val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
+       if (phy_func_0)
+               val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;
 
-       status = __vxge_hw_pio_mem_write64(val64,
-                               &vpath_reg->rts_access_steer_ctrl,
-                               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
-                               VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+       writeq(val64, &vp_reg->pci_config_access_cfg1);
+       wmb();
+       writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
+                       &vp_reg->pci_config_access_cfg2);
+       wmb();
+
+       status = __vxge_hw_device_register_poll(
+                       &vp_reg->pci_config_access_cfg2,
+                       VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
 
        if (status != VXGE_HW_OK)
                goto exit;
 
-       val64 = readq(&vpath_reg->rts_access_steer_ctrl);
+       val64 = readq(&vp_reg->pci_config_access_status);
 
-       if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
-               data1 = readq(&vpath_reg->rts_access_steer_data0);
-               status = VXGE_HW_OK;
-       } else {
-               data1 = 0;
+       if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
                status = VXGE_HW_FAIL;
-       }
+               *val = 0;
+       } else
+               *val = (u32)vxge_bVALn(val64, 32, 32);
 exit:
-       return data1;
+       return status;
 }
 
 /**
@@ -2974,37 +3121,24 @@ exit:
  * Flicker the link LED.
  */
 enum vxge_hw_status
-vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev,
-                              u64 on_off)
+vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off)
 {
-       u64 val64;
-       enum vxge_hw_status status = VXGE_HW_OK;
-       struct vxge_hw_vpath_reg __iomem *vp_reg;
+       struct __vxge_hw_virtualpath *vpath;
+       u64 data0, data1 = 0, steer_ctrl = 0;
+       enum vxge_hw_status status;
 
        if (hldev == NULL) {
                status = VXGE_HW_ERR_INVALID_DEVICE;
                goto exit;
        }
 
-       vp_reg = hldev->vpath_reg[hldev->first_vp_id];
-
-       writeq(0, &vp_reg->rts_access_steer_ctrl);
-       wmb();
-       writeq(on_off, &vp_reg->rts_access_steer_data0);
-       writeq(0, &vp_reg->rts_access_steer_data1);
-       wmb();
-
-       val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
+       vpath = &hldev->virtual_paths[hldev->first_vp_id];
 
-       status = __vxge_hw_pio_mem_write64(val64,
-                               &vp_reg->rts_access_steer_ctrl,
-                               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
-                               VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+       data0 = on_off;
+       status = vxge_hw_vpath_fw_api(vpath,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+                       0, &data0, &data1, &steer_ctrl);
 exit:
        return status;
 }
@@ -3013,63 +3147,38 @@ exit:
  * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
  */
 enum vxge_hw_status
-__vxge_hw_vpath_rts_table_get(
-       struct __vxge_hw_vpath_handle *vp,
-       u32 action, u32 rts_table, u32 offset, u64 *data1, u64 *data2)
+__vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
+                             u32 action, u32 rts_table, u32 offset,
+                             u64 *data0, u64 *data1)
 {
-       u64 val64;
-       struct __vxge_hw_virtualpath *vpath;
-       struct vxge_hw_vpath_reg __iomem *vp_reg;
-
-       enum vxge_hw_status status = VXGE_HW_OK;
+       enum vxge_hw_status status;
+       u64 steer_ctrl = 0;
 
        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }
 
-       vpath = vp->vpath;
-       vp_reg = vpath->vp_reg;
-
-       val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
-
        if ((rts_table ==
-               VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
+            VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
            (rts_table ==
-               VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
+            VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
            (rts_table ==
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
+            VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
            (rts_table ==
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
-               val64 = val64 | VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
+            VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
+               steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
        }
 
-       status = __vxge_hw_pio_mem_write64(val64,
-                               &vp_reg->rts_access_steer_ctrl,
-                               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
-                               vpath->hldev->config.device_poll_millis);
-
+       status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
+                                     data0, data1, &steer_ctrl);
        if (status != VXGE_HW_OK)
                goto exit;
 
-       val64 = readq(&vp_reg->rts_access_steer_ctrl);
-
-       if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
-
-               *data1 = readq(&vp_reg->rts_access_steer_data0);
-
-               if ((rts_table ==
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
-               (rts_table ==
-               VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
-                       *data2 = readq(&vp_reg->rts_access_steer_data1);
-               }
-               status = VXGE_HW_OK;
-       } else
-               status = VXGE_HW_FAIL;
+       if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
+           (rts_table !=
+            VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
+               *data1 = 0;
 exit:
        return status;
 }
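Only the DA and RTH_MULTI_IT tables return a second result word, so data1 must be cleared for every other table; the two '!=' tests are the De Morgan negation of the old '(table == DA) || (table == MULTI_IT)' check and therefore have to be joined with '&&'. The same test written as a predicate, for illustration only (rts_table_has_data1 is a hypothetical helper):

static bool rts_table_has_data1(u32 rts_table)
{
	return rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA ||
	       rts_table ==
	       VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
}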
@@ -3078,107 +3187,27 @@ exit:
  * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
  */
 enum vxge_hw_status
-__vxge_hw_vpath_rts_table_set(
-       struct __vxge_hw_vpath_handle *vp, u32 action, u32 rts_table,
-       u32 offset, u64 data1, u64 data2)
+__vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action,
+                             u32 rts_table, u32 offset, u64 steer_data0,
+                             u64 steer_data1)
 {
-       u64 val64;
-       struct __vxge_hw_virtualpath *vpath;
-       enum vxge_hw_status status = VXGE_HW_OK;
-       struct vxge_hw_vpath_reg __iomem *vp_reg;
+       u64 data0, data1 = 0, steer_ctrl = 0;
+       enum vxge_hw_status status;
 
        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }
 
-       vpath = vp->vpath;
-       vp_reg = vpath->vp_reg;
-
-       writeq(data1, &vp_reg->rts_access_steer_data0);
-       wmb();
+       data0 = steer_data0;
 
        if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
            (rts_table ==
-               VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
-               writeq(data2, &vp_reg->rts_access_steer_data1);
-               wmb();
-       }
-
-       val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
-
-       status = __vxge_hw_pio_mem_write64(val64,
-                               &vp_reg->rts_access_steer_ctrl,
-                               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
-                               vpath->hldev->config.device_poll_millis);
-
-       if (status != VXGE_HW_OK)
-               goto exit;
-
-       val64 = readq(&vp_reg->rts_access_steer_ctrl);
-
-       if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS)
-               status = VXGE_HW_OK;
-       else
-               status = VXGE_HW_FAIL;
-exit:
-       return status;
-}
-
-/*
- * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
- *               from MAC address table.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_addr_get(
-       u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
-       u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
-{
-       u32 i;
-       u64 val64;
-       u64 data1 = 0ULL;
-       u64 data2 = 0ULL;
-       enum vxge_hw_status status = VXGE_HW_OK;
-
-       val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
-       status = __vxge_hw_pio_mem_write64(val64,
-                               &vpath_reg->rts_access_steer_ctrl,
-                               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
-                               VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
-       if (status != VXGE_HW_OK)
-               goto exit;
-
-       val64 = readq(&vpath_reg->rts_access_steer_ctrl);
-
-       if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
-
-               data1 = readq(&vpath_reg->rts_access_steer_data0);
-               data2 = readq(&vpath_reg->rts_access_steer_data1);
-
-               data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
-               data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
-                                                       data2);
-
-               for (i = ETH_ALEN; i > 0; i--) {
-                       macaddr[i-1] = (u8)(data1 & 0xFF);
-                       data1 >>= 8;
+            VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
+               data1 = steer_data1;
 
-                       macaddr_mask[i-1] = (u8)(data2 & 0xFF);
-                       data2 >>= 8;
-               }
-               status = VXGE_HW_OK;
-       } else
-               status = VXGE_HW_FAIL;
+       status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
+                                     &data0, &data1, &steer_ctrl);
 exit:
        return status;
 }
@@ -3204,6 +3233,8 @@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
                     VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
                     VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
                        0, &data0, &data1);
+       if (status != VXGE_HW_OK)
+               goto exit;
 
        data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
                        VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
@@ -4117,6 +4148,7 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
 
        vpath = &hldev->virtual_paths[vp_id];
 
+       spin_lock_init(&hldev->virtual_paths[vp_id].lock);
        vpath->vp_id = vp_id;
        vpath->vp_open = VXGE_HW_VP_OPEN;
        vpath->hldev = hldev;
@@ -4127,14 +4159,12 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
        __vxge_hw_vpath_reset(hldev, vp_id);
 
        status = __vxge_hw_vpath_reset_check(vpath);
-
        if (status != VXGE_HW_OK) {
                memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
                goto exit;
        }
 
        status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
-
        if (status != VXGE_HW_OK) {
                memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
                goto exit;
@@ -4148,7 +4178,6 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
                hldev->tim_int_mask1, vp_id);
 
        status = __vxge_hw_vpath_initialize(hldev, vp_id);
-
        if (status != VXGE_HW_OK)
                __vxge_hw_vp_terminate(hldev, vp_id);
 exit:
@@ -4335,16 +4364,18 @@ vpath_open_exit1:
 void
 vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
 {
-       struct __vxge_hw_virtualpath *vpath = NULL;
+       struct __vxge_hw_virtualpath *vpath = vp->vpath;
+       struct __vxge_hw_ring *ring = vpath->ringh;
+       struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
        u64 new_count, val64, val164;
-       struct __vxge_hw_ring *ring;
 
-       vpath = vp->vpath;
-       ring = vpath->ringh;
+       if (vdev->titan1) {
+               new_count = readq(&vpath->vp_reg->rxdmem_size);
+               new_count &= 0x1fff;
+       } else
+               new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8;
 
-       new_count = readq(&vpath->vp_reg->rxdmem_size);
-       new_count &= 0x1fff;
-       val164 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));
+       val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count);
 
        writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
                &vpath->vp_reg->prc_rxd_doorbell);
@@ -4414,7 +4445,9 @@ enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
 
        __vxge_hw_vp_terminate(devh, vp_id);
 
+       spin_lock(&vpath->lock);
        vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
+       spin_unlock(&vpath->lock);
 
 vpath_close_exit:
        return status;
@@ -4810,7 +4843,7 @@ static void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
  * __vxge_hw_blockpool_create - Create block pool
  */
 
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
                           struct __vxge_hw_blockpool *blockpool,
                           u32 pool_size,
@@ -4910,7 +4943,7 @@ blockpool_create_exit:
  * __vxge_hw_blockpool_destroy - Deallocates the block pool
  */
 
-void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
+static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
 {
 
        struct __vxge_hw_device *hldev;
@@ -5076,7 +5109,7 @@ exit:
  * Allocates a block of memory of given size, either from block pool
  * or by calling vxge_os_dma_malloc()
  */
-void *
+static void *
 __vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
                                struct vxge_hw_mempool_dma *dma_object)
 {
@@ -5140,7 +5173,7 @@ exit:
  * __vxge_hw_blockpool_free - Frees the memory allocated with
                                __vxge_hw_blockpool_malloc
  */
-void
+static void
 __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
                        void *memblock, u32 size,
                        struct vxge_hw_mempool_dma *dma_object)
@@ -5192,7 +5225,7 @@ __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
  * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
  * This function allocates a block from block pool or from the system
  */
-struct __vxge_hw_blockpool_entry *
+static struct __vxge_hw_blockpool_entry *
 __vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
 {
        struct __vxge_hw_blockpool_entry *entry = NULL;
@@ -5227,7 +5260,7 @@ __vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
  *
  * This function frees a block from block pool
  */
-void
+static void
 __vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
                        struct __vxge_hw_blockpool_entry *entry)
 {
index 5c00861b6c2c08351f1f52023f4037dfaad45236..5b2c8313426d3c7d7d051f4f51f020746d2af936 100644 (file)
 #define VXGE_CACHE_LINE_SIZE 128
 #endif
 
-#define vxge_os_vaprintf(level, mask, fmt, ...) { \
-       char buff[255]; \
-               snprintf(buff, 255, fmt, __VA_ARGS__); \
-               printk(buff); \
-               printk("\n"); \
-}
-
 #ifndef VXGE_ALIGN
 #define VXGE_ALIGN(adrs, size) \
        (((size) - (((u64)adrs) & ((size)-1))) & ((size)-1))
 #define VXGE_HW_MAX_MTU                                9600
 #define VXGE_HW_DEFAULT_MTU                    1500
 
-#ifdef VXGE_DEBUG_ASSERT
+#define VXGE_HW_MAX_ROM_IMAGES                 8
+
+struct eprom_image {
+       u8 is_valid:1;
+       u8 index;
+       u8 type;
+       u16 version;
+};
 
+#ifdef VXGE_DEBUG_ASSERT
 /**
  * vxge_assert
  * @test: C-condition to check
  * compilation
  * time.
  */
-#define vxge_assert(test) { \
-       if (!(test)) \
-               vxge_os_bug("bad cond: "#test" at %s:%d\n", \
-                               __FILE__, __LINE__); }
+#define vxge_assert(test) BUG_ON(!(test))
 #else
 #define vxge_assert(test)
 #endif /* end of VXGE_DEBUG_ASSERT */
 
 /**
- * enum enum vxge_debug_level
+ * enum vxge_debug_level
  * @VXGE_NONE: debug disabled
  * @VXGE_ERR: all errors going to be logged out
  * @VXGE_TRACE: all errors plus all kind of verbose tracing print outs
@@ -158,6 +156,47 @@ enum vxge_hw_device_link_state {
        VXGE_HW_LINK_UP
 };
 
+/**
+ * enum vxge_hw_fw_upgrade_code - FW upgrade return codes.
+ * @VXGE_HW_FW_UPGRADE_OK: All OK, send next 16 bytes
+ * @VXGE_HW_FW_UPGRADE_DONE:  upload completed
+ * @VXGE_HW_FW_UPGRADE_ERR:  upload error
+ * @VXGE_FW_UPGRADE_BYTES2SKIP:  skip bytes in the stream
+ *
+ */
+enum vxge_hw_fw_upgrade_code {
+       VXGE_HW_FW_UPGRADE_OK           = 0,
+       VXGE_HW_FW_UPGRADE_DONE         = 1,
+       VXGE_HW_FW_UPGRADE_ERR          = 2,
+       VXGE_FW_UPGRADE_BYTES2SKIP      = 3
+};
+
+/**
+ * enum vxge_hw_fw_upgrade_err_code - FW upgrade error codes.
+ * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1: corrupt data
+ * @VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW: buffer overflow
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7: corrupt data
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN: generic error of unknown type
+ * @VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH: failed to flash the image; image check failed
+ */
+enum vxge_hw_fw_upgrade_err_code {
+       VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1           = 1,
+       VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW          = 2,
+       VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3           = 3,
+       VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4           = 4,
+       VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5           = 5,
+       VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6           = 6,
+       VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7           = 7,
+       VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8           = 8,
+       VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN    = 9,
+       VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH          = 10
+};
+
 /**
  * struct vxge_hw_device_date - Date Format
  * @day: Day
@@ -465,7 +504,6 @@ struct vxge_hw_device_config {
  * See also: vxge_hw_driver_initialize().
  */
 struct vxge_hw_uld_cbs {
-
        void (*link_up)(struct __vxge_hw_device *devh);
        void (*link_down)(struct __vxge_hw_device *devh);
        void (*crit_err)(struct __vxge_hw_device *devh,
@@ -652,6 +690,7 @@ struct __vxge_hw_virtualpath {
        struct vxge_hw_vpath_stats_hw_info      *hw_stats;
        struct vxge_hw_vpath_stats_hw_info      *hw_stats_sav;
        struct vxge_hw_vpath_stats_sw_info      *sw_stats;
+       spinlock_t lock;
 };
 
 /*
@@ -674,9 +713,6 @@ struct __vxge_hw_vpath_handle{
 /**
  * struct __vxge_hw_device  - Hal device object
  * @magic: Magic Number
- * @device_id: PCI Device Id of the adapter
- * @major_revision: PCI Device major revision
- * @minor_revision: PCI Device minor revision
  * @bar0: BAR0 virtual address.
  * @pdev: Physical device handle
  * @config: Configuration passed by the LL driver at initialization
@@ -688,9 +724,6 @@ struct __vxge_hw_device {
        u32                             magic;
 #define VXGE_HW_DEVICE_MAGIC           0x12345678
 #define VXGE_HW_DEVICE_DEAD            0xDEADDEAD
-       u16                             device_id;
-       u8                              major_revision;
-       u8                              minor_revision;
        void __iomem                    *bar0;
        struct pci_dev                  *pdev;
        struct net_device               *ndev;
@@ -731,6 +764,7 @@ struct __vxge_hw_device {
        u32                             debug_level;
        u32                             level_err;
        u32                             level_trace;
+       u16 eprom_versions[VXGE_HW_MAX_ROM_IMAGES];
 };
 
 #define VXGE_HW_INFO_LEN       64
@@ -1413,12 +1447,12 @@ enum vxge_hw_rth_algoritms {
  * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get().
  */
 struct vxge_hw_rth_hash_types {
-       u8 hash_type_tcpipv4_en;
-       u8 hash_type_ipv4_en;
-       u8 hash_type_tcpipv6_en;
-       u8 hash_type_ipv6_en;
-       u8 hash_type_tcpipv6ex_en;
-       u8 hash_type_ipv6ex_en;
+       u8 hash_type_tcpipv4_en:1,
+          hash_type_ipv4_en:1,
+          hash_type_tcpipv6_en:1,
+          hash_type_ipv6_en:1,
+          hash_type_tcpipv6ex_en:1,
+          hash_type_ipv6ex_en:1;
 };
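With 1-bit fields the whole hash-type selection packs into a single byte instead of six separate u8 members. A hypothetical initializer, as a caller of vxge_hw_vpath_rts_rth_set() might fill it (the chosen values are illustrative only):

	struct vxge_hw_rth_hash_types hash_types = {
		.hash_type_tcpipv4_en   = 1,
		.hash_type_ipv4_en      = 1,
		.hash_type_tcpipv6_en   = 1,
		.hash_type_ipv6_en      = 0,
		.hash_type_tcpipv6ex_en = 0,
		.hash_type_ipv6ex_en    = 0,
	};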
 
 void vxge_hw_device_debug_set(
@@ -2000,7 +2034,7 @@ enum vxge_hw_status
 vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
 
 /**
- * vxge_debug
+ * vxge_debug_ll
  * @level: level of debug verbosity.
  * @mask: mask for the debug
  * @buf: Circular buffer for tracing
@@ -2012,26 +2046,13 @@ vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
  * may be compiled out if DEBUG macro was never defined.
  * See also: enum vxge_debug_level{}.
  */
-
-#define vxge_trace_aux(level, mask, fmt, ...) \
-{\
-               vxge_os_vaprintf(level, mask, fmt, __VA_ARGS__);\
-}
-
-#define vxge_debug(module, level, mask, fmt, ...) { \
-if ((level >= VXGE_TRACE && ((module & VXGE_DEBUG_TRACE_MASK) == module)) || \
-       (level >= VXGE_ERR && ((module & VXGE_DEBUG_ERR_MASK) == module))) {\
-       if ((mask & VXGE_DEBUG_MASK) == mask)\
-               vxge_trace_aux(level, mask, fmt, __VA_ARGS__); \
-} \
-}
-
 #if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK)
-#define vxge_debug_ll(level, mask, fmt, ...) \
-{\
-       vxge_debug(VXGE_COMPONENT_LL, level, mask, fmt, __VA_ARGS__);\
-}
-
+#define vxge_debug_ll(level, mask, fmt, ...) do {                             \
+       if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) ||  \
+           (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\
+               if ((mask & VXGE_DEBUG_MASK) == mask)                          \
+                       printk(fmt "\n", __VA_ARGS__);                         \
+} while (0)
 #else
 #define vxge_debug_ll(level, mask, fmt, ...)
 #endif
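After the rework the macro reduces to a printk() guarded by the component and verbosity masks. A hypothetical call site (not taken from the patch; vp_id is an assumed local):

	vxge_debug_ll(VXGE_ERR, VXGE_DEBUG_MASK,
		      "%s: vpath %d open failed", __func__, vp_id);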
@@ -2051,4 +2072,26 @@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
 
 enum vxge_hw_status
 __vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);
+
+#define VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT 5
+#define VXGE_HW_MAX_POLLING_COUNT 100
+
+void
+vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev);
+
+enum vxge_hw_status
+vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
+                            u32 *minor, u32 *build);
+
+enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev);
+
+enum vxge_hw_status
+vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *filebuf,
+                    int size);
+
+enum vxge_hw_status
+vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
+                               struct eprom_image *eprom_image_data);
+
+int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id);
 #endif
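The prototypes above expose the firmware and EPROM upgrade path to the LL driver. A hypothetical consumer is sketched below; the assumption that vxge_hw_vpath_eprom_img_ver_get() fills an array of VXGE_HW_MAX_ROM_IMAGES entries is mine, not stated by the patch:

	struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES];
	int i;

	if (vxge_hw_vpath_eprom_img_ver_get(hldev, img) == VXGE_HW_OK) {
		for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++)
			if (img[i].is_valid)
				printk(KERN_INFO "eprom %d: type %d ver %u\n",
				       img[i].index, img[i].type,
				       img[i].version);
	}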
index b67746eef923e2cdeb9886130796e375d3b34818..09f721e10517c9dfa0eedd5c37408ef783ae450b 100644 (file)
@@ -11,7 +11,7 @@
  *                 Virtualized Server Adapter.
  * Copyright(c) 2002-2010 Exar Corp.
  ******************************************************************************/
-#include<linux/ethtool.h>
+#include <linux/ethtool.h>
 #include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/etherdevice.h>
@@ -29,7 +29,6 @@
  * Return value:
  * 0 on success.
  */
-
 static int vxge_ethtool_sset(struct net_device *dev, struct ethtool_cmd *info)
 {
        /* We currently only support 10Gb/FULL */
@@ -79,10 +78,9 @@ static int vxge_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
  * Returns driver specefic information like name, version etc.. to ethtool.
  */
 static void vxge_ethtool_gdrvinfo(struct net_device *dev,
-                       struct ethtool_drvinfo *info)
+                                 struct ethtool_drvinfo *info)
 {
-       struct vxgedev *vdev;
-       vdev = (struct vxgedev *)netdev_priv(dev);
+       struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
        strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(VXGE_DRIVER_NAME));
        strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION));
        strlcpy(info->fw_version, vdev->fw_version, VXGE_HW_FW_STRLEN);
@@ -104,15 +102,14 @@ static void vxge_ethtool_gdrvinfo(struct net_device *dev,
  * buffer area.
  */
 static void vxge_ethtool_gregs(struct net_device *dev,
-                       struct ethtool_regs *regs, void *space)
+                              struct ethtool_regs *regs, void *space)
 {
        int index, offset;
        enum vxge_hw_status status;
        u64 reg;
-       u64 *reg_space = (u64 *) space;
+       u64 *reg_space = (u64 *)space;
        struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
-       struct __vxge_hw_device  *hldev = (struct __vxge_hw_device *)
-                                       pci_get_drvdata(vdev->pdev);
+       struct __vxge_hw_device *hldev = vdev->devh;
 
        regs->len = sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
        regs->version = vdev->pdev->subsystem_device;
@@ -148,8 +145,7 @@ static void vxge_ethtool_gregs(struct net_device *dev,
 static int vxge_ethtool_idnic(struct net_device *dev, u32 data)
 {
        struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
-       struct __vxge_hw_device  *hldev = (struct __vxge_hw_device  *)
-                       pci_get_drvdata(vdev->pdev);
+       struct __vxge_hw_device *hldev = vdev->devh;
 
        vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON);
        msleep_interruptible(data ? (data * HZ) : VXGE_MAX_FLICKER_TIME);
@@ -168,11 +164,10 @@ static int vxge_ethtool_idnic(struct net_device *dev, u32 data)
  *  void
  */
 static void vxge_ethtool_getpause_data(struct net_device *dev,
-                                       struct ethtool_pauseparam *ep)
+                                      struct ethtool_pauseparam *ep)
 {
        struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
-       struct __vxge_hw_device  *hldev = (struct __vxge_hw_device  *)
-                       pci_get_drvdata(vdev->pdev);
+       struct __vxge_hw_device *hldev = vdev->devh;
 
        vxge_hw_device_getpause_data(hldev, 0, &ep->tx_pause, &ep->rx_pause);
 }
@@ -188,11 +183,10 @@ static void vxge_ethtool_getpause_data(struct net_device *dev,
  * int, returns 0 on Success
  */
 static int vxge_ethtool_setpause_data(struct net_device *dev,
-                                       struct ethtool_pauseparam *ep)
+                                     struct ethtool_pauseparam *ep)
 {
        struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
-       struct __vxge_hw_device  *hldev = (struct __vxge_hw_device  *)
-                       pci_get_drvdata(vdev->pdev);
+       struct __vxge_hw_device *hldev = vdev->devh;
 
        vxge_hw_device_setpause_data(hldev, 0, ep->tx_pause, ep->rx_pause);
 
@@ -209,9 +203,8 @@ static void vxge_get_ethtool_stats(struct net_device *dev,
        enum vxge_hw_status status;
        enum vxge_hw_status swstatus;
        struct vxge_vpath *vpath = NULL;
-
        struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
-       struct __vxge_hw_device  *hldev = vdev->devh;
+       struct __vxge_hw_device *hldev = vdev->devh;
        struct vxge_hw_xmac_stats *xmac_stats;
        struct vxge_hw_device_stats_sw_info *sw_stats;
        struct vxge_hw_device_stats_hw_info *hw_stats;
@@ -574,8 +567,8 @@ static void vxge_get_ethtool_stats(struct net_device *dev,
        kfree(hw_stats);
 }
 
-static void vxge_ethtool_get_strings(struct net_device *dev,
-                             u32 stringset, u8 *data)
+static void vxge_ethtool_get_strings(struct net_device *dev, u32 stringset,
+                                    u8 *data)
 {
        int stat_size = 0;
        int i, j;
@@ -1119,6 +1112,59 @@ static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
        }
 }
 
+static int vxge_set_flags(struct net_device *dev, u32 data)
+{
+       struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+       enum vxge_hw_status status;
+
+       if (data & ~ETH_FLAG_RXHASH)
+               return -EOPNOTSUPP;
+
+       if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en)
+               return 0;
+
+       if (netif_running(dev) || (vdev->config.rth_steering == NO_STEERING))
+               return -EINVAL;
+
+       vdev->devh->config.rth_en = !!(data & ETH_FLAG_RXHASH);
+
+       /* Enabling RTH requires some of the logic in vxge_device_register and a
+        * vpath reset.  Due to these restrictions, only allow modification
+        * while the interface is down.
+        */
+       status = vxge_reset_all_vpaths(vdev);
+       if (status != VXGE_HW_OK) {
+               vdev->devh->config.rth_en = !vdev->devh->config.rth_en;
+               return -EFAULT;
+       }
+
+       if (vdev->devh->config.rth_en)
+               dev->features |= NETIF_F_RXHASH;
+       else
+               dev->features &= ~NETIF_F_RXHASH;
+
+       return 0;
+}
+
+static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms)
+{
+       struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+
+       if (vdev->max_vpath_supported != VXGE_HW_MAX_VIRTUAL_PATHS) {
+               printk(KERN_INFO "Single Function Mode is required to flash the"
+                      " firmware\n");
+               return -EINVAL;
+       }
+
+       if (netif_running(dev)) {
+               printk(KERN_INFO "Interface %s must be down to flash the "
+                      "firmware\n", dev->name);
+               return -EBUSY;
+       }
+
+       return vxge_fw_upgrade(vdev, parms->data, 1);
+}
+
 static const struct ethtool_ops vxge_ethtool_ops = {
        .get_settings           = vxge_ethtool_gset,
        .set_settings           = vxge_ethtool_sset,
@@ -1140,6 +1186,8 @@ static const struct ethtool_ops vxge_ethtool_ops = {
        .phys_id                = vxge_ethtool_idnic,
        .get_sset_count         = vxge_ethtool_get_sset_count,
        .get_ethtool_stats      = vxge_get_ethtool_stats,
+       .set_flags              = vxge_set_flags,
+       .flash_device           = vxge_fw_flash,
 };
 
 void vxge_initialize_ethtool_ops(struct net_device *ndev)
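The new .flash_device hook is reached through the standard ETHTOOL_FLASHDEV ioctl; in practice "ethtool -f eth2 vxge/X3fw.ncf" issues the same request. A hedged userspace sketch of that call follows; the interface name and firmware path are examples only, and the handler above additionally requires single-function mode and the interface to be down.

#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_flash eflash = { .cmd = ETHTOOL_FLASHDEV };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	/* the data field carries the firmware file name for request_firmware() */
	strncpy(eflash.data, "vxge/X3fw.ncf", ETHTOOL_FLASH_MAX_FILENAME - 1);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth2", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&eflash;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_FLASHDEV");
	close(fd);
	return 0;
}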
index 813829f3d0242be97ff22f97bf1724b7db3c8734..3f2d6ed13d3ef9fc926d7603429416f626e34c78 100644 (file)
@@ -50,6 +50,8 @@
 #include <net/ip.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/firmware.h>
+#include <linux/net_tstamp.h>
 #include "vxge-main.h"
 #include "vxge-reg.h"
 
@@ -90,7 +92,6 @@ static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac);
 static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
 static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
 static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
-static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
 
 static inline int is_vxge_card_up(struct vxgedev *vdev)
 {
@@ -369,7 +370,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
                 u8 t_code, void *userdata)
 {
        struct vxge_ring *ring = (struct vxge_ring *)userdata;
-       struct  net_device *dev = ring->ndev;
+       struct net_device *dev = ring->ndev;
        unsigned int dma_sizes;
        void *first_dtr = NULL;
        int dtr_cnt = 0;
@@ -513,6 +514,23 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
                else
                        skb_checksum_none_assert(skb);
 
+
+               if (ring->rx_hwts) {
+                       struct skb_shared_hwtstamps *skb_hwts;
+                       u32 ns = *(u32 *)(skb->head + pkt_length);
+
+                       skb_hwts = skb_hwtstamps(skb);
+                       skb_hwts->hwtstamp = ns_to_ktime(ns);
+                       skb_hwts->syststamp.tv64 = 0;
+               }
+
+               /* rth_hash_type and rth_it_hit are non-zero regardless of
+                * whether rss is enabled.  Only the rth_value is zero/non-zero
+                * if rss is disabled/enabled, so key off of that.
+                */
+               if (ext_info.rth_value)
+                       skb->rxhash = ext_info.rth_value;
+
                vxge_rx_complete(ring, skb, ext_info.vlan,
                        pkt_length, &ext_info);
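The stamp stored in skb_hwtstamps() above eventually reaches applications through SO_TIMESTAMPING. A minimal userspace reader sketch, not part of the patch: bind/address setup and error handling are omitted, and the raw hardware stamp arrives as the third timespec of the control message (the driver leaves the transformed syststamp at zero).

#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/net_tstamp.h>

int main(void)
{
	int val = SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	char data[2048], ctrl[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cm;

	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));

	if (recvmsg(fd, &msg, 0) >= 0)
		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
			if (cm->cmsg_level == SOL_SOCKET &&
			    cm->cmsg_type == SO_TIMESTAMPING) {
				struct timespec *ts = (void *)CMSG_DATA(cm);

				/* ts[2] holds the raw hardware timestamp */
				printf("hw rx stamp %ld.%09ld\n",
				       (long)ts[2].tv_sec, ts[2].tv_nsec);
			}
	close(fd);
	return 0;
}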
 
@@ -670,7 +688,7 @@ static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
        struct vxge_vpath *vpath = NULL;
        struct __vxge_hw_device *hldev;
 
-       hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+       hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
 
        mac_address = (u8 *)&mac_addr;
        memcpy(mac_address, mac_header, ETH_ALEN);
@@ -1094,7 +1112,7 @@ static void vxge_set_multicast(struct net_device *dev)
                /* Delete previous MC's */
                for (i = 0; i < mcast_cnt; i++) {
                        list_for_each_safe(entry, next, list_head) {
-                               mac_entry = (struct vxge_mac_addrs *) entry;
+                               mac_entry = (struct vxge_mac_addrs *)entry;
                                /* Copy the mac address to delete */
                                mac_address = (u8 *)&mac_entry->macaddr;
                                memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
@@ -1137,7 +1155,7 @@ _set_all_mcast:
                /* Delete previous MC's */
                for (i = 0; i < mcast_cnt; i++) {
                        list_for_each_safe(entry, next, list_head) {
-                               mac_entry = (struct vxge_mac_addrs *) entry;
+                               mac_entry = (struct vxge_mac_addrs *)entry;
                                /* Copy the mac address to delete */
                                mac_address = (u8 *)&mac_entry->macaddr;
                                memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
@@ -1184,7 +1202,7 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
 {
        struct sockaddr *addr = p;
        struct vxgedev *vdev;
-       struct __vxge_hw_device  *hldev;
+       struct __vxge_hw_device *hldev;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct macInfo mac_info_new, mac_info_old;
        int vpath_idx = 0;
@@ -1292,8 +1310,13 @@ static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
 static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
 {
        struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
+       struct __vxge_hw_device *hldev;
        int msix_id;
 
+       hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
+
+       vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id);
+
        vxge_hw_vpath_intr_disable(vpath->handle);
 
        if (vdev->config.intr_type == INTA)
@@ -1423,6 +1446,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
        }
 
        if (event == VXGE_LL_FULL_RESET) {
+               vxge_hw_device_wait_receive_idle(vdev->devh);
                vxge_hw_device_intr_disable(vdev->devh);
 
                switch (vdev->cric_err_event) {
@@ -1608,7 +1632,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
        int budget_org = budget;
        struct vxge_ring *ring;
 
-       struct __vxge_hw_device  *hldev = (struct __vxge_hw_device *)
+       struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
                pci_get_drvdata(vdev->pdev);
 
        for (i = 0; i < vdev->no_of_vpath; i++) {
@@ -1645,7 +1669,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
  */
 static void vxge_netpoll(struct net_device *dev)
 {
-       struct __vxge_hw_device  *hldev;
+       struct __vxge_hw_device *hldev;
        struct vxgedev *vdev;
 
        vdev = (struct vxgedev *)netdev_priv(dev);
@@ -1689,15 +1713,6 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
                mtable[index] = index % vdev->no_of_vpath;
        }
 
-       /* Fill RTH hash types */
-       hash_types.hash_type_tcpipv4_en   = vdev->config.rth_hash_type_tcpipv4;
-       hash_types.hash_type_ipv4_en      = vdev->config.rth_hash_type_ipv4;
-       hash_types.hash_type_tcpipv6_en   = vdev->config.rth_hash_type_tcpipv6;
-       hash_types.hash_type_ipv6_en      = vdev->config.rth_hash_type_ipv6;
-       hash_types.hash_type_tcpipv6ex_en =
-                                       vdev->config.rth_hash_type_tcpipv6ex;
-       hash_types.hash_type_ipv6ex_en    = vdev->config.rth_hash_type_ipv6ex;
-
        /* set indirection table, bucket-to-vpath mapping */
        status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
                                                vdev->no_of_vpath,
@@ -1710,12 +1725,21 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
                return status;
        }
 
+       /* Fill RTH hash types */
+       hash_types.hash_type_tcpipv4_en   = vdev->config.rth_hash_type_tcpipv4;
+       hash_types.hash_type_ipv4_en      = vdev->config.rth_hash_type_ipv4;
+       hash_types.hash_type_tcpipv6_en   = vdev->config.rth_hash_type_tcpipv6;
+       hash_types.hash_type_ipv6_en      = vdev->config.rth_hash_type_ipv6;
+       hash_types.hash_type_tcpipv6ex_en =
+                                       vdev->config.rth_hash_type_tcpipv6ex;
+       hash_types.hash_type_ipv6ex_en    = vdev->config.rth_hash_type_ipv6ex;
+
        /*
-       * Because the itable_set() method uses the active_table field
-       * for the target virtual path the RTH config should be updated
-       * for all VPATHs. The h/w only uses the lowest numbered VPATH
-       * when steering frames.
-       */
+        * Because the itable_set() method uses the active_table field
+        * for the target virtual path the RTH config should be updated
+        * for all VPATHs. The h/w only uses the lowest numbered VPATH
+        * when steering frames.
+        */
         for (index = 0; index < vdev->no_of_vpath; index++) {
                status = vxge_hw_vpath_rts_rth_set(
                                vdev->vpaths[index].handle,
@@ -1797,7 +1821,7 @@ static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
 {
        struct list_head *entry, *next;
        u64 del_mac = 0;
-       u8 *mac_address = (u8 *) (&del_mac);
+       u8 *mac_address = (u8 *)(&del_mac);
 
        /* Copy the mac address to delete from the list */
        memcpy(mac_address, mac->macaddr, ETH_ALEN);
@@ -1928,7 +1952,7 @@ static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
 }
 
 /* reset vpaths */
-static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
+enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
 {
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_vpath *vpath;
@@ -1988,8 +2012,23 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
 
        for (i = 0; i < vdev->no_of_vpath; i++) {
                vpath = &vdev->vpaths[i];
-
                vxge_assert(vpath->is_configured);
+
+               if (!vdev->titan1) {
+                       struct vxge_hw_vp_config *vcfg;
+                       vcfg = &vdev->devh->config.vp_config[vpath->device_id];
+
+                       vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A;
+                       vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B;
+                       vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C;
+                       vcfg->tti.uec_a = TTI_T1A_TX_UFC_A;
+                       vcfg->tti.uec_b = TTI_T1A_TX_UFC_B;
+                       vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu);
+                       vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu);
+                       vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL;
+                       vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL;
+               }
+
                attr.vp_id = vpath->device_id;
                attr.fifo_attr.callback = vxge_xmit_compl;
                attr.fifo_attr.txdl_term = vxge_tx_term;
@@ -2024,6 +2063,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
                                vdev->config.fifo_indicate_max_pkts;
                        vpath->ring.rx_vector_no = 0;
                        vpath->ring.rx_csum = vdev->rx_csum;
+                       vpath->ring.rx_hwts = vdev->rx_hwts;
                        vpath->is_open = 1;
                        vdev->vp_handles[i] = vpath->handle;
                        vpath->ring.gro_enable = vdev->config.gro_enable;
@@ -2062,7 +2102,7 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
        struct __vxge_hw_device *hldev;
        u64 reason;
        enum vxge_hw_status status;
-       struct vxgedev *vdev = (struct vxgedev *) dev_id;;
+       struct vxgedev *vdev = (struct vxgedev *)dev_id;
 
        vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
 
@@ -2073,7 +2113,7 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
                return IRQ_NONE;
 
        if (unlikely(!is_vxge_card_up(vdev)))
-               return IRQ_NONE;
+               return IRQ_HANDLED;
 
        status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode,
                        &reason);
@@ -2301,8 +2341,8 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
 
 static void vxge_rem_isr(struct vxgedev *vdev)
 {
-       struct __vxge_hw_device  *hldev;
-       hldev = (struct __vxge_hw_device  *) pci_get_drvdata(vdev->pdev);
+       struct __vxge_hw_device *hldev;
+       hldev = (struct __vxge_hw_device  *)pci_get_drvdata(vdev->pdev);
 
 #ifdef CONFIG_PCI_MSI
        if (vdev->config.intr_type == MSI_X) {
@@ -2543,7 +2583,7 @@ vxge_open(struct net_device *dev)
                "%s: %s:%d", dev->name, __func__, __LINE__);
 
        vdev = (struct vxgedev *)netdev_priv(dev);
-       hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+       hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
        function_mode = vdev->config.device_hw_info.function_mode;
 
        /* make sure you have link off by default every time Nic is
@@ -2598,6 +2638,8 @@ vxge_open(struct net_device *dev)
                        goto out2;
                }
        }
+       printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
+              hldev->config.rth_en ? "enabled" : "disabled");
 
        for (i = 0; i < vdev->no_of_vpath; i++) {
                vpath = &vdev->vpaths[i];
@@ -2683,9 +2725,10 @@ vxge_open(struct net_device *dev)
                vxge_os_timer(vdev->vp_reset_timer,
                        vxge_poll_vp_reset, vdev, (HZ/2));
 
-       if (vdev->vp_lockup_timer.function == NULL)
-               vxge_os_timer(vdev->vp_lockup_timer,
-                       vxge_poll_vp_lockup, vdev, (HZ/2));
+       /* There is no need to check for RxD leak and RxD lookup on Titan1A */
+       if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
+               vxge_os_timer(vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
+                             HZ / 2);
 
        set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
 
@@ -2768,7 +2811,7 @@ static int do_vxge_close(struct net_device *dev, int do_io)
                dev->name, __func__, __LINE__);
 
        vdev = (struct vxgedev *)netdev_priv(dev);
-       hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+       hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
 
        if (unlikely(!is_vxge_card_up(vdev)))
                return 0;
@@ -2778,7 +2821,6 @@ static int do_vxge_close(struct net_device *dev, int do_io)
        while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
                msleep(50);
 
-       clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
        if (do_io) {
                /* Put the vpath back in normal mode */
                vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
@@ -2818,10 +2860,17 @@ static int do_vxge_close(struct net_device *dev, int do_io)
 
                smp_wmb();
        }
-       del_timer_sync(&vdev->vp_lockup_timer);
+
+       if (vdev->titan1)
+               del_timer_sync(&vdev->vp_lockup_timer);
 
        del_timer_sync(&vdev->vp_reset_timer);
 
+       if (do_io)
+               vxge_hw_device_wait_receive_idle(hldev);
+
+       clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
+
        /* Disable napi */
        if (vdev->config.intr_type != MSI_X)
                napi_disable(&vdev->napi);
@@ -2838,8 +2887,6 @@ static int do_vxge_close(struct net_device *dev, int do_io)
        if (do_io)
                vxge_hw_device_intr_disable(vdev->devh);
 
-       mdelay(1000);
-
        vxge_rem_isr(vdev);
 
        vxge_napi_del_all(vdev);
@@ -2954,6 +3001,101 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
        return net_stats;
 }
 
+static enum vxge_hw_status vxge_timestamp_config(struct vxgedev *vdev,
+                                                int enable)
+{
+       enum vxge_hw_status status;
+       u64 val64;
+
+       /* Timestamp is passed to the driver via the FCS, therefore we
+        * must disable the FCS stripping by the adapter.  Since this is
+        * required for the driver to load (due to a hardware bug),
+        * there is no need to do anything special here.
+        */
+       if (enable)
+               val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
+                       VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
+                       VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);
+       else
+               val64 = 0;
+
+       status = vxge_hw_mgmt_reg_write(vdev->devh,
+                                       vxge_hw_mgmt_reg_type_mrpcim,
+                                       0,
+                                       offsetof(struct vxge_hw_mrpcim_reg,
+                                                xmac_timestamp),
+                                       val64);
+       vxge_hw_device_flush_io(vdev->devh);
+       return status;
+}
+
+static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
+{
+       struct hwtstamp_config config;
+       enum vxge_hw_status status;
+       int i;
+
+       if (copy_from_user(&config, data, sizeof(config)))
+               return -EFAULT;
+
+       /* reserved for future extensions */
+       if (config.flags)
+               return -EINVAL;
+
+       /* Transmit HW Timestamp not supported */
+       switch (config.tx_type) {
+       case HWTSTAMP_TX_OFF:
+               break;
+       case HWTSTAMP_TX_ON:
+       default:
+               return -ERANGE;
+       }
+
+       switch (config.rx_filter) {
+       case HWTSTAMP_FILTER_NONE:
+               status = vxge_timestamp_config(vdev, 0);
+               if (status != VXGE_HW_OK)
+                       return -EFAULT;
+
+               vdev->rx_hwts = 0;
+               config.rx_filter = HWTSTAMP_FILTER_NONE;
+               break;
+
+       case HWTSTAMP_FILTER_ALL:
+       case HWTSTAMP_FILTER_SOME:
+       case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+       case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+       case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+               status = vxge_timestamp_config(vdev, 1);
+               if (status != VXGE_HW_OK)
+                       return -EFAULT;
+
+               vdev->rx_hwts = 1;
+               config.rx_filter = HWTSTAMP_FILTER_ALL;
+               break;
+
+       default:
+                return -ERANGE;
+       }
+
+       for (i = 0; i < vdev->no_of_vpath; i++)
+               vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts;
+
+       if (copy_to_user(data, &config, sizeof(config)))
+               return -EFAULT;
+
+       return 0;
+}
+
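A userspace sketch, not part of the patch, of driving the new SIOCSHWTSTAMP path; the interface name is an example. The handler above accepts only HWTSTAMP_TX_OFF and grants HWTSTAMP_FILTER_ALL for any requested RX filter, and an application still needs SO_TIMESTAMPING (see the reader sketch earlier) to get the stamps back.

#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_OFF,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth2", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		printf("rx_filter granted: %d\n", cfg.rx_filter);
	close(fd);
	return 0;
}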
 /**
  * vxge_ioctl
  * @dev: Device pointer.
@@ -2966,7 +3108,20 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
  */
 static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
-       return -EOPNOTSUPP;
+       struct vxgedev *vdev = netdev_priv(dev);
+       int ret;
+
+       switch (cmd) {
+       case SIOCSHWTSTAMP:
+               ret = vxge_hwtstamp_ioctl(vdev, rq->ifr_data);
+               if (ret)
+                       return ret;
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
 }
 
 /**
@@ -3125,6 +3280,19 @@ static const struct net_device_ops vxge_netdev_ops = {
 #endif
 };
 
+static int __devinit vxge_device_revision(struct vxgedev *vdev)
+{
+       int ret;
+       u8 revision;
+
+       ret = pci_read_config_byte(vdev->pdev, PCI_REVISION_ID, &revision);
+       if (ret)
+               return -EIO;
+
+       vdev->titan1 = (revision == VXGE_HW_TITAN1_PCI_REVISION);
+       return 0;
+}
+
 static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
                                          struct vxge_config *config,
                                          int high_dma, int no_of_vpath,
@@ -3163,6 +3331,11 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
        vdev->pdev = hldev->pdev;
        memcpy(&vdev->config, config, sizeof(struct vxge_config));
        vdev->rx_csum = 1;      /* Enable Rx CSUM by default. */
+       vdev->rx_hwts = 0;
+
+       ret = vxge_device_revision(vdev);
+       if (ret < 0)
+               goto _out1;
 
        SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
 
@@ -3178,6 +3351,11 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
 
        vxge_initialize_ethtool_ops(ndev);
 
+       if (vdev->config.rth_steering != NO_STEERING) {
+               ndev->features |= NETIF_F_RXHASH;
+               hldev->config.rth_en = VXGE_HW_RTH_ENABLE;
+       }
+
        /* Allocate memory for vpath */
        vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
                                no_of_vpath, GFP_KERNEL);
@@ -3227,6 +3405,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
                "%s: Ethernet device registered",
                ndev->name);
 
+       hldev->ndev = ndev;
        *vdev_out = vdev;
 
        /* Resetting the Device stats */
@@ -3261,36 +3440,29 @@ _out0:
  *
  * This function will unregister and free network device
  */
-static void
-vxge_device_unregister(struct __vxge_hw_device *hldev)
+static void vxge_device_unregister(struct __vxge_hw_device *hldev)
 {
        struct vxgedev *vdev;
        struct net_device *dev;
        char buf[IFNAMSIZ];
-#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
-       (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
-       u32 level_trace;
-#endif
 
        dev = hldev->ndev;
        vdev = netdev_priv(dev);
-#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
-       (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
-       level_trace = vdev->level_trace;
-#endif
-       vxge_debug_entryexit(level_trace,
-               "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
 
-       memcpy(buf, vdev->ndev->name, IFNAMSIZ);
+       vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name,
+                            __func__, __LINE__);
+
+       memcpy(buf, dev->name, IFNAMSIZ);
 
        /* in 2.6 will call stop() if device is up */
        unregister_netdev(dev);
 
        flush_scheduled_work();
 
-       vxge_debug_init(level_trace, "%s: ethernet device unregistered", buf);
-       vxge_debug_entryexit(level_trace,
-               "%s: %s:%d  Exiting...", buf, __func__, __LINE__);
+       vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
+                       buf);
+       vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d  Exiting...", buf,
+                            __func__, __LINE__);
 }
 
 /*
@@ -3813,8 +3985,8 @@ static int vxge_pm_resume(struct pci_dev *pdev)
 static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
 {
-       struct __vxge_hw_device  *hldev =
-               (struct __vxge_hw_device  *) pci_get_drvdata(pdev);
+       struct __vxge_hw_device *hldev =
+               (struct __vxge_hw_device  *)pci_get_drvdata(pdev);
        struct net_device *netdev = hldev->ndev;
 
        netif_device_detach(netdev);
@@ -3843,8 +4015,8 @@ static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
  */
 static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
 {
-       struct __vxge_hw_device  *hldev =
-               (struct __vxge_hw_device  *) pci_get_drvdata(pdev);
+       struct __vxge_hw_device *hldev =
+               (struct __vxge_hw_device  *)pci_get_drvdata(pdev);
        struct net_device *netdev = hldev->ndev;
 
        struct vxgedev *vdev = netdev_priv(netdev);
@@ -3869,8 +4041,8 @@ static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
  */
 static void vxge_io_resume(struct pci_dev *pdev)
 {
-       struct __vxge_hw_device  *hldev =
-               (struct __vxge_hw_device  *) pci_get_drvdata(pdev);
+       struct __vxge_hw_device *hldev =
+               (struct __vxge_hw_device  *)pci_get_drvdata(pdev);
        struct net_device *netdev = hldev->ndev;
 
        if (netif_running(netdev)) {
@@ -3914,6 +4086,142 @@ static inline u32 vxge_get_num_vfs(u64 function_mode)
        return num_functions;
 }
 
+int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
+{
+       struct __vxge_hw_device *hldev = vdev->devh;
+       u32 maj, min, bld, cmaj, cmin, cbld;
+       enum vxge_hw_status status;
+       const struct firmware *fw;
+       int ret;
+
+       ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
+       if (ret) {
+               vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found",
+                               VXGE_DRIVER_NAME, fw_name);
+               goto out;
+       }
+
+       /* Load the new firmware onto the adapter */
+       status = vxge_update_fw_image(hldev, fw->data, fw->size);
+       if (status != VXGE_HW_OK) {
+               vxge_debug_init(VXGE_ERR,
+                               "%s: FW image download to adapter failed '%s'.",
+                               VXGE_DRIVER_NAME, fw_name);
+               ret = -EIO;
+               goto out;
+       }
+
+       /* Read the version of the new firmware */
+       status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld);
+       if (status != VXGE_HW_OK) {
+               vxge_debug_init(VXGE_ERR,
+                               "%s: Upgrade read version failed '%s'.",
+                               VXGE_DRIVER_NAME, fw_name);
+               ret = -EIO;
+               goto out;
+       }
+
+       cmaj = vdev->config.device_hw_info.fw_version.major;
+       cmin = vdev->config.device_hw_info.fw_version.minor;
+       cbld = vdev->config.device_hw_info.fw_version.build;
+       /* It's possible the version in /lib/firmware is not the latest version.
+        * If so, we could get into a loop of trying to upgrade to the latest
+        * and flashing the older version.
+        */
+       if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) &&
+           !override) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n",
+              maj, min, bld);
+
+       /* Flash the adapter with the new firmware */
+       status = vxge_hw_flash_fw(hldev);
+       if (status != VXGE_HW_OK) {
+               vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.",
+                               VXGE_DRIVER_NAME, fw_name);
+               ret = -EIO;
+               goto out;
+       }
+
+       printk(KERN_NOTICE "Upgrade of firmware successful!  Adapter must be "
+              "hard reset before using, thus requiring a system reboot or a "
+              "hotplug event.\n");
+
+out:
+       return ret;
+}
+
+static int vxge_probe_fw_update(struct vxgedev *vdev)
+{
+       u32 maj, min, bld;
+       int ret, gpxe = 0;
+       char *fw_name;
+
+       maj = vdev->config.device_hw_info.fw_version.major;
+       min = vdev->config.device_hw_info.fw_version.minor;
+       bld = vdev->config.device_hw_info.fw_version.build;
+
+       if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER)
+               return 0;
+
+       /* Ignore the build number when determining if the current firmware is
+        * "too new" to load the driver
+        */
+       if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) {
+               vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known "
+                               "version, unable to load driver\n",
+                               VXGE_DRIVER_NAME);
+               return -EINVAL;
+       }
+
+       /* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to
+        * work with this driver.
+        */
+       if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) {
+               vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be "
+                               "upgraded\n", VXGE_DRIVER_NAME, maj, min, bld);
+               return -EINVAL;
+       }
+
+       /* If file not specified, determine gPXE or not */
+       if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) {
+               int i;
+               for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++)
+                       if (vdev->devh->eprom_versions[i]) {
+                               gpxe = 1;
+                               break;
+                       }
+       }
+       if (gpxe)
+               fw_name = "vxge/X3fw-pxe.ncf";
+       else
+               fw_name = "vxge/X3fw.ncf";
+
+       ret = vxge_fw_upgrade(vdev, fw_name, 0);
+       /* -EINVAL and -ENOENT are not fatal errors for flashing firmware on
+        * probe, so ignore them
+        */
+       if (ret != -EINVAL && ret != -ENOENT)
+               return -EIO;
+       else
+               ret = 0;
+
+       if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
+           VXGE_FW_VER(maj, min, 0)) {
+               vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
+                               " be used with this driver.\n"
+                               "Please get the latest version from "
+                               "ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE",
+                               VXGE_DRIVER_NAME, maj, min, bld);
+               return -EINVAL;
+       }
+
+       return ret;
+}
+
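Since vxge_probe_fw_update() may request_firmware() either X3fw.ncf or X3fw-pxe.ncf, the usual companion change is to advertise those blobs so packaging and initramfs tools copy them. Whether this patch adds the declarations elsewhere is not visible in this excerpt, so treat the two lines below as a sketch only.

/* Sketch only; not shown in this hunk. */
MODULE_FIRMWARE("vxge/X3fw.ncf");
MODULE_FIRMWARE("vxge/X3fw-pxe.ncf");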
 /**
  * vxge_probe
  * @pdev : structure containing the PCI related information of the device.
@@ -3928,7 +4236,7 @@ static inline u32 vxge_get_num_vfs(u64 function_mode)
 static int __devinit
 vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 {
-       struct __vxge_hw_device  *hldev;
+       struct __vxge_hw_device *hldev;
        enum vxge_hw_status status;
        int ret;
        int high_dma = 0;
@@ -4072,16 +4380,6 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
                goto _exit3;
        }
 
-       if (ll_config->device_hw_info.fw_version.major !=
-               VXGE_DRIVER_FW_VERSION_MAJOR) {
-               vxge_debug_init(VXGE_ERR,
-                       "%s: Incorrect firmware version."
-                       "Please upgrade the firmware to version 1.x.x",
-                       VXGE_DRIVER_NAME);
-               ret = -EINVAL;
-               goto _exit3;
-       }
-
        vpath_mask = ll_config->device_hw_info.vpath_mask;
        if (vpath_mask == 0) {
                vxge_debug_ll_config(VXGE_TRACE,
@@ -4145,11 +4443,37 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
                        goto _exit3;
        }
 
+       if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
+                       ll_config->device_hw_info.fw_version.minor,
+                       ll_config->device_hw_info.fw_version.build) >=
+           VXGE_EPROM_FW_VER) {
+               struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES];
+
+               status = vxge_hw_vpath_eprom_img_ver_get(hldev, img);
+               if (status != VXGE_HW_OK) {
+                       vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed",
+                                       VXGE_DRIVER_NAME);
+                       /* This is a non-fatal error, continue */
+               }
+
+               for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
+                       hldev->eprom_versions[i] = img[i].version;
+                       if (!img[i].is_valid)
+                               break;
+                       vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
+                                       "%d.%d.%d.%d\n", VXGE_DRIVER_NAME, i,
+                                       VXGE_EPROM_IMG_MAJOR(img[i].version),
+                                       VXGE_EPROM_IMG_MINOR(img[i].version),
+                                       VXGE_EPROM_IMG_FIX(img[i].version),
+                                       VXGE_EPROM_IMG_BUILD(img[i].version));
+               }
+       }
+
        /* if FCS stripping is not disabled in MAC fail driver load */
-       if (vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask) != VXGE_HW_OK) {
-               vxge_debug_init(VXGE_ERR,
-                       "%s: FCS stripping is not disabled in MAC"
-                       " failing driver load", VXGE_DRIVER_NAME);
+       status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask);
+       if (status != VXGE_HW_OK) {
+               vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC"
+                               " failing driver load", VXGE_DRIVER_NAME);
                ret = -EINVAL;
                goto _exit4;
        }
@@ -4163,28 +4487,32 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
        ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
        ll_config->addr_learn_en = addr_learn_en;
        ll_config->rth_algorithm = RTH_ALG_JENKINS;
-       ll_config->rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4;
-       ll_config->rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE;
-       ll_config->rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
-       ll_config->rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
-       ll_config->rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
-       ll_config->rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
+       ll_config->rth_hash_type_tcpipv4 = 1;
+       ll_config->rth_hash_type_ipv4 = 0;
+       ll_config->rth_hash_type_tcpipv6 = 0;
+       ll_config->rth_hash_type_ipv6 = 0;
+       ll_config->rth_hash_type_tcpipv6ex = 0;
+       ll_config->rth_hash_type_ipv6ex = 0;
        ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
        ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
        ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
 
-       if (vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
-               &vdev)) {
+       ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
+                                  &vdev);
+       if (ret) {
                ret = -EINVAL;
                goto _exit4;
        }
 
+       ret = vxge_probe_fw_update(vdev);
+       if (ret)
+               goto _exit5;
+
        vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
        VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
                vxge_hw_device_trace_level_get(hldev));
 
        /* set private HW device info */
-       hldev->ndev = vdev->ndev;
        vdev->mtu = VXGE_HW_DEFAULT_MTU;
        vdev->bar0 = attr.bar0;
        vdev->max_vpath_supported = max_vpath_supported;
@@ -4286,7 +4614,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
                                "%s: mac_addr_list : memory allocation failed",
                                vdev->ndev->name);
                        ret = -EPERM;
-                       goto _exit5;
+                       goto _exit6;
                }
                macaddr = (u8 *)&entry->macaddr;
                memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
@@ -4326,10 +4654,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
        kfree(ll_config);
        return 0;
 
-_exit5:
+_exit6:
        for (i = 0; i < vdev->no_of_vpath; i++)
                vxge_free_mac_add_list(&vdev->vpaths[i]);
-
+_exit5:
        vxge_device_unregister(hldev);
 _exit4:
        pci_disable_sriov(pdev);
@@ -4354,34 +4682,25 @@ _exit0:
  * Description: This function is called by the Pci subsystem to release a
  * PCI device and free up all resource held up by the device.
  */
-static void __devexit
-vxge_remove(struct pci_dev *pdev)
+static void __devexit vxge_remove(struct pci_dev *pdev)
 {
-       struct __vxge_hw_device  *hldev;
+       struct __vxge_hw_device *hldev;
        struct vxgedev *vdev = NULL;
        struct net_device *dev;
        int i = 0;
-#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
-       (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
-       u32 level_trace;
-#endif
 
-       hldev = (struct __vxge_hw_device  *) pci_get_drvdata(pdev);
+       hldev = (struct __vxge_hw_device  *)pci_get_drvdata(pdev);
 
        if (hldev == NULL)
                return;
+
        dev = hldev->ndev;
        vdev = netdev_priv(dev);
 
-#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
-       (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
-       level_trace = vdev->level_trace;
-#endif
-       vxge_debug_entryexit(level_trace,
-               "%s:%d", __func__, __LINE__);
+       vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
 
-       vxge_debug_init(level_trace,
-               "%s : removing PCI device...", __func__);
+       vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
+                       __func__);
        vxge_device_unregister(hldev);
 
        for (i = 0; i < vdev->no_of_vpath; i++) {
@@ -4399,16 +4718,16 @@ vxge_remove(struct pci_dev *pdev)
        /* we are safe to free it now */
        free_netdev(dev);
 
-       vxge_debug_init(level_trace,
-               "%s:%d  Device unregistered", __func__, __LINE__);
+       vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
+                       __func__, __LINE__);
 
        vxge_hw_device_terminate(hldev);
 
        pci_disable_device(pdev);
        pci_release_regions(pdev);
        pci_set_drvdata(pdev, NULL);
-       vxge_debug_entryexit(level_trace,
-               "%s:%d  Exiting...", __func__, __LINE__);
+       vxge_debug_entryexit(vdev->level_trace, "%s:%d  Exiting...", __func__,
+                            __LINE__);
 }
 
 static struct pci_error_handlers vxge_err_handler = {
index de64536cb7d0d1c32943b6d7b64762e306f8bcb8..953cb0ded3e1897058b0e99724f9026e45d161da 100644 (file)
@@ -29,6 +29,9 @@
 
 #define PCI_DEVICE_ID_TITAN_WIN                0x5733
 #define PCI_DEVICE_ID_TITAN_UNI                0x5833
+#define VXGE_HW_TITAN1_PCI_REVISION    1
+#define VXGE_HW_TITAN1A_PCI_REVISION   2
+
 #define        VXGE_USE_DEFAULT                0xffffffff
 #define VXGE_HW_VPATH_MSIX_ACTIVE      4
 #define VXGE_ALARM_MSIX_ID             2
 
 #define VXGE_TTI_BTIMER_VAL 250000
 
-#define VXGE_TTI_LTIMER_VAL 1000
-#define VXGE_TTI_RTIMER_VAL 0
-#define VXGE_RTI_BTIMER_VAL 250
-#define VXGE_RTI_LTIMER_VAL 100
-#define VXGE_RTI_RTIMER_VAL 0
+#define VXGE_TTI_LTIMER_VAL    1000
+#define VXGE_T1A_TTI_LTIMER_VAL        80
+#define VXGE_TTI_RTIMER_VAL    0
+#define VXGE_T1A_TTI_RTIMER_VAL        400
+#define VXGE_RTI_BTIMER_VAL    250
+#define VXGE_RTI_LTIMER_VAL    100
+#define VXGE_RTI_RTIMER_VAL    0
 #define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
 #define VXGE_ISR_POLLING_CNT   8
 #define VXGE_MAX_CONFIG_DEV    0xFF
 #define TTI_TX_UFC_B   40
 #define TTI_TX_UFC_C   60
 #define TTI_TX_UFC_D   100
+#define TTI_T1A_TX_UFC_A       30
+#define TTI_T1A_TX_UFC_B       80
+/* Slope - (max_mtu - min_mtu)/(max_mtu_ufc - min_mtu_ufc) */
+/* Slope - 93 */
+/* 60 - 9k Mtu, 140 - 1.5k mtu */
+#define TTI_T1A_TX_UFC_C(mtu)  (60 + ((VXGE_HW_MAX_MTU - mtu) / 93))
+
+/* Slope - 37 */
+/* 100 - 9k Mtu, 300 - 1.5k mtu */
+#define TTI_T1A_TX_UFC_D(mtu)  (100 + ((VXGE_HW_MAX_MTU - mtu) / 37))
+
+
+#define RTI_RX_URANGE_A                5
+#define RTI_RX_URANGE_B                15
+#define RTI_RX_URANGE_C                40
+#define RTI_T1A_RX_URANGE_A    1
+#define RTI_T1A_RX_URANGE_B    20
+#define RTI_T1A_RX_URANGE_C    50
+#define RTI_RX_UFC_A           1
+#define RTI_RX_UFC_B           5
+#define RTI_RX_UFC_C           10
+#define RTI_RX_UFC_D           15
+#define RTI_T1A_RX_UFC_B       20
+#define RTI_T1A_RX_UFC_C       50
+#define RTI_T1A_RX_UFC_D       60
 
-#define RTI_RX_URANGE_A        5
-#define RTI_RX_URANGE_B        15
-#define RTI_RX_URANGE_C        40
-#define RTI_RX_UFC_A   1
-#define RTI_RX_UFC_B   5
-#define RTI_RX_UFC_C   10
-#define RTI_RX_UFC_D   15
 
 /* Milli secs timer period */
 #define VXGE_TIMER_DELAY               10000
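The Titan-1A UFC_C/UFC_D macros scale the utilization-based fill counters linearly with MTU, per the slope comments above. A quick userspace check of the arithmetic; VXGE_HW_MAX_MTU is assumed to be 9000 here purely to match the "60 - 9k Mtu, 140 - 1.5k mtu" comment, and the driver's real constant may differ slightly.

#include <stdio.h>

/* Assumed for this sketch; the driver's real VXGE_HW_MAX_MTU may differ. */
#define VXGE_HW_MAX_MTU		9000

#define TTI_T1A_TX_UFC_C(mtu)	(60 + ((VXGE_HW_MAX_MTU - (mtu)) / 93))
#define TTI_T1A_TX_UFC_D(mtu)	(100 + ((VXGE_HW_MAX_MTU - (mtu)) / 37))

int main(void)
{
	/* roughly 140/300 at a 1.5k MTU and 60/100 at a 9k MTU */
	printf("UFC_C(1500)=%d UFC_D(1500)=%d\n",
	       TTI_T1A_TX_UFC_C(1500), TTI_T1A_TX_UFC_D(1500));
	printf("UFC_C(9000)=%d UFC_D(9000)=%d\n",
	       TTI_T1A_TX_UFC_C(9000), TTI_T1A_TX_UFC_D(9000));
	return 0;
}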
@@ -145,15 +168,15 @@ struct vxge_config {
 
        int             addr_learn_en;
 
-       int             rth_steering;
-       int             rth_algorithm;
-       int             rth_hash_type_tcpipv4;
-       int             rth_hash_type_ipv4;
-       int             rth_hash_type_tcpipv6;
-       int             rth_hash_type_ipv6;
-       int             rth_hash_type_tcpipv6ex;
-       int             rth_hash_type_ipv6ex;
-       int             rth_bkt_sz;
+       u32             rth_steering:2,
+                       rth_algorithm:2,
+                       rth_hash_type_tcpipv4:1,
+                       rth_hash_type_ipv4:1,
+                       rth_hash_type_tcpipv6:1,
+                       rth_hash_type_ipv6:1,
+                       rth_hash_type_tcpipv6ex:1,
+                       rth_hash_type_ipv6ex:1,
+                       rth_bkt_sz:8;
        int             rth_jhash_golden_ratio;
        int             tx_steering_type;
        int     fifo_indicate_max_pkts;
@@ -248,8 +271,9 @@ struct vxge_ring {
         */
        int driver_id;
 
-        /* copy of the flag indicating whether rx_csum is to be used */
-       u32 rx_csum;
+       /* copy of the flag indicating whether rx_csum is to be used */
+       u32 rx_csum:1,
+           rx_hwts:1;
 
        int pkts_processed;
        int budget;
@@ -327,7 +351,9 @@ struct vxgedev {
        u16             all_multi_flg;
 
         /* A flag indicating whether rx_csum is to be used or not. */
-       u32     rx_csum;
+       u32     rx_csum:1,
+               rx_hwts:1,
+               titan1:1;
 
        struct vxge_msix_entry *vxge_entries;
        struct msix_entry *entries;
@@ -387,8 +413,6 @@ struct vxge_tx_priv {
        static int p = val; \
        module_param(p, int, 0)
 
-#define vxge_os_bug(fmt...)            { printk(fmt); BUG(); }
-
 #define vxge_os_timer(timer, handle, arg, exp) do { \
                init_timer(&timer); \
                timer.function = handle; \
@@ -397,6 +421,11 @@ struct vxge_tx_priv {
        } while (0);
 
 extern void vxge_initialize_ethtool_ops(struct net_device *ndev);
+
+enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
+
+int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
+
 /**
  * #define VXGE_DEBUG_INIT: debug for initialization functions
  * #define VXGE_DEBUG_TX        : debug transmit related functions
index 3dd5c9615ef9a05e77bf023fef62379778cf3761..3e658b175947809e194e661fca36e9716304f193 100644 (file)
 #define VXGE_HW_TITAN_VPMGMT_REG_SPACES                        17
 #define VXGE_HW_TITAN_VPATH_REG_SPACES                 17
 
+#define VXGE_HW_FW_API_GET_EPROM_REV                   31
+
+#define VXGE_EPROM_IMG_MAJOR(val)              (u32) vxge_bVALn(val, 48, 4)
+#define VXGE_EPROM_IMG_MINOR(val)              (u32) vxge_bVALn(val, 52, 4)
+#define VXGE_EPROM_IMG_FIX(val)                        (u32) vxge_bVALn(val, 56, 4)
+#define VXGE_EPROM_IMG_BUILD(val)              (u32) vxge_bVALn(val, 60, 4)
+
+#define VXGE_HW_GET_EPROM_IMAGE_INDEX(val)             vxge_bVALn(val, 16, 8)
+#define VXGE_HW_GET_EPROM_IMAGE_VALID(val)             vxge_bVALn(val, 31, 1)
+#define VXGE_HW_GET_EPROM_IMAGE_TYPE(val)              vxge_bVALn(val, 40, 8)
+#define VXGE_HW_GET_EPROM_IMAGE_REV(val)               vxge_bVALn(val, 48, 16)
+#define VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(val)  vxge_vBIT(val, 16, 8)
+
+#define VXGE_HW_FW_API_GET_FUNC_MODE                   29
+#define VXGE_HW_GET_FUNC_MODE_VAL(val)                 (val & 0xFF)
+
+#define VXGE_HW_FW_UPGRADE_MEMO                                13
+#define VXGE_HW_FW_UPGRADE_ACTION                      16
+#define VXGE_HW_FW_UPGRADE_OFFSET_START                        2
+#define VXGE_HW_FW_UPGRADE_OFFSET_SEND                 3
+#define VXGE_HW_FW_UPGRADE_OFFSET_COMMIT               4
+#define VXGE_HW_FW_UPGRADE_OFFSET_READ                 5
+
+#define VXGE_HW_FW_UPGRADE_BLK_SIZE                    16
+#define VXGE_HW_UPGRADE_GET_RET_ERR_CODE(val)          (val & 0xff)
+#define VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(val)          ((val >> 8) & 0xff)
+
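The VXGE_EPROM_IMG_* accessors pull one nibble each out of bits 48-63 of the 64-bit image-version word, using the driver's MSB-first bit numbering. A userspace sketch of that extraction; the vxge_bVALn definition below is an assumption reproduced from the driver's usual helper shape, not quoted from this patch.

#include <stdio.h>

typedef unsigned long long u64;

/* Assumed shape of the driver's MSB-first bit-field extractor. */
#define vxge_bVALn(bits, loc, n) \
	((((u64)(bits)) >> (64 - ((loc) + (n)))) & ((0x1ULL << (n)) - 1))

#define VXGE_EPROM_IMG_MAJOR(val)	(unsigned)vxge_bVALn(val, 48, 4)
#define VXGE_EPROM_IMG_MINOR(val)	(unsigned)vxge_bVALn(val, 52, 4)
#define VXGE_EPROM_IMG_FIX(val)		(unsigned)vxge_bVALn(val, 56, 4)
#define VXGE_EPROM_IMG_BUILD(val)	(unsigned)vxge_bVALn(val, 60, 4)

int main(void)
{
	u64 ver = 0x1234;	/* low 16 bits of the version word */

	/* prints 1.2.3.4: one nibble per component, MSB-first */
	printf("%u.%u.%u.%u\n", VXGE_EPROM_IMG_MAJOR(ver),
	       VXGE_EPROM_IMG_MINOR(ver), VXGE_EPROM_IMG_FIX(ver),
	       VXGE_EPROM_IMG_BUILD(ver));
	return 0;
}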
 #define VXGE_HW_ASIC_MODE_RESERVED                             0
 #define VXGE_HW_ASIC_MODE_NO_IOV                               1
 #define VXGE_HW_ASIC_MODE_SR_IOV                               2
 #define        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_ETYPE             2
 #define        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_PN                3
 #define        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG       5
-#define        VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT  6
+#define        VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT          6
 #define        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG     7
 #define        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK          8
 #define        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY           9
 #define        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_QOS               10
 #define        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DS                11
-#define        VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12
+#define        VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT         12
 #define        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO           13
 
 #define        VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(bits) \
 #define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(bits) \
                                                        vxge_bVALn(bits, 48, 16)
 #define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_BUILD vxge_vBIT(val, 48, 16)
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(bits) vxge_bVALn(bits, 0, 8)
 
 #define        VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_GET_PPIF_SRPCIM_TO_VPATH_ALARM(bits)\
                                                        vxge_bVALn(bits, 0, 18)
@@ -3998,6 +4026,7 @@ struct vxge_hw_vpath_reg {
 #define        VXGE_HW_PRC_CFG6_L4_CPC_TRSFR_CODE_EN   vxge_mBIT(9)
 #define VXGE_HW_PRC_CFG6_RXD_CRXDT(val) vxge_vBIT(val, 23, 9)
 #define VXGE_HW_PRC_CFG6_RXD_SPAT(val) vxge_vBIT(val, 36, 9)
+#define VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val)     vxge_bVALn(val, 36, 9)
 /*0x00a78*/    u64     prc_cfg7;
 #define VXGE_HW_PRC_CFG7_SCATTER_MODE(val) vxge_vBIT(val, 6, 2)
 #define        VXGE_HW_PRC_CFG7_SMART_SCAT_EN  vxge_mBIT(11)
index 9890d4d596d0d29a5b8083656e416b1c30ca2b74..1fceee87622878f5e8e2b10d28a9901942bb83be 100644 (file)
@@ -1904,34 +1904,6 @@ enum vxge_hw_ring_tcode {
        VXGE_HW_RING_T_CODE_MULTI_ERR                   = 0xF
 };
 
-/**
- * enum enum vxge_hw_ring_hash_type - RTH hash types
- * @VXGE_HW_RING_HASH_TYPE_NONE: No Hash
- * @VXGE_HW_RING_HASH_TYPE_TCP_IPV4: TCP IPv4
- * @VXGE_HW_RING_HASH_TYPE_UDP_IPV4: UDP IPv4
- * @VXGE_HW_RING_HASH_TYPE_IPV4: IPv4
- * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6: TCP IPv6
- * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6: UDP IPv6
- * @VXGE_HW_RING_HASH_TYPE_IPV6: IPv6
- * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX: TCP IPv6 extension
- * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX: UDP IPv6 extension
- * @VXGE_HW_RING_HASH_TYPE_IPV6_EX: IPv6 extension
- *
- * RTH hash types
- */
-enum vxge_hw_ring_hash_type {
-       VXGE_HW_RING_HASH_TYPE_NONE                     = 0x0,
-       VXGE_HW_RING_HASH_TYPE_TCP_IPV4         = 0x1,
-       VXGE_HW_RING_HASH_TYPE_UDP_IPV4         = 0x2,
-       VXGE_HW_RING_HASH_TYPE_IPV4                     = 0x3,
-       VXGE_HW_RING_HASH_TYPE_TCP_IPV6         = 0x4,
-       VXGE_HW_RING_HASH_TYPE_UDP_IPV6         = 0x5,
-       VXGE_HW_RING_HASH_TYPE_IPV6                     = 0x6,
-       VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX      = 0x7,
-       VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX      = 0x8,
-       VXGE_HW_RING_HASH_TYPE_IPV6_EX          = 0x9
-};
-
 enum vxge_hw_status vxge_hw_ring_rxd_reserve(
        struct __vxge_hw_ring *ring_handle,
        void **rxdh);
index 53fefe13736875650952d7f748c79edbc1acb8c7..f05bb2f55e73034309655df55df44ec9adc60d09 100644 (file)
 
 #define VXGE_VERSION_MAJOR     "2"
 #define VXGE_VERSION_MINOR     "0"
-#define VXGE_VERSION_FIX       "9"
-#define VXGE_VERSION_BUILD     "20840"
+#define VXGE_VERSION_FIX       "10"
+#define VXGE_VERSION_BUILD     "21808"
 #define VXGE_VERSION_FOR       "k"
+
+#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))
+
+#define VXGE_DEAD_FW_VER_MAJOR 1
+#define VXGE_DEAD_FW_VER_MINOR 4
+#define VXGE_DEAD_FW_VER_BUILD 4
+
+#define VXGE_FW_DEAD_VER VXGE_FW_VER(VXGE_DEAD_FW_VER_MAJOR, \
+                                    VXGE_DEAD_FW_VER_MINOR, \
+                                    VXGE_DEAD_FW_VER_BUILD)
+
+#define VXGE_EPROM_FW_VER_MAJOR        1
+#define VXGE_EPROM_FW_VER_MINOR        6
+#define VXGE_EPROM_FW_VER_BUILD        1
+
+#define VXGE_EPROM_FW_VER VXGE_FW_VER(VXGE_EPROM_FW_VER_MAJOR, \
+                                     VXGE_EPROM_FW_VER_MINOR, \
+                                     VXGE_EPROM_FW_VER_BUILD)
+
+#define VXGE_CERT_FW_VER_MAJOR 1
+#define VXGE_CERT_FW_VER_MINOR 8
+#define VXGE_CERT_FW_VER_BUILD 1
+
+#define VXGE_CERT_FW_VER VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, \
+                                    VXGE_CERT_FW_VER_MINOR, \
+                                    VXGE_CERT_FW_VER_BUILD)
+
 #endif
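VXGE_FW_VER() folds major/minor/build into one integer so firmware versions can be compared with ordinary relational operators, as vxge_probe_fw_update() does against VXGE_CERT_FW_VER and VXGE_FW_DEAD_VER. A quick userspace check of the packing; the scheme relies on minor and build staying below 256.

#include <stdio.h>

#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))

int main(void)
{
	printf("cert 1.8.1  = 0x%06x\n", VXGE_FW_VER(1, 8, 1));	/* 0x010801 */
	printf("1.6.0 < cert: %d\n",
	       VXGE_FW_VER(1, 6, 0) < VXGE_FW_VER(1, 8, 1));	/* 1 */
	printf("1.4.4 dead:   %d\n",
	       VXGE_FW_VER(1, 4, 4) <= VXGE_FW_VER(1, 4, 4));	/* 1 */
	return 0;
}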
index c3a3292045114bf86a285622ad7b75efeef89fa3..ae07b3dfbcc160e9991c1dd8c7d695a581f67f3a 100644 (file)
@@ -124,7 +124,7 @@ MODULE_LICENSE("GPL");
 #define TX_BUF_SIZE 8192
 #define DMA_BUF_SIZE (RX_BUF_SIZE + 16)        /* 8k + 16 bytes for trailers */
 
-#define TX_TIMEOUT     10
+#define TX_TIMEOUT     (HZ/10)
 
 struct znet_private {
        int rx_dma, tx_dma;
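The znet fix replaces a raw jiffy count with an HZ-relative one: watchdog_timeo is measured in jiffies, so a literal 10 meant anything from 10 ms to 100 ms depending on CONFIG_HZ, while HZ/10 is always about 100 ms. A throwaway userspace check of that arithmetic:

#include <stdio.h>

static long to_ms(long jiffies, long hz) { return jiffies * 1000 / hz; }

int main(void)
{
	long hz_vals[] = { 100, 250, 1000 };

	for (int i = 0; i < 3; i++)
		printf("HZ=%-4ld old(10 jiffies)=%3ld ms  new(HZ/10)=%3ld ms\n",
		       hz_vals[i], to_ms(10, hz_vals[i]),
		       to_ms(hz_vals[i] / 10, hz_vals[i]));
	return 0;
}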
index 93fc2449af10e8dded6cbed23b4d979e4b5d10ef..7d164670f264c9ca141bdfea34a3c82c8740b953 100644 (file)
@@ -167,10 +167,10 @@ struct ip_sf_socklist {
  */
 
 struct ip_mc_socklist {
-       struct ip_mc_socklist   *next;
+       struct ip_mc_socklist __rcu *next_rcu;
        struct ip_mreqn         multi;
        unsigned int            sfmode;         /* MCAST_{INCLUDE,EXCLUDE} */
-       struct ip_sf_socklist   *sflist;
+       struct ip_sf_socklist __rcu     *sflist;
        struct rcu_head         rcu;
 };
 
@@ -186,11 +186,14 @@ struct ip_sf_list {
 struct ip_mc_list {
        struct in_device        *interface;
        __be32                  multiaddr;
+       unsigned int            sfmode;
        struct ip_sf_list       *sources;
        struct ip_sf_list       *tomb;
-       unsigned int            sfmode;
        unsigned long           sfcount[2];
-       struct ip_mc_list       *next;
+       union {
+               struct ip_mc_list *next;
+               struct ip_mc_list __rcu *next_rcu;
+       };
        struct timer_list       timer;
        int                     users;
        atomic_t                refcnt;
@@ -201,6 +204,7 @@ struct ip_mc_list {
        char                    loaded;
        unsigned char           gsquery;        /* check source marks? */
        unsigned char           crcount;
+       struct rcu_head         rcu;
 };
 
 /* V3 exponential field decoding */
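
The __rcu annotations and the new rcu_head turn the per-socket and per-device multicast chains into RCU-protected lists: readers walk them under rcu_read_lock() with rcu_dereference(), and writers unlink entries under the update-side lock and defer the kfree() past a grace period. A hedged sketch of both halves (kernel-style pseudocode; the function names are invented for illustration, only the RCU primitives and the fields added above are real):

/* Reader side: lockless walk of in_dev->mc_list. */
static bool sample_is_member(struct in_device *in_dev, __be32 group)
{
	struct ip_mc_list *im;
	bool found = false;

	rcu_read_lock();
	for (im = rcu_dereference(in_dev->mc_list); im;
	     im = rcu_dereference(im->next_rcu))
		if (im->multiaddr == group) {
			found = true;
			break;
		}
	rcu_read_unlock();
	return found;
}

/* Writer side: after unlinking im under the update-side lock,
 * free it only once all current readers are done. */
static void sample_mc_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct ip_mc_list, rcu));
}

static void sample_mc_drop(struct ip_mc_list *im)
{
	call_rcu(&im->rcu, sample_mc_free_rcu);
}
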
index ccd5b07d678deb8a61ff759ba943dceae6bf1507..380ba6bc5db1dfaa96912b45426edff4d9b8ec2c 100644 (file)
@@ -52,9 +52,8 @@ struct in_device {
        atomic_t                refcnt;
        int                     dead;
        struct in_ifaddr        *ifa_list;      /* IP ifaddr chain              */
-       rwlock_t                mc_list_lock;
-       struct ip_mc_list       *mc_list;       /* IP multicast filter chain    */
-       int                     mc_count;                 /* Number of installed mcasts */
+       struct ip_mc_list __rcu *mc_list;       /* IP multicast filter chain    */
+       int                     mc_count;       /* Number of installed mcasts   */
        spinlock_t              mc_tomb_lock;
        struct ip_mc_list       *mc_tomb;
        unsigned long           mr_v1_seen;
index d8fd2c23a1b994ec10e0a204820d6c7daf7dca0e..578debb801f42bbc81179c9042faf8912b1f00cc 100644 (file)
@@ -951,7 +951,7 @@ struct net_device {
 #endif
        void                    *atalk_ptr;     /* AppleTalk link       */
        struct in_device __rcu  *ip_ptr;        /* IPv4 specific data   */
-       void                    *dn_ptr;        /* DECnet specific data */
+       struct dn_dev __rcu     *dn_ptr;        /* DECnet specific data */
        struct inet6_dev __rcu  *ip6_ptr;       /* IPv6 specific data */
        void                    *ec_ptr;        /* Econet specific data */
        void                    *ax25_ptr;      /* AX.25 specific data */
index 0916bbf3bdff065fbc91d191c05aeecd3019ad40..b9e32db03f2040e76d7d73295406772de0029930 100644 (file)
@@ -5,13 +5,14 @@
 struct dn_dev;
 
 struct dn_ifaddr {
-       struct dn_ifaddr *ifa_next;
+       struct dn_ifaddr __rcu *ifa_next;
        struct dn_dev    *ifa_dev;
        __le16            ifa_local;
        __le16            ifa_address;
        __u8              ifa_flags;
        __u8              ifa_scope;
        char              ifa_label[IFNAMSIZ];
+       struct rcu_head   rcu;
 };
 
 #define DN_DEV_S_RU  0 /* Run - working normally   */
@@ -83,7 +84,7 @@ struct dn_dev_parms {
 
 
 struct dn_dev {
-       struct dn_ifaddr *ifa_list;
+       struct dn_ifaddr __rcu *ifa_list;
        struct net_device *dev;
        struct dn_dev_parms parms;
        char use_long;
@@ -171,19 +172,27 @@ extern int unregister_dnaddr_notifier(struct notifier_block *nb);
 
 static inline int dn_dev_islocal(struct net_device *dev, __le16 addr)
 {
-       struct dn_dev *dn_db = dev->dn_ptr;
+       struct dn_dev *dn_db;
        struct dn_ifaddr *ifa;
+       int res = 0;
 
+       rcu_read_lock();
+       dn_db = rcu_dereference(dev->dn_ptr);
        if (dn_db == NULL) {
                printk(KERN_DEBUG "dn_dev_islocal: Called for non DECnet device\n");
-               return 0;
+               goto out;
        }
 
-       for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next)
-               if ((addr ^ ifa->ifa_local) == 0)
-                       return 1;
-
-       return 0;
+       for (ifa = rcu_dereference(dn_db->ifa_list);
+            ifa != NULL;
+            ifa = rcu_dereference(ifa->ifa_next))
+               if ((addr ^ ifa->ifa_local) == 0) {
+                       res = 1;
+                       break;
+               }
+out:
+       rcu_read_unlock();
+       return res;
 }
 
 #endif /* _NET_DN_DEV_H */
index ccadab3aa3f6c5948e484062c82c022b5dbf132b..9b185df265fb5ed2fc5e7d690368d7dd5405b011 100644 (file)
@@ -80,6 +80,16 @@ struct dn_route {
        unsigned rt_type;
 };
 
+static inline bool dn_is_input_route(struct dn_route *rt)
+{
+       return rt->fl.iif != 0;
+}
+
+static inline bool dn_is_output_route(struct dn_route *rt)
+{
+       return rt->fl.iif == 0;
+}
+
 extern void dn_route_init(void);
 extern void dn_route_cleanup(void);
 
index ffe9cb719c0e526302bb2d4635c6f402ce31c867..a5bd72646d6510f18ff085674283f55386e118bc 100644 (file)
@@ -94,10 +94,10 @@ struct dst_entry {
        int                     __use;
        unsigned long           lastuse;
        union {
-               struct dst_entry *next;
-               struct rtable __rcu *rt_next;
-               struct rt6_info   *rt6_next;
-               struct dn_route  *dn_next;
+               struct dst_entry        *next;
+               struct rtable __rcu     *rt_next;
+               struct rt6_info         *rt6_next;
+               struct dn_route __rcu   *dn_next;
        };
 };
 
index 1989cfd7405fccfc6839f2742eb45b4b2d7f8712..8945f9fb192ab536d0e27f0b9617046f5086b9bc 100644 (file)
@@ -141,7 +141,7 @@ struct inet_sock {
                                nodefrag:1;
        int                     mc_index;
        __be32                  mc_addr;
-       struct ip_mc_socklist   *mc_list;
+       struct ip_mc_socklist __rcu     *mc_list;
        struct {
                unsigned int            flags;
                unsigned int            fragsize;
index 55590ab16b3ed5192413b4b6415a8785c1d1a5bb..815b2ce9f4a496768fff673321a79bd39c0545c9 100644 (file)
@@ -96,16 +96,16 @@ struct neighbour {
        struct neigh_parms      *parms;
        unsigned long           confirmed;
        unsigned long           updated;
-       __u8                    flags;
-       __u8                    nud_state;
-       __u8                    type;
-       __u8                    dead;
+       rwlock_t                lock;
        atomic_t                refcnt;
        struct sk_buff_head     arp_queue;
        struct timer_list       timer;
        unsigned long           used;
        atomic_t                probes;
-       rwlock_t                lock;
+       __u8                    flags;
+       __u8                    nud_state;
+       __u8                    type;
+       __u8                    dead;
        seqlock_t               ha_lock;
        unsigned char           ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))];
        struct hh_cache         *hh;
index 7e5e73bfa4dec8e2d45c834507f74d86484b8715..5cd46d1c0e14cd679b5e342e497efa4ca3ace7b9 100644 (file)
@@ -55,8 +55,6 @@ struct rtable {
        /* Cache lookup keys */
        struct flowi            fl;
 
-       struct in_device        *idev;
-       
        int                     rt_genid;
        unsigned                rt_flags;
        __u16                   rt_type;
@@ -73,6 +71,16 @@ struct rtable {
        struct inet_peer        *peer; /* long-living peer info */
 };
 
+static inline bool rt_is_input_route(struct rtable *rt)
+{
+       return rt->fl.iif != 0;
+}
+
+static inline bool rt_is_output_route(struct rtable *rt)
+{
+       return rt->fl.iif == 0;
+}
+
 struct ip_rt_acct {
        __u32   o_bytes;
        __u32   o_packets;
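
rt_is_input_route()/rt_is_output_route() (and their DECnet twins dn_is_input_route()/dn_is_output_route() earlier in this merge) simply name the long-standing convention that an input route carries the incoming interface index in fl.iif while a locally generated output route has fl.iif == 0. Open-coded tests can then read, for example, like this hedged snippet (the function is invented for illustration):

/* Illustrative only: prefer the named helpers over poking at rt->fl.iif. */
static void sample_classify(struct rtable *rt)
{
	if (rt_is_input_route(rt))
		pr_debug("input route, arrived on ifindex %d\n", rt->fl.iif);
	else
		pr_debug("output route (locally originated)\n");
}
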
index 0dd54a69dace255fcdf54732d982e8c521c574a5..5968c822c99993ec67e285817bf27c25716f16e3 100644 (file)
@@ -1817,8 +1817,7 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
                if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
                        dev->ethtool_ops->get_drvinfo(dev, &info);
 
-               WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
-                       "ip_summed=%d",
+               WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d ip_summed=%d\n",
                     info.driver, dev ? dev->features : 0L,
                     skb->sk ? skb->sk->sk_route_caps : 0L,
                     skb->len, skb->data_len, skb->ip_summed);
index 92a6fcb40d7daa4c3e1bc3b8e2b5a49c4948cf3d..abaf241c7353e6e109582f253eaf5865ed968cab 100644 (file)
@@ -1,7 +1,8 @@
 /*
  *  net/dccp/ackvec.c
  *
- *  An implementation of the DCCP protocol
+ *  An implementation of Ack Vectors for the DCCP protocol
+ *  Copyright (c) 2007 University of Aberdeen, Scotland, UK
  *  Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
  *
  *      This program is free software; you can redistribute it and/or modify it
 static struct kmem_cache *dccp_ackvec_slab;
 static struct kmem_cache *dccp_ackvec_record_slab;
 
-static struct dccp_ackvec_record *dccp_ackvec_record_new(void)
+struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority)
 {
-       struct dccp_ackvec_record *avr =
-                       kmem_cache_alloc(dccp_ackvec_record_slab, GFP_ATOMIC);
-
-       if (avr != NULL)
-               INIT_LIST_HEAD(&avr->avr_node);
+       struct dccp_ackvec *av = kmem_cache_zalloc(dccp_ackvec_slab, priority);
 
-       return avr;
+       if (av != NULL) {
+               av->av_buf_head = av->av_buf_tail = DCCPAV_MAX_ACKVEC_LEN - 1;
+               INIT_LIST_HEAD(&av->av_records);
+       }
+       return av;
 }
 
-static void dccp_ackvec_record_delete(struct dccp_ackvec_record *avr)
+static void dccp_ackvec_purge_records(struct dccp_ackvec *av)
 {
-       if (unlikely(avr == NULL))
-               return;
-       /* Check if deleting a linked record */
-       WARN_ON(!list_empty(&avr->avr_node));
-       kmem_cache_free(dccp_ackvec_record_slab, avr);
+       struct dccp_ackvec_record *cur, *next;
+
+       list_for_each_entry_safe(cur, next, &av->av_records, avr_node)
+               kmem_cache_free(dccp_ackvec_record_slab, cur);
+       INIT_LIST_HEAD(&av->av_records);
 }
 
-static void dccp_ackvec_insert_avr(struct dccp_ackvec *av,
-                                  struct dccp_ackvec_record *avr)
+void dccp_ackvec_free(struct dccp_ackvec *av)
 {
-       /*
-        * AVRs are sorted by seqno. Since we are sending them in order, we
-        * just add the AVR at the head of the list.
-        * -sorbo.
-        */
-       if (!list_empty(&av->av_records)) {
-               const struct dccp_ackvec_record *head =
-                                       list_entry(av->av_records.next,
-                                                  struct dccp_ackvec_record,
-                                                  avr_node);
-               BUG_ON(before48(avr->avr_ack_seqno, head->avr_ack_seqno));
+       if (likely(av != NULL)) {
+               dccp_ackvec_purge_records(av);
+               kmem_cache_free(dccp_ackvec_slab, av);
        }
-
-       list_add(&avr->avr_node, &av->av_records);
 }
 
-int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb)
+/**
+ * dccp_ackvec_update_records  -  Record information about sent Ack Vectors
+ * @av:                Ack Vector records to update
+ * @seqno:     Sequence number of the packet carrying the Ack Vector just sent
+ * @nonce_sum: The sum of all buffer nonces contained in the Ack Vector
+ */
+int dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seqno, u8 nonce_sum)
 {
-       struct dccp_sock *dp = dccp_sk(sk);
-       struct dccp_ackvec *av = dp->dccps_hc_rx_ackvec;
-       /* Figure out how many options do we need to represent the ackvec */
-       const u8 nr_opts = DIV_ROUND_UP(av->av_vec_len, DCCP_SINGLE_OPT_MAXLEN);
-       u16 len = av->av_vec_len + 2 * nr_opts, i;
-       u32 elapsed_time;
-       const unsigned char *tail, *from;
-       unsigned char *to;
        struct dccp_ackvec_record *avr;
-       suseconds_t delta;
-
-       if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN)
-               return -1;
-
-       delta = ktime_us_delta(ktime_get_real(), av->av_time);
-       elapsed_time = delta / 10;
 
-       if (elapsed_time != 0 &&
-           dccp_insert_option_elapsed_time(skb, elapsed_time))
-               return -1;
-
-       avr = dccp_ackvec_record_new();
+       avr = kmem_cache_alloc(dccp_ackvec_record_slab, GFP_ATOMIC);
        if (avr == NULL)
-               return -1;
-
-       DCCP_SKB_CB(skb)->dccpd_opt_len += len;
-
-       to   = skb_push(skb, len);
-       len  = av->av_vec_len;
-       from = av->av_buf + av->av_buf_head;
-       tail = av->av_buf + DCCP_MAX_ACKVEC_LEN;
-
-       for (i = 0; i < nr_opts; ++i) {
-               int copylen = len;
-
-               if (len > DCCP_SINGLE_OPT_MAXLEN)
-                       copylen = DCCP_SINGLE_OPT_MAXLEN;
-
-               *to++ = DCCPO_ACK_VECTOR_0;
-               *to++ = copylen + 2;
-
-               /* Check if buf_head wraps */
-               if (from + copylen > tail) {
-                       const u16 tailsize = tail - from;
-
-                       memcpy(to, from, tailsize);
-                       to      += tailsize;
-                       len     -= tailsize;
-                       copylen -= tailsize;
-                       from    = av->av_buf;
-               }
-
-               memcpy(to, from, copylen);
-               from += copylen;
-               to   += copylen;
-               len  -= copylen;
-       }
+               return -ENOBUFS;
 
+       avr->avr_ack_seqno  = seqno;
+       avr->avr_ack_ptr    = av->av_buf_head;
+       avr->avr_ack_ackno  = av->av_buf_ackno;
+       avr->avr_ack_nonce  = nonce_sum;
+       avr->avr_ack_runlen = dccp_ackvec_runlen(av->av_buf + av->av_buf_head);
        /*
-        *      From RFC 4340, A.2:
-        *
-        *      For each acknowledgement it sends, the HC-Receiver will add an
-        *      acknowledgement record.  ack_seqno will equal the HC-Receiver
-        *      sequence number it used for the ack packet; ack_ptr will equal
-        *      buf_head; ack_ackno will equal buf_ackno; and ack_nonce will
-        *      equal buf_nonce.
+        * When the buffer overflows, we keep no more than one record. This is
+        * the simplest way of disambiguating sender-Acks dating from before the
+        * overflow from sender-Acks which refer to after the overflow; a simple
+        * solution is preferable here since we are handling an exception.
         */
-       avr->avr_ack_seqno = DCCP_SKB_CB(skb)->dccpd_seq;
-       avr->avr_ack_ptr   = av->av_buf_head;
-       avr->avr_ack_ackno = av->av_buf_ackno;
-       avr->avr_ack_nonce = av->av_buf_nonce;
-       avr->avr_sent_len  = av->av_vec_len;
-
-       dccp_ackvec_insert_avr(av, avr);
+       if (av->av_overflow)
+               dccp_ackvec_purge_records(av);
+       /*
+        * Since GSS is incremented for each packet, the list is automatically
+        * arranged in descending order of @ack_seqno.
+        */
+       list_add(&avr->avr_node, &av->av_records);
 
-       dccp_pr_debug("%s ACK Vector 0, len=%d, ack_seqno=%llu, "
-                     "ack_ackno=%llu\n",
-                     dccp_role(sk), avr->avr_sent_len,
+       dccp_pr_debug("Added Vector, ack_seqno=%llu, ack_ackno=%llu (rl=%u)\n",
                      (unsigned long long)avr->avr_ack_seqno,
-                     (unsigned long long)avr->avr_ack_ackno);
+                     (unsigned long long)avr->avr_ack_ackno,
+                     avr->avr_ack_runlen);
        return 0;
 }
 
-struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority)
-{
-       struct dccp_ackvec *av = kmem_cache_alloc(dccp_ackvec_slab, priority);
-
-       if (av != NULL) {
-               av->av_buf_head  = DCCP_MAX_ACKVEC_LEN - 1;
-               av->av_buf_ackno = UINT48_MAX + 1;
-               av->av_buf_nonce = 0;
-               av->av_time      = ktime_set(0, 0);
-               av->av_vec_len   = 0;
-               INIT_LIST_HEAD(&av->av_records);
-       }
-
-       return av;
-}
-
-void dccp_ackvec_free(struct dccp_ackvec *av)
+/*
+ * Buffer index and length computation using modulo-buffersize arithmetic.
+ * Note that, as pointers move from right to left, head is `before' tail.
+ */
+static inline u16 __ackvec_idx_add(const u16 a, const u16 b)
 {
-       if (unlikely(av == NULL))
-               return;
-
-       if (!list_empty(&av->av_records)) {
-               struct dccp_ackvec_record *avr, *next;
-
-               list_for_each_entry_safe(avr, next, &av->av_records, avr_node) {
-                       list_del_init(&avr->avr_node);
-                       dccp_ackvec_record_delete(avr);
-               }
-       }
-
-       kmem_cache_free(dccp_ackvec_slab, av);
+       return (a + b) % DCCPAV_MAX_ACKVEC_LEN;
 }
 
-static inline u8 dccp_ackvec_state(const struct dccp_ackvec *av,
-                                  const u32 index)
+static inline u16 __ackvec_idx_sub(const u16 a, const u16 b)
 {
-       return av->av_buf[index] & DCCP_ACKVEC_STATE_MASK;
+       return __ackvec_idx_add(a, DCCPAV_MAX_ACKVEC_LEN - b);
 }
 
-static inline u8 dccp_ackvec_len(const struct dccp_ackvec *av,
-                                const u32 index)
+u16 dccp_ackvec_buflen(const struct dccp_ackvec *av)
 {
-       return av->av_buf[index] & DCCP_ACKVEC_LEN_MASK;
+       if (unlikely(av->av_overflow))
+               return DCCPAV_MAX_ACKVEC_LEN;
+       return __ackvec_idx_sub(av->av_buf_tail, av->av_buf_head);
 }
 
 /*
@@ -204,7 +125,7 @@ static inline int dccp_ackvec_set_buf_head_state(struct dccp_ackvec *av,
        long gap;
        long new_head;
 
-       if (av->av_vec_len + packets > DCCP_MAX_ACKVEC_LEN)
+       if (av->av_vec_len + packets > DCCPAV_MAX_ACKVEC_LEN)
                return -ENOBUFS;
 
        gap      = packets - 1;
@@ -212,18 +133,18 @@ static inline int dccp_ackvec_set_buf_head_state(struct dccp_ackvec *av,
 
        if (new_head < 0) {
                if (gap > 0) {
-                       memset(av->av_buf, DCCP_ACKVEC_STATE_NOT_RECEIVED,
+                       memset(av->av_buf, DCCPAV_NOT_RECEIVED,
                               gap + new_head + 1);
                        gap = -new_head;
                }
-               new_head += DCCP_MAX_ACKVEC_LEN;
+               new_head += DCCPAV_MAX_ACKVEC_LEN;
        }
 
        av->av_buf_head = new_head;
 
        if (gap > 0)
                memset(av->av_buf + av->av_buf_head + 1,
-                      DCCP_ACKVEC_STATE_NOT_RECEIVED, gap);
+                      DCCPAV_NOT_RECEIVED, gap);
 
        av->av_buf[av->av_buf_head] = state;
        av->av_vec_len += packets;
@@ -236,6 +157,8 @@ static inline int dccp_ackvec_set_buf_head_state(struct dccp_ackvec *av,
 int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk,
                    const u64 ackno, const u8 state)
 {
+       u8 *cur_head = av->av_buf + av->av_buf_head,
+          *buf_end  = av->av_buf + DCCPAV_MAX_ACKVEC_LEN;
        /*
         * Check at the right places if the buffer is full, if it is, tell the
         * caller to start dropping packets till the HC-Sender acks our ACK
@@ -260,7 +183,7 @@ int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk,
 
        /* See if this is the first ackno being inserted */
        if (av->av_vec_len == 0) {
-               av->av_buf[av->av_buf_head] = state;
+               *cur_head = state;
                av->av_vec_len = 1;
        } else if (after48(ackno, av->av_buf_ackno)) {
                const u64 delta = dccp_delta_seqno(av->av_buf_ackno, ackno);
@@ -269,10 +192,9 @@ int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk,
                 * Look if the state of this packet is the same as the
                 * previous ackno and if so if we can bump the head len.
                 */
-               if (delta == 1 &&
-                   dccp_ackvec_state(av, av->av_buf_head) == state &&
-                   dccp_ackvec_len(av, av->av_buf_head) < DCCP_ACKVEC_LEN_MASK)
-                       av->av_buf[av->av_buf_head]++;
+               if (delta == 1 && dccp_ackvec_state(cur_head) == state &&
+                   dccp_ackvec_runlen(cur_head) < DCCPAV_MAX_RUNLEN)
+                       *cur_head += 1;
                else if (dccp_ackvec_set_buf_head_state(av, delta, state))
                        return -ENOBUFS;
        } else {
@@ -285,21 +207,17 @@ int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk,
                 *      could reduce the complexity of this scan.)
                 */
                u64 delta = dccp_delta_seqno(ackno, av->av_buf_ackno);
-               u32 index = av->av_buf_head;
 
                while (1) {
-                       const u8 len = dccp_ackvec_len(av, index);
-                       const u8 av_state = dccp_ackvec_state(av, index);
+                       const u8 len = dccp_ackvec_runlen(cur_head);
                        /*
                         * valid packets not yet in av_buf have a reserved
                         * entry, with a len equal to 0.
                         */
-                       if (av_state == DCCP_ACKVEC_STATE_NOT_RECEIVED &&
-                           len == 0 && delta == 0) { /* Found our
-                                                        reserved seat! */
+                       if (*cur_head == DCCPAV_NOT_RECEIVED && delta == 0) {
                                dccp_pr_debug("Found %llu reserved seat!\n",
                                              (unsigned long long)ackno);
-                               av->av_buf[index] = state;
+                               *cur_head = state;
                                goto out;
                        }
                        /* len == 0 means one packet */
@@ -307,13 +225,12 @@ int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk,
                                goto out_duplicate;
 
                        delta -= len + 1;
-                       if (++index == DCCP_MAX_ACKVEC_LEN)
-                               index = 0;
+                       if (++cur_head == buf_end)
+                               cur_head = av->av_buf;
                }
        }
 
        av->av_buf_ackno = ackno;
-       av->av_time = ktime_get_real();
 out:
        return 0;
 
@@ -333,13 +250,13 @@ static void dccp_ackvec_throw_record(struct dccp_ackvec *av,
        if (av->av_buf_head <= avr->avr_ack_ptr)
                av->av_vec_len = avr->avr_ack_ptr - av->av_buf_head;
        else
-               av->av_vec_len = DCCP_MAX_ACKVEC_LEN - 1 -
+               av->av_vec_len = DCCPAV_MAX_ACKVEC_LEN - 1 -
                                 av->av_buf_head + avr->avr_ack_ptr;
 
        /* free records */
        list_for_each_entry_safe_from(avr, next, &av->av_records, avr_node) {
-               list_del_init(&avr->avr_node);
-               dccp_ackvec_record_delete(avr);
+               list_del(&avr->avr_node);
+               kmem_cache_free(dccp_ackvec_record_slab, avr);
        }
 }
 
@@ -357,7 +274,7 @@ void dccp_ackvec_check_rcv_ackno(struct dccp_ackvec *av, struct sock *sk,
                if (ackno == avr->avr_ack_seqno) {
                        dccp_pr_debug("%s ACK packet 0, len=%d, ack_seqno=%llu, "
                                      "ack_ackno=%llu, ACKED!\n",
-                                     dccp_role(sk), 1,
+                                     dccp_role(sk), avr->avr_ack_runlen,
                                      (unsigned long long)avr->avr_ack_seqno,
                                      (unsigned long long)avr->avr_ack_ackno);
                        dccp_ackvec_throw_record(av, avr);
@@ -387,7 +304,7 @@ static void dccp_ackvec_check_rcv_ackvector(struct dccp_ackvec *av,
         */
        avr = list_entry(av->av_records.next, struct dccp_ackvec_record, avr_node);
        while (i--) {
-               const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK;
+               const u8 rl = dccp_ackvec_runlen(vector);
                u64 ackno_end_rl;
 
                dccp_set_seqno(&ackno_end_rl, *ackno - rl);
@@ -404,8 +321,7 @@ static void dccp_ackvec_check_rcv_ackvector(struct dccp_ackvec *av,
                break;
 found:
                if (between48(avr->avr_ack_seqno, ackno_end_rl, *ackno)) {
-                       const u8 state = *vector & DCCP_ACKVEC_STATE_MASK;
-                       if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED) {
+                       if (dccp_ackvec_state(vector) != DCCPAV_NOT_RECEIVED) {
                                dccp_pr_debug("%s ACK vector 0, len=%d, "
                                              "ack_seqno=%llu, ack_ackno=%llu, "
                                              "ACKED!\n",
@@ -448,10 +364,9 @@ int __init dccp_ackvec_init(void)
        if (dccp_ackvec_slab == NULL)
                goto out_err;
 
-       dccp_ackvec_record_slab =
-                       kmem_cache_create("dccp_ackvec_record",
-                                         sizeof(struct dccp_ackvec_record),
-                                         0, SLAB_HWCACHE_ALIGN, NULL);
+       dccp_ackvec_record_slab = kmem_cache_create("dccp_ackvec_record",
+                                            sizeof(struct dccp_ackvec_record),
+                                            0, SLAB_HWCACHE_ALIGN, NULL);
        if (dccp_ackvec_record_slab == NULL)
                goto out_destroy_slab;
 
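The rewritten Ack Vector code treats av_buf as a circular buffer whose indices move downwards as new cells are added, so buf_head is logically "before" buf_tail and every index or length computation is taken modulo DCCPAV_MAX_ACKVEC_LEN. The following standalone sketch mirrors __ackvec_idx_add()/__ackvec_idx_sub() and dccp_ackvec_buflen() above, assuming DCCP_SINGLE_OPT_MAXLEN is 253 (the most Ack Vector payload a single option can carry); the head movements are just example values:

#include <stdio.h>

#define DCCP_SINGLE_OPT_MAXLEN	253		/* assumed value */
#define DCCPAV_NUM_ACKVECS	2
#define DCCPAV_MAX_ACKVEC_LEN	(DCCP_SINGLE_OPT_MAXLEN * DCCPAV_NUM_ACKVECS)

static unsigned int idx_add(unsigned int a, unsigned int b)
{
	return (a + b) % DCCPAV_MAX_ACKVEC_LEN;
}

static unsigned int idx_sub(unsigned int a, unsigned int b)
{
	return idx_add(a, DCCPAV_MAX_ACKVEC_LEN - b);
}

int main(void)
{
	/* Freshly allocated vector: head == tail == DCCPAV_MAX_ACKVEC_LEN - 1. */
	unsigned int tail = DCCPAV_MAX_ACKVEC_LEN - 1;
	unsigned int head = tail;

	printf("empty:         buflen = %u\n", idx_sub(tail, head));

	/* Recording three more cells moves head three slots "to the left". */
	head = idx_sub(head, 3);
	printf("after 3 cells: head = %u, buflen = %u\n", head, idx_sub(tail, head));
	return 0;
}
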
index 7ea557b7c6b15aa15bf326622ce3ba400f389fa5..23880be8fc29711752e42e564364f97dd9bc729e 100644 (file)
@@ -3,9 +3,9 @@
 /*
  *  net/dccp/ackvec.h
  *
- *  An implementation of the DCCP protocol
+ *  An implementation of Ack Vectors for the DCCP protocol
+ *  Copyright (c) 2007 University of Aberdeen, Scotland, UK
  *  Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@mandriva.com>
- *
  *     This program is free software; you can redistribute it and/or modify it
  *     under the terms of the GNU General Public License version 2 as
  *     published by the Free Software Foundation.
 
 #include <linux/dccp.h>
 #include <linux/compiler.h>
-#include <linux/ktime.h>
 #include <linux/list.h>
 #include <linux/types.h>
 
-/* We can spread an ack vector across multiple options */
-#define DCCP_MAX_ACKVEC_LEN (DCCP_SINGLE_OPT_MAXLEN * 2)
+/*
+ * Ack Vector buffer space is static, in multiples of %DCCP_SINGLE_OPT_MAXLEN,
+ * the maximum size of a single Ack Vector. Setting %DCCPAV_NUM_ACKVECS to 1
+ * will be sufficient for most cases of low Ack Ratios, using a value of 2 gives
+ * more headroom if Ack Ratio is higher or when the sender acknowledges slowly.
+ * The maximum value is bounded by the u16 types for indices and functions.
+ */
+#define DCCPAV_NUM_ACKVECS     2
+#define DCCPAV_MAX_ACKVEC_LEN  (DCCP_SINGLE_OPT_MAXLEN * DCCPAV_NUM_ACKVECS)
 
 /* Estimated minimum average Ack Vector length - used for updating MPS */
 #define DCCPAV_MIN_OPTLEN      16
 
-#define DCCP_ACKVEC_STATE_RECEIVED     0
-#define DCCP_ACKVEC_STATE_ECN_MARKED   (1 << 6)
-#define DCCP_ACKVEC_STATE_NOT_RECEIVED (3 << 6)
+enum dccp_ackvec_states {
+       DCCPAV_RECEIVED =       0x00,
+       DCCPAV_ECN_MARKED =     0x40,
+       DCCPAV_RESERVED =       0x80,
+       DCCPAV_NOT_RECEIVED =   0xC0
+};
+#define DCCPAV_MAX_RUNLEN      0x3F
 
-#define DCCP_ACKVEC_STATE_MASK         0xC0 /* 11000000 */
-#define DCCP_ACKVEC_LEN_MASK           0x3F /* 00111111 */
+static inline u8 dccp_ackvec_runlen(const u8 *cell)
+{
+       return *cell & DCCPAV_MAX_RUNLEN;
+}
 
-/** struct dccp_ackvec - ack vector
- *
- * This data structure is the one defined in RFC 4340, Appendix A.
- *
- * @av_buf_head - circular buffer head
- * @av_buf_tail - circular buffer tail
- * @av_buf_ackno - ack # of the most recent packet acknowledgeable in the
- *                    buffer (i.e. %av_buf_head)
- * @av_buf_nonce - the one-bit sum of the ECN Nonces on all packets acked
- *                    by the buffer with State 0
- *
- * Additionally, the HC-Receiver must keep some information about the
- * Ack Vectors it has recently sent. For each packet sent carrying an
- * Ack Vector, it remembers four variables:
+static inline u8 dccp_ackvec_state(const u8 *cell)
+{
+       return *cell & ~DCCPAV_MAX_RUNLEN;
+}
+
+/** struct dccp_ackvec - Ack Vector main data structure
  *
- * @av_records - list of dccp_ackvec_record
- * @av_ack_nonce - the one-bit sum of the ECN Nonces for all State 0.
+ * This implements a fixed-size circular buffer within an array and is largely
+ * based on Appendix A of RFC 4340.
  *
- * @av_time - the time in usecs
- * @av_buf - circular buffer of acknowledgeable packets
+ * @av_buf:       circular buffer storage area
+ * @av_buf_head:   head index; begin of live portion in @av_buf
+ * @av_buf_tail:   tail index; first index _after_ the live portion in @av_buf
+ * @av_buf_ackno:  highest seqno of acknowledgeable packet recorded in @av_buf
+ * @av_tail_ackno: lowest  seqno of acknowledgeable packet recorded in @av_buf
+ * @av_buf_nonce:  ECN nonce sums, each covering subsequent segments of up to
+ *                %DCCP_SINGLE_OPT_MAXLEN cells in the live portion of @av_buf
+ * @av_overflow:   if 1 then buf_head == buf_tail indicates buffer wraparound
+ * @av_records:           list of %dccp_ackvec_record (Ack Vectors sent previously)
+ * @av_vec_len:   length of the live portion of @av_buf
  */
 struct dccp_ackvec {
-       u64                     av_buf_ackno;
-       struct list_head        av_records;
-       ktime_t                 av_time;
+       u8                      av_buf[DCCPAV_MAX_ACKVEC_LEN];
        u16                     av_buf_head;
+       u16                     av_buf_tail;
+       u64                     av_buf_ackno:48;
+       u64                     av_tail_ackno:48;
+       bool                    av_buf_nonce[DCCPAV_NUM_ACKVECS];
+       u8                      av_overflow:1;
+       struct list_head        av_records;
        u16                     av_vec_len;
-       u8                      av_buf_nonce;
-       u8                      av_ack_nonce;
-       u8                      av_buf[DCCP_MAX_ACKVEC_LEN];
 };
 
-/** struct dccp_ackvec_record - ack vector record
+/** struct dccp_ackvec_record - Records information about sent Ack Vectors
  *
- * ACK vector record as defined in Appendix A of spec.
+ * These list entries define the additional information which the HC-Receiver
+ * keeps about recently-sent Ack Vectors; again refer to RFC 4340, Appendix A.
  *
- * The list is sorted by avr_ack_seqno
+ * @avr_node:      the list node in @av_records
+ * @avr_ack_seqno:  sequence number of the packet the Ack Vector was sent on
+ * @avr_ack_ackno:  the Ack number that this record/Ack Vector refers to
+ * @avr_ack_ptr:    pointer into @av_buf where this record starts
+ * @avr_ack_runlen: run length of @avr_ack_ptr at the time of sending
+ * @avr_ack_nonce:  the sum of @av_buf_nonce's at the time this record was sent
  *
- * @avr_node - node in av_records
- * @avr_ack_seqno - sequence number of the packet this record was sent on
- * @avr_ack_ackno - sequence number being acknowledged
- * @avr_ack_ptr - pointer into av_buf where this record starts
- * @avr_ack_nonce - av_ack_nonce at the time this record was sent
- * @avr_sent_len - lenght of the record in av_buf
+ * The list as a whole is sorted in descending order by @avr_ack_seqno.
  */
 struct dccp_ackvec_record {
        struct list_head avr_node;
-       u64              avr_ack_seqno;
-       u64              avr_ack_ackno;
+       u64              avr_ack_seqno:48;
+       u64              avr_ack_ackno:48;
        u16              avr_ack_ptr;
-       u16              avr_sent_len;
-       u8               avr_ack_nonce;
+       u8               avr_ack_runlen;
+       u8               avr_ack_nonce:1;
 };
 
 struct sock;
@@ -102,10 +116,11 @@ extern int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb,
                             u64 *ackno, const u8 opt,
                             const u8 *value, const u8 len);
 
-extern int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb);
+extern int  dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seq, u8 sum);
+extern u16  dccp_ackvec_buflen(const struct dccp_ackvec *av);
 
-static inline int dccp_ackvec_pending(const struct dccp_ackvec *av)
+static inline bool dccp_ackvec_is_empty(const struct dccp_ackvec *av)
 {
-       return av->av_vec_len;
+       return av->av_overflow == 0 && av->av_buf_head == av->av_buf_tail;
 }
 #endif /* _ACKVEC_H */
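
Each byte ("cell") of av_buf packs the receive state into the top two bits and a run length, the number of additional consecutive packets sharing that state, into the low six bits; dccp_ackvec_state() and dccp_ackvec_runlen() above just mask those halves apart. A small standalone decoding example (the cell values are arbitrary):

#include <stdio.h>

#define DCCPAV_MAX_RUNLEN	0x3F

enum dccp_ackvec_states {
	DCCPAV_RECEIVED		= 0x00,
	DCCPAV_ECN_MARKED	= 0x40,
	DCCPAV_RESERVED		= 0x80,
	DCCPAV_NOT_RECEIVED	= 0xC0
};

static unsigned int cell_runlen(unsigned char cell)
{
	return cell & DCCPAV_MAX_RUNLEN;
}

static unsigned int cell_state(unsigned char cell)
{
	return cell & ~DCCPAV_MAX_RUNLEN;
}

int main(void)
{
	unsigned char cell = 0xC4;	/* NOT_RECEIVED, run length 4 */

	/* A run length of N means the cell covers N + 1 consecutive packets. */
	printf("state=%#x runlen=%u -> covers %u packets\n",
	       cell_state(cell), cell_runlen(cell), cell_runlen(cell) + 1);

	cell = DCCPAV_RECEIVED;		/* a single packet, received, no ECN mark */
	printf("state=%#x runlen=%u\n", cell_state(cell), cell_runlen(cell));
	return 0;
}
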
index 6576eae9e7792499f962592ce66cebf8c8d34fe7..cb1b4a0d18771036690cfd0180cc8856630bd17e 100644 (file)
@@ -513,8 +513,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
                                         &vector, &veclen)) != -1) {
                /* go through this ack vector */
                while (veclen--) {
-                       const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK;
-                       u64 ackno_end_rl = SUB48(ackno, rl);
+                       u64 ackno_end_rl = SUB48(ackno, dccp_ackvec_runlen(vector));
 
                        ccid2_pr_debug("ackvec start:%llu end:%llu\n",
                                       (unsigned long long)ackno,
@@ -537,17 +536,15 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
                         * run length
                         */
                        while (between48(seqp->ccid2s_seq,ackno_end_rl,ackno)) {
-                               const u8 state = *vector &
-                                                DCCP_ACKVEC_STATE_MASK;
+                               const u8 state = dccp_ackvec_state(vector);
 
                                /* new packet received or marked */
-                               if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED &&
+                               if (state != DCCPAV_NOT_RECEIVED &&
                                    !seqp->ccid2s_acked) {
-                                       if (state ==
-                                           DCCP_ACKVEC_STATE_ECN_MARKED) {
+                                       if (state == DCCPAV_ECN_MARKED)
                                                ccid2_congestion_event(sk,
                                                                       seqp);
-                                       else
+                                       else
                                                ccid2_new_ack(sk, seqp,
                                                              &maxincr);
 
index a8ed459508b294feb774c1597e788fa38313e995..19fafd597465fac2ac7e40ff77b862a91884ed6b 100644 (file)
@@ -457,12 +457,15 @@ static inline void dccp_update_gss(struct sock *sk, u64 seq)
        dp->dccps_awh = dp->dccps_gss;
 }
 
+static inline int dccp_ackvec_pending(const struct sock *sk)
+{
+       return dccp_sk(sk)->dccps_hc_rx_ackvec != NULL &&
+              !dccp_ackvec_is_empty(dccp_sk(sk)->dccps_hc_rx_ackvec);
+}
+
 static inline int dccp_ack_pending(const struct sock *sk)
 {
-       const struct dccp_sock *dp = dccp_sk(sk);
-       return (dp->dccps_hc_rx_ackvec != NULL &&
-               dccp_ackvec_pending(dp->dccps_hc_rx_ackvec)) ||
-              inet_csk_ack_scheduled(sk);
+       return dccp_ackvec_pending(sk) || inet_csk_ack_scheduled(sk);
 }
 
 extern int  dccp_feat_finalise_settings(struct dccp_sock *dp);
index 265985370fa1de664b1b7f3de427cb043fc74665..c7aeeba859d4ea622e82f32ea28a494171100e5d 100644 (file)
@@ -378,8 +378,7 @@ int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
        if (dp->dccps_hc_rx_ackvec != NULL &&
            dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
-                           DCCP_SKB_CB(skb)->dccpd_seq,
-                           DCCP_ACKVEC_STATE_RECEIVED))
+                           DCCP_SKB_CB(skb)->dccpd_seq, DCCPAV_RECEIVED))
                goto discard;
        dccp_deliver_input_to_ccids(sk, skb);
 
@@ -637,8 +636,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 
                if (dp->dccps_hc_rx_ackvec != NULL &&
                    dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
-                                   DCCP_SKB_CB(skb)->dccpd_seq,
-                                   DCCP_ACKVEC_STATE_RECEIVED))
+                                   DCCP_SKB_CB(skb)->dccpd_seq, DCCPAV_RECEIVED))
                        goto discard;
 
                dccp_deliver_input_to_ccids(sk, skb);
index cd306181300940f924b682c9f77e5881323ce6a0..5adeeed5e0d2ea089699738ad6a03b47e8420544 100644 (file)
@@ -340,6 +340,7 @@ static inline int dccp_elapsed_time_len(const u32 elapsed_time)
        return elapsed_time == 0 ? 0 : elapsed_time <= 0xFFFF ? 2 : 4;
 }
 
+/* FIXME: This function is currently not used anywhere */
 int dccp_insert_option_elapsed_time(struct sk_buff *skb, u32 elapsed_time)
 {
        const int elapsed_time_len = dccp_elapsed_time_len(elapsed_time);
@@ -424,6 +425,67 @@ static int dccp_insert_option_timestamp_echo(struct dccp_sock *dp,
        return 0;
 }
 
+static int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb)
+{
+       struct dccp_sock *dp = dccp_sk(sk);
+       struct dccp_ackvec *av = dp->dccps_hc_rx_ackvec;
+       const u16 buflen = dccp_ackvec_buflen(av);
+       /* Figure out how many options do we need to represent the ackvec */
+       const u8 nr_opts = DIV_ROUND_UP(buflen, DCCP_SINGLE_OPT_MAXLEN);
+       u16 len = buflen + 2 * nr_opts;
+       u8 i, nonce = 0;
+       const unsigned char *tail, *from;
+       unsigned char *to;
+
+       if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN)
+               return -1;
+
+       DCCP_SKB_CB(skb)->dccpd_opt_len += len;
+
+       to   = skb_push(skb, len);
+       len  = buflen;
+       from = av->av_buf + av->av_buf_head;
+       tail = av->av_buf + DCCPAV_MAX_ACKVEC_LEN;
+
+       for (i = 0; i < nr_opts; ++i) {
+               int copylen = len;
+
+               if (len > DCCP_SINGLE_OPT_MAXLEN)
+                       copylen = DCCP_SINGLE_OPT_MAXLEN;
+
+               /*
+                * RFC 4340, 12.2: Encode the Nonce Echo for this Ack Vector via
+                * its type; ack_nonce is the sum of all individual buf_nonce's.
+                */
+               nonce ^= av->av_buf_nonce[i];
+
+               *to++ = DCCPO_ACK_VECTOR_0 + av->av_buf_nonce[i];
+               *to++ = copylen + 2;
+
+               /* Check if buf_head wraps */
+               if (from + copylen > tail) {
+                       const u16 tailsize = tail - from;
+
+                       memcpy(to, from, tailsize);
+                       to      += tailsize;
+                       len     -= tailsize;
+                       copylen -= tailsize;
+                       from    = av->av_buf;
+               }
+
+               memcpy(to, from, copylen);
+               from += copylen;
+               to   += copylen;
+               len  -= copylen;
+       }
+       /*
+        * Each sent Ack Vector is recorded in the list, as per A.2 of RFC 4340.
+        */
+       if (dccp_ackvec_update_records(av, DCCP_SKB_CB(skb)->dccpd_seq, nonce))
+               return -ENOBUFS;
+       return 0;
+}
+
 /**
  * dccp_insert_option_mandatory  -  Mandatory option (5.8.2)
  * Note that since we are using skb_push, this function needs to be called
@@ -519,8 +581,7 @@ int dccp_insert_options(struct sock *sk, struct sk_buff *skb)
                        if (dccp_insert_option_timestamp(skb))
                                return -1;
 
-               } else if (dp->dccps_hc_rx_ackvec != NULL &&
-                          dccp_ackvec_pending(dp->dccps_hc_rx_ackvec) &&
+               } else if (dccp_ackvec_pending(sk) &&
                           dccp_insert_option_ackvec(sk, skb)) {
                                return -1;
                }
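
A single DCCP option can carry at most DCCP_SINGLE_OPT_MAXLEN bytes of Ack Vector data, so a longer live buffer is split across several Ack Vector options, each costing two extra bytes of type/length header, and the option type chosen per slice (DCCPO_ACK_VECTOR_0 plus that slice's nonce) is what carries the Nonce Echo. The length bookkeeping at the top of dccp_insert_option_ackvec() then works out as in this standalone sketch (253 is the assumed DCCP_SINGLE_OPT_MAXLEN; the buffer length is just an example):

#include <stdio.h>

#define DCCP_SINGLE_OPT_MAXLEN	253	/* assumed max Ack Vector bytes per option */

int main(void)
{
	unsigned int buflen = 300;	/* example dccp_ackvec_buflen() result */
	unsigned int nr_opts = (buflen + DCCP_SINGLE_OPT_MAXLEN - 1) /
			       DCCP_SINGLE_OPT_MAXLEN;		/* DIV_ROUND_UP */
	unsigned int len = buflen + 2 * nr_opts;	/* + type/len byte pair per option */

	printf("buflen=%u -> %u Ack Vector options, %u option bytes total\n",
	       buflen, nr_opts, len);	/* buflen=300 -> 2 options, 304 bytes */
	return 0;
}
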
index a76b78de679fa7e928cfae7b62c0a7e73c2256dd..9ecef9968c3940026deefba772fa568af7237bcc 100644 (file)
@@ -1848,7 +1848,7 @@ unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu)
 {
        unsigned mss = 230 - DN_MAX_NSP_DATA_HEADER;
        if (dev) {
-               struct dn_dev *dn_db = dev->dn_ptr;
+               struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
                mtu -= LL_RESERVED_SPACE(dev);
                if (dn_db->use_long)
                        mtu -= 21;
index 4c409b46aa35c0bb13bc0b99cec4f43dd2da0690..0ba15633c4184484fb46e99c50fdb0608a325046 100644 (file)
@@ -267,7 +267,7 @@ static int dn_forwarding_proc(ctl_table *table, int write,
        if (table->extra1 == NULL)
                return -EINVAL;
 
-       dn_db = dev->dn_ptr;
+       dn_db = rcu_dereference_raw(dev->dn_ptr);
        old = dn_db->parms.forwarding;
 
        err = proc_dointvec(table, write, buffer, lenp, ppos);
@@ -332,14 +332,19 @@ static struct dn_ifaddr *dn_dev_alloc_ifa(void)
        return ifa;
 }
 
-static __inline__ void dn_dev_free_ifa(struct dn_ifaddr *ifa)
+static void dn_dev_free_ifa_rcu(struct rcu_head *head)
 {
-       kfree(ifa);
+       kfree(container_of(head, struct dn_ifaddr, rcu));
 }
 
-static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr **ifap, int destroy)
+static void dn_dev_free_ifa(struct dn_ifaddr *ifa)
 {
-       struct dn_ifaddr *ifa1 = *ifap;
+       call_rcu(&ifa->rcu, dn_dev_free_ifa_rcu);
+}
+
+static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr __rcu **ifap, int destroy)
+{
+       struct dn_ifaddr *ifa1 = rtnl_dereference(*ifap);
        unsigned char mac_addr[6];
        struct net_device *dev = dn_db->dev;
 
@@ -373,7 +378,9 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
        ASSERT_RTNL();
 
        /* Check for duplicates */
-       for(ifa1 = dn_db->ifa_list; ifa1; ifa1 = ifa1->ifa_next) {
+       for (ifa1 = rtnl_dereference(dn_db->ifa_list);
+            ifa1 != NULL;
+            ifa1 = rtnl_dereference(ifa1->ifa_next)) {
                if (ifa1->ifa_local == ifa->ifa_local)
                        return -EEXIST;
        }
@@ -386,7 +393,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
        }
 
        ifa->ifa_next = dn_db->ifa_list;
-       dn_db->ifa_list = ifa;
+       rcu_assign_pointer(dn_db->ifa_list, ifa);
 
        dn_ifaddr_notify(RTM_NEWADDR, ifa);
        blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa);
@@ -396,7 +403,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
 
 static int dn_dev_set_ifa(struct net_device *dev, struct dn_ifaddr *ifa)
 {
-       struct dn_dev *dn_db = dev->dn_ptr;
+       struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
        int rv;
 
        if (dn_db == NULL) {
@@ -425,7 +432,8 @@ int dn_dev_ioctl(unsigned int cmd, void __user *arg)
        struct sockaddr_dn *sdn = (struct sockaddr_dn *)&ifr->ifr_addr;
        struct dn_dev *dn_db;
        struct net_device *dev;
-       struct dn_ifaddr *ifa = NULL, **ifap = NULL;
+       struct dn_ifaddr *ifa = NULL;
+       struct dn_ifaddr __rcu **ifap = NULL;
        int ret = 0;
 
        if (copy_from_user(ifr, arg, DN_IFREQ_SIZE))
@@ -454,8 +462,10 @@ int dn_dev_ioctl(unsigned int cmd, void __user *arg)
                goto done;
        }
 
-       if ((dn_db = dev->dn_ptr) != NULL) {
-               for (ifap = &dn_db->ifa_list; (ifa=*ifap) != NULL; ifap = &ifa->ifa_next)
+       if ((dn_db = rtnl_dereference(dev->dn_ptr)) != NULL) {
+               for (ifap = &dn_db->ifa_list;
+                    (ifa = rtnl_dereference(*ifap)) != NULL;
+                    ifap = &ifa->ifa_next)
                        if (strcmp(ifr->ifr_name, ifa->ifa_label) == 0)
                                break;
        }
@@ -558,7 +568,7 @@ static struct dn_dev *dn_dev_by_index(int ifindex)
 
        dev = __dev_get_by_index(&init_net, ifindex);
        if (dev)
-               dn_dev = dev->dn_ptr;
+               dn_dev = rtnl_dereference(dev->dn_ptr);
 
        return dn_dev;
 }
@@ -576,7 +586,8 @@ static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
        struct nlattr *tb[IFA_MAX+1];
        struct dn_dev *dn_db;
        struct ifaddrmsg *ifm;
-       struct dn_ifaddr *ifa, **ifap;
+       struct dn_ifaddr *ifa;
+       struct dn_ifaddr __rcu **ifap;
        int err = -EINVAL;
 
        if (!net_eq(net, &init_net))
@@ -592,7 +603,9 @@ static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
                goto errout;
 
        err = -EADDRNOTAVAIL;
-       for (ifap = &dn_db->ifa_list; (ifa = *ifap); ifap = &ifa->ifa_next) {
+       for (ifap = &dn_db->ifa_list;
+            (ifa = rtnl_dereference(*ifap)) != NULL;
+            ifap = &ifa->ifa_next) {
                if (tb[IFA_LOCAL] &&
                    nla_memcmp(tb[IFA_LOCAL], &ifa->ifa_local, 2))
                        continue;
@@ -632,7 +645,7 @@ static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
        if ((dev = __dev_get_by_index(&init_net, ifm->ifa_index)) == NULL)
                return -ENODEV;
 
-       if ((dn_db = dev->dn_ptr) == NULL) {
+       if ((dn_db = rtnl_dereference(dev->dn_ptr)) == NULL) {
                dn_db = dn_dev_create(dev, &err);
                if (!dn_db)
                        return err;
@@ -748,11 +761,11 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
                        skip_naddr = 0;
                }
 
-               if ((dn_db = dev->dn_ptr) == NULL)
+               if ((dn_db = rtnl_dereference(dev->dn_ptr)) == NULL)
                        goto cont;
 
-               for (ifa = dn_db->ifa_list, dn_idx = 0; ifa;
-                    ifa = ifa->ifa_next, dn_idx++) {
+               for (ifa = rtnl_dereference(dn_db->ifa_list), dn_idx = 0; ifa;
+                    ifa = rtnl_dereference(ifa->ifa_next), dn_idx++) {
                        if (dn_idx < skip_naddr)
                                continue;
 
@@ -773,21 +786,22 @@ done:
 
 static int dn_dev_get_first(struct net_device *dev, __le16 *addr)
 {
-       struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;
+       struct dn_dev *dn_db;
        struct dn_ifaddr *ifa;
        int rv = -ENODEV;
 
+       rcu_read_lock();
+       dn_db = rcu_dereference(dev->dn_ptr);
        if (dn_db == NULL)
                goto out;
 
-       rtnl_lock();
-       ifa = dn_db->ifa_list;
+       ifa = rcu_dereference(dn_db->ifa_list);
        if (ifa != NULL) {
                *addr = ifa->ifa_local;
                rv = 0;
        }
-       rtnl_unlock();
 out:
+       rcu_read_unlock();
        return rv;
 }
 
@@ -823,7 +837,7 @@ static void dn_send_endnode_hello(struct net_device *dev, struct dn_ifaddr *ifa)
        struct endnode_hello_message *msg;
        struct sk_buff *skb = NULL;
        __le16 *pktlen;
-       struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;
+       struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
 
        if ((skb = dn_alloc_skb(NULL, sizeof(*msg), GFP_ATOMIC)) == NULL)
                return;
@@ -889,7 +903,7 @@ static int dn_am_i_a_router(struct dn_neigh *dn, struct dn_dev *dn_db, struct dn
 static void dn_send_router_hello(struct net_device *dev, struct dn_ifaddr *ifa)
 {
        int n;
-       struct dn_dev *dn_db = dev->dn_ptr;
+       struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
        struct dn_neigh *dn = (struct dn_neigh *)dn_db->router;
        struct sk_buff *skb;
        size_t size;
@@ -960,7 +974,7 @@ static void dn_send_router_hello(struct net_device *dev, struct dn_ifaddr *ifa)
 
 static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa)
 {
-       struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;
+       struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
 
        if (dn_db->parms.forwarding == 0)
                dn_send_endnode_hello(dev, ifa);
@@ -998,7 +1012,7 @@ static void dn_send_ptp_hello(struct net_device *dev, struct dn_ifaddr *ifa)
 
 static int dn_eth_up(struct net_device *dev)
 {
-       struct dn_dev *dn_db = dev->dn_ptr;
+       struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
 
        if (dn_db->parms.forwarding == 0)
                dev_mc_add(dev, dn_rt_all_end_mcast);
@@ -1012,7 +1026,7 @@ static int dn_eth_up(struct net_device *dev)
 
 static void dn_eth_down(struct net_device *dev)
 {
-       struct dn_dev *dn_db = dev->dn_ptr;
+       struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
 
        if (dn_db->parms.forwarding == 0)
                dev_mc_del(dev, dn_rt_all_end_mcast);
@@ -1025,12 +1039,16 @@ static void dn_dev_set_timer(struct net_device *dev);
 static void dn_dev_timer_func(unsigned long arg)
 {
        struct net_device *dev = (struct net_device *)arg;
-       struct dn_dev *dn_db = dev->dn_ptr;
+       struct dn_dev *dn_db;
        struct dn_ifaddr *ifa;
 
+       rcu_read_lock();
+       dn_db = rcu_dereference(dev->dn_ptr);
        if (dn_db->t3 <= dn_db->parms.t2) {
                if (dn_db->parms.timer3) {
-                       for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) {
+                       for (ifa = rcu_dereference(dn_db->ifa_list);
+                            ifa;
+                            ifa = rcu_dereference(ifa->ifa_next)) {
                                if (!(ifa->ifa_flags & IFA_F_SECONDARY))
                                        dn_db->parms.timer3(dev, ifa);
                        }
@@ -1039,13 +1057,13 @@ static void dn_dev_timer_func(unsigned long arg)
        } else {
                dn_db->t3 -= dn_db->parms.t2;
        }
-
+       rcu_read_unlock();
        dn_dev_set_timer(dev);
 }
 
 static void dn_dev_set_timer(struct net_device *dev)
 {
-       struct dn_dev *dn_db = dev->dn_ptr;
+       struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
 
        if (dn_db->parms.t2 > dn_db->parms.t3)
                dn_db->parms.t2 = dn_db->parms.t3;
@@ -1077,8 +1095,8 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
                return NULL;
 
        memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms));
-       smp_wmb();
-       dev->dn_ptr = dn_db;
+
+       rcu_assign_pointer(dev->dn_ptr, dn_db);
        dn_db->dev = dev;
        init_timer(&dn_db->timer);
 
@@ -1086,7 +1104,7 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
 
        dn_db->neigh_parms = neigh_parms_alloc(dev, &dn_neigh_table);
        if (!dn_db->neigh_parms) {
-               dev->dn_ptr = NULL;
+               rcu_assign_pointer(dev->dn_ptr, NULL);
                kfree(dn_db);
                return NULL;
        }
@@ -1125,7 +1143,7 @@ void dn_dev_up(struct net_device *dev)
        struct dn_ifaddr *ifa;
        __le16 addr = decnet_address;
        int maybe_default = 0;
-       struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;
+       struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
 
        if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK))
                return;
@@ -1176,7 +1194,7 @@ void dn_dev_up(struct net_device *dev)
 
 static void dn_dev_delete(struct net_device *dev)
 {
-       struct dn_dev *dn_db = dev->dn_ptr;
+       struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
 
        if (dn_db == NULL)
                return;
@@ -1204,13 +1222,13 @@ static void dn_dev_delete(struct net_device *dev)
 
 void dn_dev_down(struct net_device *dev)
 {
-       struct dn_dev *dn_db = dev->dn_ptr;
+       struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
        struct dn_ifaddr *ifa;
 
        if (dn_db == NULL)
                return;
 
-       while((ifa = dn_db->ifa_list) != NULL) {
+       while ((ifa = rtnl_dereference(dn_db->ifa_list)) != NULL) {
                dn_dev_del_ifa(dn_db, &dn_db->ifa_list, 0);
                dn_dev_free_ifa(ifa);
        }
@@ -1270,7 +1288,7 @@ static inline int is_dn_dev(struct net_device *dev)
 }
 
 static void *dn_dev_seq_start(struct seq_file *seq, loff_t *pos)
-       __acquires(rcu)
+       __acquires(RCU)
 {
        int i;
        struct net_device *dev;
@@ -1313,7 +1331,7 @@ static void *dn_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 }
 
 static void dn_dev_seq_stop(struct seq_file *seq, void *v)
-       __releases(rcu)
+       __releases(RCU)
 {
        rcu_read_unlock();
 }
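
The DECnet conversion follows the usual RCU split: read-side code takes rcu_read_lock() and uses rcu_dereference(), the update side (already serialized by RTNL) uses rtnl_dereference(), new pointers are published with rcu_assign_pointer(), and dn_ifaddr entries are freed through call_rcu() so a concurrent reader never touches freed memory; dn_route.c below applies the same idea with rcu_dereference_protected() and lockdep_is_held() where a per-bucket spinlock, not RTNL, is the write-side lock. A condensed, illustrative writer-side sketch of the address-removal pattern (the function is hypothetical; it simply mirrors dn_dev_del_ifa()/dn_dev_free_ifa() above):

/* Illustrative only: unlink one address from dn_db->ifa_list under RTNL
 * and let dn_dev_free_ifa() defer the kfree() past an RCU grace period. */
static void sample_del_ifa(struct dn_dev *dn_db, __le16 addr)
{
	struct dn_ifaddr __rcu **ifap;
	struct dn_ifaddr *ifa;

	ASSERT_RTNL();

	for (ifap = &dn_db->ifa_list;
	     (ifa = rtnl_dereference(*ifap)) != NULL;
	     ifap = &ifa->ifa_next) {
		if (ifa->ifa_local == addr) {
			/* Unlinking needs no barrier: readers either still see
			 * the old entry (kept alive by RCU) or skip past it. */
			*ifap = ifa->ifa_next;
			dn_dev_free_ifa(ifa);
			return;
		}
	}
}
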
@@ -1340,7 +1358,7 @@ static int dn_dev_seq_show(struct seq_file *seq, void *v)
                struct net_device *dev = v;
                char peer_buf[DN_ASCBUF_LEN];
                char router_buf[DN_ASCBUF_LEN];
-               struct dn_dev *dn_db = dev->dn_ptr;
+               struct dn_dev *dn_db = rcu_dereference(dev->dn_ptr);
 
                seq_printf(seq, "%-8s %1s     %04u %04u   %04lu %04lu"
                                "   %04hu    %03d %02x    %-10s %-7s %-7s\n",
index 4ab96c15166d4930dd49772e41734f3eca3968f5..0ef0a81bcd72b24afaedebd506c255de3452dad0 100644 (file)
@@ -610,10 +610,12 @@ static void dn_fib_del_ifaddr(struct dn_ifaddr *ifa)
        /* Scan device list */
        rcu_read_lock();
        for_each_netdev_rcu(&init_net, dev) {
-               dn_db = dev->dn_ptr;
+               dn_db = rcu_dereference(dev->dn_ptr);
                if (dn_db == NULL)
                        continue;
-               for(ifa2 = dn_db->ifa_list; ifa2; ifa2 = ifa2->ifa_next) {
+               for (ifa2 = rcu_dereference(dn_db->ifa_list);
+                    ifa2 != NULL;
+                    ifa2 = rcu_dereference(ifa2->ifa_next)) {
                        if (ifa2->ifa_local == ifa->ifa_local) {
                                found_it = 1;
                                break;
index a085dbcf5c7fa4fde69419dd135c8c8570bb4fb2..602dade7e9a3576905ae6f1d1dc927df7c8f8b63 100644 (file)
@@ -391,7 +391,7 @@ int dn_neigh_router_hello(struct sk_buff *skb)
                write_lock(&neigh->lock);
 
                neigh->used = jiffies;
-               dn_db = (struct dn_dev *)neigh->dev->dn_ptr;
+               dn_db = rcu_dereference(neigh->dev->dn_ptr);
 
                if (!(neigh->nud_state & NUD_PERMANENT)) {
                        neigh->updated = jiffies;
index df0f3e54ff8aba58dac157ab2b2c866453437310..474d54dd08c26f4ba612c7526632acf33f05d411 100644 (file)
@@ -93,7 +93,7 @@
 
 struct dn_rt_hash_bucket
 {
-       struct dn_route *chain;
+       struct dn_route __rcu *chain;
        spinlock_t lock;
 };
 
@@ -157,15 +157,17 @@ static inline void dnrt_drop(struct dn_route *rt)
 static void dn_dst_check_expire(unsigned long dummy)
 {
        int i;
-       struct dn_route *rt, **rtp;
+       struct dn_route *rt;
+       struct dn_route __rcu **rtp;
        unsigned long now = jiffies;
        unsigned long expire = 120 * HZ;
 
-       for(i = 0; i <= dn_rt_hash_mask; i++) {
+       for (i = 0; i <= dn_rt_hash_mask; i++) {
                rtp = &dn_rt_hash_table[i].chain;
 
                spin_lock(&dn_rt_hash_table[i].lock);
-               while((rt=*rtp) != NULL) {
+               while ((rt = rcu_dereference_protected(*rtp,
+                                               lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
                        if (atomic_read(&rt->dst.__refcnt) ||
                                        (now - rt->dst.lastuse) < expire) {
                                rtp = &rt->dst.dn_next;
@@ -186,17 +188,19 @@ static void dn_dst_check_expire(unsigned long dummy)
 
 static int dn_dst_gc(struct dst_ops *ops)
 {
-       struct dn_route *rt, **rtp;
+       struct dn_route *rt;
+       struct dn_route __rcu **rtp;
        int i;
        unsigned long now = jiffies;
        unsigned long expire = 10 * HZ;
 
-       for(i = 0; i <= dn_rt_hash_mask; i++) {
+       for (i = 0; i <= dn_rt_hash_mask; i++) {
 
                spin_lock_bh(&dn_rt_hash_table[i].lock);
                rtp = &dn_rt_hash_table[i].chain;
 
-               while((rt=*rtp) != NULL) {
+               while ((rt = rcu_dereference_protected(*rtp,
+                                               lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
                        if (atomic_read(&rt->dst.__refcnt) ||
                                        (now - rt->dst.lastuse) < expire) {
                                rtp = &rt->dst.dn_next;
@@ -227,7 +231,7 @@ static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
        u32 min_mtu = 230;
        struct dn_dev *dn = dst->neighbour ?
-                           (struct dn_dev *)dst->neighbour->dev->dn_ptr : NULL;
+                           rcu_dereference_raw(dst->neighbour->dev->dn_ptr) : NULL;
 
        if (dn && dn->use_long == 0)
                min_mtu -= 6;
@@ -277,13 +281,15 @@ static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
 
 static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp)
 {
-       struct dn_route *rth, **rthp;
+       struct dn_route *rth;
+       struct dn_route __rcu **rthp;
        unsigned long now = jiffies;
 
        rthp = &dn_rt_hash_table[hash].chain;
 
        spin_lock_bh(&dn_rt_hash_table[hash].lock);
-       while((rth = *rthp) != NULL) {
+       while ((rth = rcu_dereference_protected(*rthp,
+                                               lockdep_is_held(&dn_rt_hash_table[hash].lock))) != NULL) {
                if (compare_keys(&rth->fl, &rt->fl)) {
                        /* Put it first */
                        *rthp = rth->dst.dn_next;
@@ -315,15 +321,15 @@ static void dn_run_flush(unsigned long dummy)
        int i;
        struct dn_route *rt, *next;
 
-       for(i = 0; i < dn_rt_hash_mask; i++) {
+       for (i = 0; i < dn_rt_hash_mask; i++) {
                spin_lock_bh(&dn_rt_hash_table[i].lock);
 
-               if ((rt = xchg(&dn_rt_hash_table[i].chain, NULL)) == NULL)
+               if ((rt = xchg((struct dn_route **)&dn_rt_hash_table[i].chain, NULL)) == NULL)
                        goto nothing_to_declare;
 
-               for(; rt; rt=next) {
-                       next = rt->dst.dn_next;
-                       rt->dst.dn_next = NULL;
+               for(; rt; rt = next) {
+                       next = rcu_dereference_raw(rt->dst.dn_next);
+                       RCU_INIT_POINTER(rt->dst.dn_next, NULL);
                        dst_free((struct dst_entry *)rt);
                }
 
@@ -458,15 +464,16 @@ static int dn_return_long(struct sk_buff *skb)
  */
 static int dn_route_rx_packet(struct sk_buff *skb)
 {
-       struct dn_skb_cb *cb = DN_SKB_CB(skb);
+       struct dn_skb_cb *cb;
        int err;
 
        if ((err = dn_route_input(skb)) == 0)
                return dst_input(skb);
 
+       cb = DN_SKB_CB(skb);
        if (decnet_debug_level & 4) {
                char *devname = skb->dev ? skb->dev->name : "???";
-               struct dn_skb_cb *cb = DN_SKB_CB(skb);
+
                printk(KERN_DEBUG
                        "DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n",
                        (int)cb->rt_flags, devname, skb->len,
@@ -573,7 +580,7 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
        struct dn_skb_cb *cb;
        unsigned char flags = 0;
        __u16 len = le16_to_cpu(*(__le16 *)skb->data);
-       struct dn_dev *dn = (struct dn_dev *)dev->dn_ptr;
+       struct dn_dev *dn = rcu_dereference(dev->dn_ptr);
        unsigned char padlen = 0;
 
        if (!net_eq(dev_net(dev), &init_net))
@@ -728,7 +735,7 @@ static int dn_forward(struct sk_buff *skb)
 {
        struct dn_skb_cb *cb = DN_SKB_CB(skb);
        struct dst_entry *dst = skb_dst(skb);
-       struct dn_dev *dn_db = dst->dev->dn_ptr;
+       struct dn_dev *dn_db = rcu_dereference(dst->dev->dn_ptr);
        struct dn_route *rt;
        struct neighbour *neigh = dst->neighbour;
        int header_len;
@@ -835,13 +842,16 @@ static inline int dn_match_addr(__le16 addr1, __le16 addr2)
 static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int scope)
 {
        __le16 saddr = 0;
-       struct dn_dev *dn_db = dev->dn_ptr;
+       struct dn_dev *dn_db;
        struct dn_ifaddr *ifa;
        int best_match = 0;
        int ret;
 
-       read_lock(&dev_base_lock);
-       for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) {
+       rcu_read_lock();
+       dn_db = rcu_dereference(dev->dn_ptr);
+       for (ifa = rcu_dereference(dn_db->ifa_list);
+            ifa != NULL;
+            ifa = rcu_dereference(ifa->ifa_next)) {
                if (ifa->ifa_scope > scope)
                        continue;
                if (!daddr) {
@@ -854,7 +864,7 @@ static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int
                if (best_match == 0)
                        saddr = ifa->ifa_local;
        }
-       read_unlock(&dev_base_lock);
+       rcu_read_unlock();
 
        return saddr;
 }
@@ -1020,7 +1030,7 @@ source_ok:
                err = -ENODEV;
                if (dev_out == NULL)
                        goto out;
-               dn_db = dev_out->dn_ptr;
+               dn_db = rcu_dereference_raw(dev_out->dn_ptr);
                /* Possible improvement - check all devices for local addr */
                if (dn_dev_islocal(dev_out, fl.fld_dst)) {
                        dev_put(dev_out);
@@ -1171,7 +1181,7 @@ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *fl
                        if ((flp->fld_dst == rt->fl.fld_dst) &&
                            (flp->fld_src == rt->fl.fld_src) &&
                            (flp->mark == rt->fl.mark) &&
-                           (rt->fl.iif == 0) &&
+                           dn_is_output_route(rt) &&
                            (rt->fl.oif == flp->oif)) {
                                dst_use(&rt->dst, jiffies);
                                rcu_read_unlock_bh();
@@ -1233,7 +1243,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
 
        dev_hold(in_dev);
 
-       if ((dn_db = in_dev->dn_ptr) == NULL)
+       if ((dn_db = rcu_dereference(in_dev->dn_ptr)) == NULL)
                goto out;
 
        /* Zero source addresses are not allowed */
@@ -1502,7 +1512,7 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
        if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0, expires,
                               rt->dst.error) < 0)
                goto rtattr_failure;
-       if (rt->fl.iif)
+       if (dn_is_input_route(rt))
                RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);
 
        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
@@ -1677,15 +1687,15 @@ static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_rou
 {
        struct dn_rt_cache_iter_state *s = seq->private;
 
-       rt = rt->dst.dn_next;
-       while(!rt) {
+       rt = rcu_dereference_bh(rt->dst.dn_next);
+       while (!rt) {
                rcu_read_unlock_bh();
                if (--s->bucket < 0)
                        break;
                rcu_read_lock_bh();
-               rt = dn_rt_hash_table[s->bucket].chain;
+               rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
        }
-       return rcu_dereference_bh(rt);
+       return rt;
 }
 
 static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
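
The DECnet route cache hunks above (net/decnet/dn_route.c) annotate the
per-bucket chain pointers as __rcu, walk them on the write side with
rcu_dereference_protected() while holding the bucket spinlock, and keep
rcu_dereference_bh() for the lockless /proc iterator. A minimal sketch of
the write-side pattern follows, assuming only the kernel's RCU, lockdep and
jiffies helpers; the names (struct item, struct bucket, chain_prune) are
illustrative and not taken from the patch.

#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
        unsigned long           expires;
        struct item __rcu       *next;
        struct rcu_head         rcu;
};

struct bucket {
        struct item __rcu       *chain; /* readers: rcu_dereference(); writers: lock */
        spinlock_t              lock;
};

static void item_reclaim(struct rcu_head *head)
{
        kfree(container_of(head, struct item, rcu));
}

/* Prune expired entries under the bucket lock. rcu_dereference_protected()
 * documents -- and lets lockdep verify -- that the spinlock, not
 * rcu_read_lock(), is what makes the plain load safe here. */
static void chain_prune(struct bucket *b, unsigned long now)
{
        struct item __rcu **pp = &b->chain;
        struct item *it;

        spin_lock_bh(&b->lock);
        while ((it = rcu_dereference_protected(*pp,
                                lockdep_is_held(&b->lock))) != NULL) {
                if (time_before(now, it->expires)) {
                        pp = &it->next;
                        continue;
                }
                /* Unlink now, free only after a grace period so that
                 * concurrent rcu_read_lock() readers stay safe. */
                rcu_assign_pointer(*pp, rcu_dereference_protected(it->next,
                                lockdep_is_held(&b->lock)));
                call_rcu(&it->rcu, item_reclaim);
        }
        spin_unlock_bh(&b->lock);
}
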
index 96bc7f9475a3f2f0124685cc1b7afb7b58c23d8d..c6e2affafbd37df034e2eabbb71e18244b4f2ef2 100644 (file)
@@ -506,8 +506,8 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
                struct net_device *dev = NULL;
 
                rcu_read_lock();
-               if (rt->fl.iif &&
-                       net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
+               if (rt_is_input_route(rt) &&
+                   net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
                        dev = dev_get_by_index_rcu(net, rt->fl.iif);
 
                if (dev)
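
This icmp.c hunk, like the ip_gre.c, ipmr.c, route.c and ip_vs_xmit.c hunks
further down, replaces open-coded rt->fl.iif tests with the
rt_is_input_route()/rt_is_output_route() helpers introduced in
include/net/route.h by this merge (the DECnet hunks use
dn_is_input_route()/dn_is_output_route() the same way). Judging only from
the call sites they replace, the helpers presumably reduce to the sketch
below; the headers in this tree carry the authoritative definitions.

/* Sketch only -- see include/net/route.h and include/net/dn_route.h. */
static inline bool rt_is_input_route(struct rtable *rt)
{
        return rt->fl.iif != 0;         /* built for received traffic */
}

static inline bool rt_is_output_route(struct rtable *rt)
{
        return rt->fl.iif == 0;         /* built for locally generated traffic */
}
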
index 3c53c2d89e3b47b3e42629bfe37086aeeeda7aa8..0f0e0f0279b8f3a7b6b565f78137e87495f1e07e 100644 (file)
@@ -149,11 +149,17 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc);
 static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
                         int sfcount, __be32 *psfsrc, int delta);
 
+
+static void ip_mc_list_reclaim(struct rcu_head *head)
+{
+       kfree(container_of(head, struct ip_mc_list, rcu));
+}
+
 static void ip_ma_put(struct ip_mc_list *im)
 {
        if (atomic_dec_and_test(&im->refcnt)) {
                in_dev_put(im->interface);
-               kfree(im);
+               call_rcu(&im->rcu, ip_mc_list_reclaim);
        }
 }
 
@@ -163,7 +169,7 @@ static void ip_ma_put(struct ip_mc_list *im)
  *     Timer management
  */
 
-static __inline__ void igmp_stop_timer(struct ip_mc_list *im)
+static void igmp_stop_timer(struct ip_mc_list *im)
 {
        spin_lock_bh(&im->lock);
        if (del_timer(&im->timer))
@@ -496,14 +502,24 @@ empty_source:
        return skb;
 }
 
+#define for_each_pmc_rcu(in_dev, pmc)                          \
+       for (pmc = rcu_dereference(in_dev->mc_list);            \
+            pmc != NULL;                                       \
+            pmc = rcu_dereference(pmc->next_rcu))
+
+#define for_each_pmc_rtnl(in_dev, pmc)                         \
+       for (pmc = rtnl_dereference(in_dev->mc_list);           \
+            pmc != NULL;                                       \
+            pmc = rtnl_dereference(pmc->next_rcu))
+
 static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
 {
        struct sk_buff *skb = NULL;
        int type;
 
        if (!pmc) {
-               read_lock(&in_dev->mc_list_lock);
-               for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
+               rcu_read_lock();
+               for_each_pmc_rcu(in_dev, pmc) {
                        if (pmc->multiaddr == IGMP_ALL_HOSTS)
                                continue;
                        spin_lock_bh(&pmc->lock);
@@ -514,7 +530,7 @@ static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
                        skb = add_grec(skb, pmc, type, 0, 0);
                        spin_unlock_bh(&pmc->lock);
                }
-               read_unlock(&in_dev->mc_list_lock);
+               rcu_read_unlock();
        } else {
                spin_lock_bh(&pmc->lock);
                if (pmc->sfcount[MCAST_EXCLUDE])
@@ -556,7 +572,7 @@ static void igmpv3_send_cr(struct in_device *in_dev)
        struct sk_buff *skb = NULL;
        int type, dtype;
 
-       read_lock(&in_dev->mc_list_lock);
+       rcu_read_lock();
        spin_lock_bh(&in_dev->mc_tomb_lock);
 
        /* deleted MCA's */
@@ -593,7 +609,7 @@ static void igmpv3_send_cr(struct in_device *in_dev)
        spin_unlock_bh(&in_dev->mc_tomb_lock);
 
        /* change recs */
-       for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
+       for_each_pmc_rcu(in_dev, pmc) {
                spin_lock_bh(&pmc->lock);
                if (pmc->sfcount[MCAST_EXCLUDE]) {
                        type = IGMPV3_BLOCK_OLD_SOURCES;
@@ -616,7 +632,7 @@ static void igmpv3_send_cr(struct in_device *in_dev)
                }
                spin_unlock_bh(&pmc->lock);
        }
-       read_unlock(&in_dev->mc_list_lock);
+       rcu_read_unlock();
 
        if (!skb)
                return;
@@ -813,14 +829,14 @@ static void igmp_heard_report(struct in_device *in_dev, __be32 group)
        if (group == IGMP_ALL_HOSTS)
                return;
 
-       read_lock(&in_dev->mc_list_lock);
-       for (im=in_dev->mc_list; im!=NULL; im=im->next) {
+       rcu_read_lock();
+       for_each_pmc_rcu(in_dev, im) {
                if (im->multiaddr == group) {
                        igmp_stop_timer(im);
                        break;
                }
        }
-       read_unlock(&in_dev->mc_list_lock);
+       rcu_read_unlock();
 }
 
 static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
@@ -906,8 +922,8 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
         * - Use the igmp->igmp_code field as the maximum
         *   delay possible
         */
-       read_lock(&in_dev->mc_list_lock);
-       for (im=in_dev->mc_list; im!=NULL; im=im->next) {
+       rcu_read_lock();
+       for_each_pmc_rcu(in_dev, im) {
                int changed;
 
                if (group && group != im->multiaddr)
@@ -925,7 +941,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
                if (changed)
                        igmp_mod_timer(im, max_delay);
        }
-       read_unlock(&in_dev->mc_list_lock);
+       rcu_read_unlock();
 }
 
 /* called in rcu_read_lock() section */
@@ -961,7 +977,7 @@ int igmp_rcv(struct sk_buff *skb)
        case IGMP_HOST_MEMBERSHIP_REPORT:
        case IGMPV2_HOST_MEMBERSHIP_REPORT:
                /* Is it our report looped back? */
-               if (skb_rtable(skb)->fl.iif == 0)
+               if (rt_is_output_route(skb_rtable(skb)))
                        break;
                /* don't rely on MC router hearing unicast reports */
                if (skb->pkt_type == PACKET_MULTICAST ||
@@ -1110,8 +1126,8 @@ static void igmpv3_clear_delrec(struct in_device *in_dev)
                kfree(pmc);
        }
        /* clear dead sources, too */
-       read_lock(&in_dev->mc_list_lock);
-       for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
+       rcu_read_lock();
+       for_each_pmc_rcu(in_dev, pmc) {
                struct ip_sf_list *psf, *psf_next;
 
                spin_lock_bh(&pmc->lock);
@@ -1123,7 +1139,7 @@ static void igmpv3_clear_delrec(struct in_device *in_dev)
                        kfree(psf);
                }
        }
-       read_unlock(&in_dev->mc_list_lock);
+       rcu_read_unlock();
 }
 #endif
 
@@ -1209,7 +1225,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
 
        ASSERT_RTNL();
 
-       for (im=in_dev->mc_list; im; im=im->next) {
+       for_each_pmc_rtnl(in_dev, im) {
                if (im->multiaddr == addr) {
                        im->users++;
                        ip_mc_add_src(in_dev, &addr, MCAST_EXCLUDE, 0, NULL, 0);
@@ -1217,7 +1233,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
                }
        }
 
-       im = kmalloc(sizeof(*im), GFP_KERNEL);
+       im = kzalloc(sizeof(*im), GFP_KERNEL);
        if (!im)
                goto out;
 
@@ -1227,26 +1243,18 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
        im->multiaddr = addr;
        /* initial mode is (EX, empty) */
        im->sfmode = MCAST_EXCLUDE;
-       im->sfcount[MCAST_INCLUDE] = 0;
        im->sfcount[MCAST_EXCLUDE] = 1;
-       im->sources = NULL;
-       im->tomb = NULL;
-       im->crcount = 0;
        atomic_set(&im->refcnt, 1);
        spin_lock_init(&im->lock);
 #ifdef CONFIG_IP_MULTICAST
-       im->tm_running = 0;
        setup_timer(&im->timer, &igmp_timer_expire, (unsigned long)im);
        im->unsolicit_count = IGMP_Unsolicited_Report_Count;
-       im->reporter = 0;
-       im->gsquery = 0;
 #endif
-       im->loaded = 0;
-       write_lock_bh(&in_dev->mc_list_lock);
-       im->next = in_dev->mc_list;
-       in_dev->mc_list = im;
+
+       im->next_rcu = in_dev->mc_list;
        in_dev->mc_count++;
-       write_unlock_bh(&in_dev->mc_list_lock);
+       rcu_assign_pointer(in_dev->mc_list, im);
+
 #ifdef CONFIG_IP_MULTICAST
        igmpv3_del_delrec(in_dev, im->multiaddr);
 #endif
@@ -1287,17 +1295,18 @@ EXPORT_SYMBOL(ip_mc_rejoin_group);
 
 void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
 {
-       struct ip_mc_list *i, **ip;
+       struct ip_mc_list *i;
+       struct ip_mc_list __rcu **ip;
 
        ASSERT_RTNL();
 
-       for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) {
+       for (ip = &in_dev->mc_list;
+            (i = rtnl_dereference(*ip)) != NULL;
+            ip = &i->next_rcu) {
                if (i->multiaddr == addr) {
                        if (--i->users == 0) {
-                               write_lock_bh(&in_dev->mc_list_lock);
-                               *ip = i->next;
+                               *ip = i->next_rcu;
                                in_dev->mc_count--;
-                               write_unlock_bh(&in_dev->mc_list_lock);
                                igmp_group_dropped(i);
 
                                if (!in_dev->dead)
@@ -1316,34 +1325,34 @@ EXPORT_SYMBOL(ip_mc_dec_group);
 
 void ip_mc_unmap(struct in_device *in_dev)
 {
-       struct ip_mc_list *i;
+       struct ip_mc_list *pmc;
 
        ASSERT_RTNL();
 
-       for (i = in_dev->mc_list; i; i = i->next)
-               igmp_group_dropped(i);
+       for_each_pmc_rtnl(in_dev, pmc)
+               igmp_group_dropped(pmc);
 }
 
 void ip_mc_remap(struct in_device *in_dev)
 {
-       struct ip_mc_list *i;
+       struct ip_mc_list *pmc;
 
        ASSERT_RTNL();
 
-       for (i = in_dev->mc_list; i; i = i->next)
-               igmp_group_added(i);
+       for_each_pmc_rtnl(in_dev, pmc)
+               igmp_group_added(pmc);
 }
 
 /* Device going down */
 
 void ip_mc_down(struct in_device *in_dev)
 {
-       struct ip_mc_list *i;
+       struct ip_mc_list *pmc;
 
        ASSERT_RTNL();
 
-       for (i=in_dev->mc_list; i; i=i->next)
-               igmp_group_dropped(i);
+       for_each_pmc_rtnl(in_dev, pmc)
+               igmp_group_dropped(pmc);
 
 #ifdef CONFIG_IP_MULTICAST
        in_dev->mr_ifc_count = 0;
@@ -1374,7 +1383,6 @@ void ip_mc_init_dev(struct in_device *in_dev)
        in_dev->mr_qrv = IGMP_Unsolicited_Report_Count;
 #endif
 
-       rwlock_init(&in_dev->mc_list_lock);
        spin_lock_init(&in_dev->mc_tomb_lock);
 }
 
@@ -1382,14 +1390,14 @@ void ip_mc_init_dev(struct in_device *in_dev)
 
 void ip_mc_up(struct in_device *in_dev)
 {
-       struct ip_mc_list *i;
+       struct ip_mc_list *pmc;
 
        ASSERT_RTNL();
 
        ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
 
-       for (i=in_dev->mc_list; i; i=i->next)
-               igmp_group_added(i);
+       for_each_pmc_rtnl(in_dev, pmc)
+               igmp_group_added(pmc);
 }
 
 /*
@@ -1405,17 +1413,13 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
        /* Deactivate timers */
        ip_mc_down(in_dev);
 
-       write_lock_bh(&in_dev->mc_list_lock);
-       while ((i = in_dev->mc_list) != NULL) {
-               in_dev->mc_list = i->next;
+       while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
+               in_dev->mc_list = i->next_rcu;
                in_dev->mc_count--;
-               write_unlock_bh(&in_dev->mc_list_lock);
+
                igmp_group_dropped(i);
                ip_ma_put(i);
-
-               write_lock_bh(&in_dev->mc_list_lock);
        }
-       write_unlock_bh(&in_dev->mc_list_lock);
 }
 
 /* RTNL is locked */
@@ -1513,18 +1517,18 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
 
        if (!in_dev)
                return -ENODEV;
-       read_lock(&in_dev->mc_list_lock);
-       for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
+       rcu_read_lock();
+       for_each_pmc_rcu(in_dev, pmc) {
                if (*pmca == pmc->multiaddr)
                        break;
        }
        if (!pmc) {
                /* MCA not found?? bug */
-               read_unlock(&in_dev->mc_list_lock);
+               rcu_read_unlock();
                return -ESRCH;
        }
        spin_lock_bh(&pmc->lock);
-       read_unlock(&in_dev->mc_list_lock);
+       rcu_read_unlock();
 #ifdef CONFIG_IP_MULTICAST
        sf_markstate(pmc);
 #endif
@@ -1685,18 +1689,18 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
 
        if (!in_dev)
                return -ENODEV;
-       read_lock(&in_dev->mc_list_lock);
-       for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
+       rcu_read_lock();
+       for_each_pmc_rcu(in_dev, pmc) {
                if (*pmca == pmc->multiaddr)
                        break;
        }
        if (!pmc) {
                /* MCA not found?? bug */
-               read_unlock(&in_dev->mc_list_lock);
+               rcu_read_unlock();
                return -ESRCH;
        }
        spin_lock_bh(&pmc->lock);
-       read_unlock(&in_dev->mc_list_lock);
+       rcu_read_unlock();
 
 #ifdef CONFIG_IP_MULTICAST
        sf_markstate(pmc);
@@ -1793,7 +1797,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
 
        err = -EADDRINUSE;
        ifindex = imr->imr_ifindex;
-       for (i = inet->mc_list; i; i = i->next) {
+       for_each_pmc_rtnl(inet, i) {
                if (i->multi.imr_multiaddr.s_addr == addr &&
                    i->multi.imr_ifindex == ifindex)
                        goto done;
@@ -1807,7 +1811,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
                goto done;
 
        memcpy(&iml->multi, imr, sizeof(*imr));
-       iml->next = inet->mc_list;
+       iml->next_rcu = inet->mc_list;
        iml->sflist = NULL;
        iml->sfmode = MCAST_EXCLUDE;
        rcu_assign_pointer(inet->mc_list, iml);
@@ -1821,17 +1825,14 @@ EXPORT_SYMBOL(ip_mc_join_group);
 
 static void ip_sf_socklist_reclaim(struct rcu_head *rp)
 {
-       struct ip_sf_socklist *psf;
-
-       psf = container_of(rp, struct ip_sf_socklist, rcu);
+       kfree(container_of(rp, struct ip_sf_socklist, rcu));
        /* sk_omem_alloc should have been decreased by the caller*/
-       kfree(psf);
 }
 
 static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
                           struct in_device *in_dev)
 {
-       struct ip_sf_socklist *psf = iml->sflist;
+       struct ip_sf_socklist *psf = rtnl_dereference(iml->sflist);
        int err;
 
        if (psf == NULL) {
@@ -1851,11 +1852,8 @@ static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
 
 static void ip_mc_socklist_reclaim(struct rcu_head *rp)
 {
-       struct ip_mc_socklist *iml;
-
-       iml = container_of(rp, struct ip_mc_socklist, rcu);
+       kfree(container_of(rp, struct ip_mc_socklist, rcu));
        /* sk_omem_alloc should have been decreased by the caller*/
-       kfree(iml);
 }
 
 
@@ -1866,7 +1864,8 @@ static void ip_mc_socklist_reclaim(struct rcu_head *rp)
 int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
 {
        struct inet_sock *inet = inet_sk(sk);
-       struct ip_mc_socklist *iml, **imlp;
+       struct ip_mc_socklist *iml;
+       struct ip_mc_socklist __rcu **imlp;
        struct in_device *in_dev;
        struct net *net = sock_net(sk);
        __be32 group = imr->imr_multiaddr.s_addr;
@@ -1876,7 +1875,9 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
        rtnl_lock();
        in_dev = ip_mc_find_dev(net, imr);
        ifindex = imr->imr_ifindex;
-       for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) {
+       for (imlp = &inet->mc_list;
+            (iml = rtnl_dereference(*imlp)) != NULL;
+            imlp = &iml->next_rcu) {
                if (iml->multi.imr_multiaddr.s_addr != group)
                        continue;
                if (ifindex) {
@@ -1888,7 +1889,7 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
 
                (void) ip_mc_leave_src(sk, iml, in_dev);
 
-               rcu_assign_pointer(*imlp, iml->next);
+               *imlp = iml->next_rcu;
 
                if (in_dev)
                        ip_mc_dec_group(in_dev, group);
@@ -1934,7 +1935,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
        }
        err = -EADDRNOTAVAIL;
 
-       for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
+       for_each_pmc_rtnl(inet, pmc) {
                if ((pmc->multi.imr_multiaddr.s_addr ==
                     imr.imr_multiaddr.s_addr) &&
                    (pmc->multi.imr_ifindex == imr.imr_ifindex))
@@ -1958,7 +1959,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
                pmc->sfmode = omode;
        }
 
-       psl = pmc->sflist;
+       psl = rtnl_dereference(pmc->sflist);
        if (!add) {
                if (!psl)
                        goto done;      /* err = -EADDRNOTAVAIL */
@@ -2077,7 +2078,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
                goto done;
        }
 
-       for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
+       for_each_pmc_rtnl(inet, pmc) {
                if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
                    pmc->multi.imr_ifindex == imr.imr_ifindex)
                        break;
@@ -2107,7 +2108,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
                (void) ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
                                     msf->imsf_fmode, 0, NULL, 0);
        }
-       psl = pmc->sflist;
+       psl = rtnl_dereference(pmc->sflist);
        if (psl) {
                (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
                        psl->sl_count, psl->sl_addr, 0);
@@ -2155,7 +2156,7 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
        }
        err = -EADDRNOTAVAIL;
 
-       for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
+       for_each_pmc_rtnl(inet, pmc) {
                if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
                    pmc->multi.imr_ifindex == imr.imr_ifindex)
                        break;
@@ -2163,7 +2164,7 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
        if (!pmc)               /* must have a prior join */
                goto done;
        msf->imsf_fmode = pmc->sfmode;
-       psl = pmc->sflist;
+       psl = rtnl_dereference(pmc->sflist);
        rtnl_unlock();
        if (!psl) {
                len = 0;
@@ -2208,7 +2209,7 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
 
        err = -EADDRNOTAVAIL;
 
-       for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
+       for_each_pmc_rtnl(inet, pmc) {
                if (pmc->multi.imr_multiaddr.s_addr == addr &&
                    pmc->multi.imr_ifindex == gsf->gf_interface)
                        break;
@@ -2216,7 +2217,7 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
        if (!pmc)               /* must have a prior join */
                goto done;
        gsf->gf_fmode = pmc->sfmode;
-       psl = pmc->sflist;
+       psl = rtnl_dereference(pmc->sflist);
        rtnl_unlock();
        count = psl ? psl->sl_count : 0;
        copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
@@ -2257,7 +2258,7 @@ int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
                goto out;
 
        rcu_read_lock();
-       for (pmc=rcu_dereference(inet->mc_list); pmc; pmc=rcu_dereference(pmc->next)) {
+       for_each_pmc_rcu(inet, pmc) {
                if (pmc->multi.imr_multiaddr.s_addr == loc_addr &&
                    pmc->multi.imr_ifindex == dif)
                        break;
@@ -2265,7 +2266,7 @@ int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
        ret = inet->mc_all;
        if (!pmc)
                goto unlock;
-       psl = pmc->sflist;
+       psl = rcu_dereference(pmc->sflist);
        ret = (pmc->sfmode == MCAST_EXCLUDE);
        if (!psl)
                goto unlock;
@@ -2300,10 +2301,10 @@ void ip_mc_drop_socket(struct sock *sk)
                return;
 
        rtnl_lock();
-       while ((iml = inet->mc_list) != NULL) {
+       while ((iml = rtnl_dereference(inet->mc_list)) != NULL) {
                struct in_device *in_dev;
-               rcu_assign_pointer(inet->mc_list, iml->next);
 
+               inet->mc_list = iml->next_rcu;
                in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
                (void) ip_mc_leave_src(sk, iml, in_dev);
                if (in_dev != NULL)
@@ -2321,8 +2322,8 @@ int ip_check_mc(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 p
        struct ip_sf_list *psf;
        int rv = 0;
 
-       read_lock(&in_dev->mc_list_lock);
-       for (im=in_dev->mc_list; im; im=im->next) {
+       rcu_read_lock();
+       for_each_pmc_rcu(in_dev, im) {
                if (im->multiaddr == mc_addr)
                        break;
        }
@@ -2343,7 +2344,7 @@ int ip_check_mc(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 p
                } else
                        rv = 1; /* unspecified source; tentatively allow */
        }
-       read_unlock(&in_dev->mc_list_lock);
+       rcu_read_unlock();
        return rv;
 }
 
@@ -2369,13 +2370,11 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
                in_dev = __in_dev_get_rcu(state->dev);
                if (!in_dev)
                        continue;
-               read_lock(&in_dev->mc_list_lock);
-               im = in_dev->mc_list;
+               im = rcu_dereference(in_dev->mc_list);
                if (im) {
                        state->in_dev = in_dev;
                        break;
                }
-               read_unlock(&in_dev->mc_list_lock);
        }
        return im;
 }
@@ -2383,11 +2382,9 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
 static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_list *im)
 {
        struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
-       im = im->next;
-       while (!im) {
-               if (likely(state->in_dev != NULL))
-                       read_unlock(&state->in_dev->mc_list_lock);
 
+       im = rcu_dereference(im->next_rcu);
+       while (!im) {
                state->dev = next_net_device_rcu(state->dev);
                if (!state->dev) {
                        state->in_dev = NULL;
@@ -2396,8 +2393,7 @@ static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_li
                state->in_dev = __in_dev_get_rcu(state->dev);
                if (!state->in_dev)
                        continue;
-               read_lock(&state->in_dev->mc_list_lock);
-               im = state->in_dev->mc_list;
+               im = rcu_dereference(state->in_dev->mc_list);
        }
        return im;
 }
@@ -2433,10 +2429,8 @@ static void igmp_mc_seq_stop(struct seq_file *seq, void *v)
        __releases(rcu)
 {
        struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
-       if (likely(state->in_dev != NULL)) {
-               read_unlock(&state->in_dev->mc_list_lock);
-               state->in_dev = NULL;
-       }
+
+       state->in_dev = NULL;
        state->dev = NULL;
        rcu_read_unlock();
 }
@@ -2458,7 +2452,7 @@ static int igmp_mc_seq_show(struct seq_file *seq, void *v)
                querier = "NONE";
 #endif
 
-               if (state->in_dev->mc_list == im) {
+               if (rcu_dereference(state->in_dev->mc_list) == im) {
                        seq_printf(seq, "%d\t%-10s: %5d %7s\n",
                                   state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier);
                }
@@ -2517,8 +2511,7 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
                idev = __in_dev_get_rcu(state->dev);
                if (unlikely(idev == NULL))
                        continue;
-               read_lock(&idev->mc_list_lock);
-               im = idev->mc_list;
+               im = rcu_dereference(idev->mc_list);
                if (likely(im != NULL)) {
                        spin_lock_bh(&im->lock);
                        psf = im->sources;
@@ -2529,7 +2522,6 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
                        }
                        spin_unlock_bh(&im->lock);
                }
-               read_unlock(&idev->mc_list_lock);
        }
        return psf;
 }
@@ -2543,9 +2535,6 @@ static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_l
                spin_unlock_bh(&state->im->lock);
                state->im = state->im->next;
                while (!state->im) {
-                       if (likely(state->idev != NULL))
-                               read_unlock(&state->idev->mc_list_lock);
-
                        state->dev = next_net_device_rcu(state->dev);
                        if (!state->dev) {
                                state->idev = NULL;
@@ -2554,8 +2543,7 @@ static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_l
                        state->idev = __in_dev_get_rcu(state->dev);
                        if (!state->idev)
                                continue;
-                       read_lock(&state->idev->mc_list_lock);
-                       state->im = state->idev->mc_list;
+                       state->im = rcu_dereference(state->idev->mc_list);
                }
                if (!state->im)
                        break;
@@ -2601,10 +2589,7 @@ static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
                spin_unlock_bh(&state->im->lock);
                state->im = NULL;
        }
-       if (likely(state->idev != NULL)) {
-               read_unlock(&state->idev->mc_list_lock);
-               state->idev = NULL;
-       }
+       state->idev = NULL;
        state->dev = NULL;
        rcu_read_unlock();
 }
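
The igmp.c conversion above retires in_dev->mc_list_lock: readers now walk
the multicast list under rcu_read_lock() via the new for_each_pmc_rcu()
helper, writers rely on the RTNL (for_each_pmc_rtnl()/rtnl_dereference()),
new entries are published with rcu_assign_pointer(), and ip_ma_put() frees
through call_rcu() so readers never see a kfree()d entry. A condensed
sketch of that lifecycle for a generic RTNL-protected list, assuming the
kernel RCU and rtnetlink APIs; struct entry and the function names are
illustrative, not the ones used in igmp.c.

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/types.h>

struct entry {
        u32                     addr;
        struct entry __rcu      *next_rcu;
        struct rcu_head         rcu;
};

static struct entry __rcu *head;        /* writers hold the RTNL */

/* Reader: may run concurrently with add/remove, no lock taken. */
static bool entry_exists(u32 addr)
{
        struct entry *e;
        bool found = false;

        rcu_read_lock();
        for (e = rcu_dereference(head); e; e = rcu_dereference(e->next_rcu))
                if (e->addr == addr) {
                        found = true;
                        break;
                }
        rcu_read_unlock();
        return found;
}

/* Writer: rcu_assign_pointer() publishes the fully initialised entry. */
static int entry_add(u32 addr)
{
        struct entry *e = kzalloc(sizeof(*e), GFP_KERNEL);

        if (!e)
                return -ENOMEM;
        ASSERT_RTNL();
        e->addr = addr;
        e->next_rcu = head;
        rcu_assign_pointer(head, e);
        return 0;
}

static void entry_reclaim(struct rcu_head *rcu)
{
        kfree(container_of(rcu, struct entry, rcu));
}

/* Writer: unlink under the RTNL, free after a grace period. */
static void entry_del(u32 addr)
{
        struct entry __rcu **pp = &head;
        struct entry *e;

        ASSERT_RTNL();
        while ((e = rtnl_dereference(*pp)) != NULL) {
                if (e->addr == addr) {
                        *pp = e->next_rcu;
                        call_rcu(&e->rcu, entry_reclaim);
                        return;
                }
                pp = &e->next_rcu;
        }
}
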
index 70ff77f02eee3b345fa94efd35563a95b2b582ef..cab2057d543058775ee1d6b03e07cfba112d27c6 100644 (file)
@@ -634,7 +634,7 @@ static int ipgre_rcv(struct sk_buff *skb)
 #ifdef CONFIG_NET_IPGRE_BROADCAST
                if (ipv4_is_multicast(iph->daddr)) {
                        /* Looped back packet, drop it! */
-                       if (skb_rtable(skb)->fl.iif == 0)
+                       if (rt_is_output_route(skb_rtable(skb)))
                                goto drop;
                        tunnel->dev->stats.multicast++;
                        skb->pkt_type = PACKET_BROADCAST;
index 86dd5691af46dfc4d87631127e9fa06b7329dc3c..ef2b0089e0ea3ffad352f4ec1c3ea29ea7a42704 100644 (file)
@@ -1654,7 +1654,7 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
        if (mrt->vif_table[vif].dev != skb->dev) {
                int true_vifi;
 
-               if (skb_rtable(skb)->fl.iif == 0) {
+               if (rt_is_output_route(skb_rtable(skb))) {
                        /* It is our own packet, looped back.
                         * Very complicated situation...
                         *
index 987bf9adb31833c19a0db04ce76060306d8e6994..66610ea3c87bb987d98504b6cc710b27e5b28990 100644 (file)
@@ -140,13 +140,15 @@ static unsigned long expires_ljiffies;
 
 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
 static void             ipv4_dst_destroy(struct dst_entry *dst);
-static void             ipv4_dst_ifdown(struct dst_entry *dst,
-                                        struct net_device *dev, int how);
 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
 static void             ipv4_link_failure(struct sk_buff *skb);
 static void             ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
 static int rt_garbage_collect(struct dst_ops *ops);
 
+static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
+                           int how)
+{
+}
 
 static struct dst_ops ipv4_dst_ops = {
        .family =               AF_INET,
@@ -621,7 +623,7 @@ static inline int rt_fast_clean(struct rtable *rth)
        /* Kill broadcast/multicast entries very aggresively, if they
           collide in hash table with more useful entries */
        return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
-               rth->fl.iif && rth->dst.rt_next;
+               rt_is_input_route(rth) && rth->dst.rt_next;
 }
 
 static inline int rt_valuable(struct rtable *rth)
@@ -666,7 +668,7 @@ static inline u32 rt_score(struct rtable *rt)
        if (rt_valuable(rt))
                score |= (1<<31);
 
-       if (!rt->fl.iif ||
+       if (rt_is_output_route(rt) ||
            !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
                score |= (1<<30);
 
@@ -1124,7 +1126,7 @@ restart:
                 */
 
                rt->dst.flags |= DST_NOCACHE;
-               if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
+               if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
                        int err = arp_bind_neighbour(&rt->dst);
                        if (err) {
                                if (net_ratelimit())
@@ -1222,7 +1224,7 @@ restart:
        /* Try to bind route to arp only if it is output
           route or unicast forwarding path.
         */
-       if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
+       if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
                int err = arp_bind_neighbour(&rt->dst);
                if (err) {
                        spin_unlock_bh(rt_hash_lock_addr(hash));
@@ -1404,7 +1406,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
                                if (rth->fl.fl4_dst != daddr ||
                                    rth->fl.fl4_src != skeys[i] ||
                                    rth->fl.oif != ikeys[k] ||
-                                   rth->fl.iif != 0 ||
+                                   rt_is_input_route(rth) ||
                                    rt_is_expired(rth) ||
                                    !net_eq(dev_net(rth->dst.dev), net)) {
                                        rthp = &rth->dst.rt_next;
@@ -1433,8 +1435,6 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
                                rt->dst.child           = NULL;
                                if (rt->dst.dev)
                                        dev_hold(rt->dst.dev);
-                               if (rt->idev)
-                                       in_dev_hold(rt->idev);
                                rt->dst.obsolete        = -1;
                                rt->dst.lastuse = jiffies;
                                rt->dst.path            = &rt->dst;
@@ -1666,7 +1666,7 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
                                    rth->rt_dst != daddr ||
                                    rth->rt_src != iph->saddr ||
                                    rth->fl.oif != ikeys[k] ||
-                                   rth->fl.iif != 0 ||
+                                   rt_is_input_route(rth) ||
                                    dst_metric_locked(&rth->dst, RTAX_MTU) ||
                                    !net_eq(dev_net(rth->dst.dev), net) ||
                                    rt_is_expired(rth))
@@ -1728,33 +1728,13 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
 {
        struct rtable *rt = (struct rtable *) dst;
        struct inet_peer *peer = rt->peer;
-       struct in_device *idev = rt->idev;
 
        if (peer) {
                rt->peer = NULL;
                inet_putpeer(peer);
        }
-
-       if (idev) {
-               rt->idev = NULL;
-               in_dev_put(idev);
-       }
 }
 
-static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
-                           int how)
-{
-       struct rtable *rt = (struct rtable *) dst;
-       struct in_device *idev = rt->idev;
-       if (dev != dev_net(dev)->loopback_dev && idev && idev->dev == dev) {
-               struct in_device *loopback_idev =
-                       in_dev_get(dev_net(dev)->loopback_dev);
-               if (loopback_idev) {
-                       rt->idev = loopback_idev;
-                       in_dev_put(idev);
-               }
-       }
-}
 
 static void ipv4_link_failure(struct sk_buff *skb)
 {
@@ -1790,7 +1770,7 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
        __be32 src;
        struct fib_result res;
 
-       if (rt->fl.iif == 0)
+       if (rt_is_output_route(rt))
                src = rt->rt_src;
        else {
                rcu_read_lock();
@@ -1910,7 +1890,6 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
        rth->fl.iif     = dev->ifindex;
        rth->dst.dev    = init_net.loopback_dev;
        dev_hold(rth->dst.dev);
-       rth->idev       = in_dev_get(rth->dst.dev);
        rth->fl.oif     = 0;
        rth->rt_gateway = daddr;
        rth->rt_spec_dst= spec_dst;
@@ -2050,7 +2029,6 @@ static int __mkroute_input(struct sk_buff *skb,
                rth->fl.iif     = in_dev->dev->ifindex;
        rth->dst.dev    = (out_dev)->dev;
        dev_hold(rth->dst.dev);
-       rth->idev       = in_dev_get(rth->dst.dev);
        rth->fl.oif     = 0;
        rth->rt_spec_dst= spec_dst;
 
@@ -2231,7 +2209,6 @@ local_input:
        rth->fl.iif     = dev->ifindex;
        rth->dst.dev    = net->loopback_dev;
        dev_hold(rth->dst.dev);
-       rth->idev       = in_dev_get(rth->dst.dev);
        rth->rt_gateway = daddr;
        rth->rt_spec_dst= spec_dst;
        rth->dst.input= ip_local_deliver;
@@ -2417,9 +2394,6 @@ static int __mkroute_output(struct rtable **result,
        if (!rth)
                return -ENOBUFS;
 
-       in_dev_hold(in_dev);
-       rth->idev = in_dev;
-
        atomic_set(&rth->dst.__refcnt, 1);
        rth->dst.flags= DST_HOST;
        if (IN_DEV_CONF_GET(in_dev, NOXFRM))
@@ -2695,7 +2669,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
                rth = rcu_dereference_bh(rth->dst.rt_next)) {
                if (rth->fl.fl4_dst == flp->fl4_dst &&
                    rth->fl.fl4_src == flp->fl4_src &&
-                   rth->fl.iif == 0 &&
+                   rt_is_output_route(rth) &&
                    rth->fl.oif == flp->oif &&
                    rth->fl.mark == flp->mark &&
                    !((rth->fl.fl4_tos ^ flp->fl4_tos) &
@@ -2759,9 +2733,6 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi
 
                rt->fl = ort->fl;
 
-               rt->idev = ort->idev;
-               if (rt->idev)
-                       in_dev_hold(rt->idev);
                rt->rt_genid = rt_genid(net);
                rt->rt_flags = ort->rt_flags;
                rt->rt_type = ort->rt_type;
@@ -2853,7 +2824,7 @@ static int rt_fill_info(struct net *net,
        if (rt->dst.tclassid)
                NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
 #endif
-       if (rt->fl.iif)
+       if (rt_is_input_route(rt))
                NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
        else if (rt->rt_src != rt->fl.fl4_src)
                NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
@@ -2878,7 +2849,7 @@ static int rt_fill_info(struct net *net,
                }
        }
 
-       if (rt->fl.iif) {
+       if (rt_is_input_route(rt)) {
 #ifdef CONFIG_IP_MROUTE
                __be32 dst = rt->rt_dst;
 
index 0814199694854e534eb4ff12671e02313dced8e9..2bb46d55f40cf0e680b420f6d80d7d745b6e1634 100644 (file)
@@ -1193,7 +1193,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
        struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
 
        WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
-            KERN_INFO "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
+            "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
             tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
 #endif
 
@@ -1477,10 +1477,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                         * shouldn't happen.
                         */
                        if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
-                            KERN_INFO "recvmsg bug: copied %X "
-                                      "seq %X rcvnxt %X fl %X\n", *seq,
-                                      TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
-                                      flags))
+                                "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
+                                *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
+                                flags))
                                break;
 
                        offset = *seq - TCP_SKB_CB(skb)->seq;
@@ -1490,10 +1489,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                                goto found_ok_skb;
                        if (tcp_hdr(skb)->fin)
                                goto found_fin_ok;
-                       WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: "
-                                       "copied %X seq %X rcvnxt %X fl %X\n",
-                                       *seq, TCP_SKB_CB(skb)->seq,
-                                       tp->rcv_nxt, flags);
+                       WARN(!(flags & MSG_PEEK),
+                            "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
+                            *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
                }
 
                /* Well, if we have backlog, try to process it now yet. */
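
The tcp.c hunks above only clean up three WARN() calls: the KERN_INFO
prefix is dropped and the format strings are re-wrapped. WARN() already
emits its message at warning severity together with a "cut here" banner,
file/line information and a backtrace, so a KERN_* level embedded in the
format only ends up as literal text. A small hedged usage sketch (the
function and its parameters are illustrative):

#include <linux/bug.h>
#include <linux/errno.h>

/* WARN() evaluates to its condition, so it can gate error handling
 * directly; the format is a plain printf-style string with no KERN_*
 * prefix. */
static int check_copy(int copied, int space)
{
        if (WARN(copied > space,
                 "bug: copied %d exceeds space %d\n", copied, space))
                return -EINVAL;
        return 0;
}
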
index 4464f3bff6a7a7d902b72806bb9d5e7c1752bf96..dd1fd8c473fc026c4b374621401b6df269680b44 100644 (file)
@@ -80,10 +80,6 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
        xdst->u.dst.dev = dev;
        dev_hold(dev);
 
-       xdst->u.rt.idev = in_dev_get(dev);
-       if (!xdst->u.rt.idev)
-               return -ENODEV;
-
        xdst->u.rt.peer = rt->peer;
        if (rt->peer)
                atomic_inc(&rt->peer->refcnt);
@@ -189,8 +185,6 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
 {
        struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
 
-       if (likely(xdst->u.rt.idev))
-               in_dev_put(xdst->u.rt.idev);
        if (likely(xdst->u.rt.peer))
                inet_putpeer(xdst->u.rt.peer);
        xfrm_dst_destroy(xdst);
@@ -199,27 +193,9 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
 static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
                             int unregister)
 {
-       struct xfrm_dst *xdst;
-
        if (!unregister)
                return;
 
-       xdst = (struct xfrm_dst *)dst;
-       if (xdst->u.rt.idev->dev == dev) {
-               struct in_device *loopback_idev =
-                       in_dev_get(dev_net(dev)->loopback_dev);
-               BUG_ON(!loopback_idev);
-
-               do {
-                       in_dev_put(xdst->u.rt.idev);
-                       xdst->u.rt.idev = loopback_idev;
-                       in_dev_hold(loopback_idev);
-                       xdst = (struct xfrm_dst *)xdst->u.dst.child;
-               } while (xdst->u.dst.xfrm);
-
-               __in_dev_put(loopback_idev);
-       }
-
        xfrm_dst_ifdown(dst, dev);
 }
 
index de04ea39cde8990025bdb5f63ff408fcc948fb0a..10bd39c0ae2d6941925855909ba680a69987039b 100644 (file)
@@ -169,7 +169,7 @@ __ip_vs_reroute_locally(struct sk_buff *skb)
        struct net *net = dev_net(dev);
        struct iphdr *iph = ip_hdr(skb);
 
-       if (rt->fl.iif) {
+       if (rt_is_input_route(rt)) {
                unsigned long orefdst = skb->_skb_refdst;
 
                if (ip_route_input(skb, iph->daddr, iph->saddr,
@@ -552,7 +552,8 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 #endif
 
        /* From world but DNAT to loopback address? */
-       if (local && ipv4_is_loopback(rt->rt_dst) && skb_rtable(skb)->fl.iif) {
+       if (local && ipv4_is_loopback(rt->rt_dst) &&
+           rt_is_input_route(skb_rtable(skb))) {
                IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, 0, "ip_vs_nat_xmit(): "
                                 "stopping DNAT to loopback address");
                goto tx_error_put;
@@ -1165,7 +1166,8 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 #endif
 
        /* From world but DNAT to loopback address? */
-       if (local && ipv4_is_loopback(rt->rt_dst) && skb_rtable(skb)->fl.iif) {
+       if (local && ipv4_is_loopback(rt->rt_dst) &&
+           rt_is_input_route(skb_rtable(skb))) {
                IP_VS_DBG(1, "%s(): "
                          "stopping DNAT to loopback %pI4\n",
                          __func__, &cp->daddr.ip);
index 3ca2fd9e37200e3e12f6606065acb8c982f25528..c898df76e924f3d9a0e51f1c102f65018ab16e84 100644 (file)
@@ -156,7 +156,7 @@ static const struct file_operations socket_file_ops = {
  */
 
 static DEFINE_SPINLOCK(net_family_lock);
-static const struct net_proto_family *net_families[NPROTO] __read_mostly;
+static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
 
 /*
  *     Statistics counters of the socket lists
@@ -1200,7 +1200,7 @@ int __sock_create(struct net *net, int family, int type, int protocol,
         * requested real, full-featured networking support upon configuration.
         * Otherwise module support will break!
         */
-       if (net_families[family] == NULL)
+       if (rcu_access_pointer(net_families[family]) == NULL)
                request_module("net-pf-%d", family);
 #endif
 
@@ -2332,10 +2332,11 @@ int sock_register(const struct net_proto_family *ops)
        }
 
        spin_lock(&net_family_lock);
-       if (net_families[ops->family])
+       if (rcu_dereference_protected(net_families[ops->family],
+                                     lockdep_is_held(&net_family_lock)))
                err = -EEXIST;
        else {
-               net_families[ops->family] = ops;
+               rcu_assign_pointer(net_families[ops->family], ops);
                err = 0;
        }
        spin_unlock(&net_family_lock);
@@ -2363,7 +2364,7 @@ void sock_unregister(int family)
        BUG_ON(family < 0 || family >= NPROTO);
 
        spin_lock(&net_family_lock);
-       net_families[family] = NULL;
+       rcu_assign_pointer(net_families[family], NULL);
        spin_unlock(&net_family_lock);
 
        synchronize_rcu();
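
net/socket.c converts the net_families[] array to __rcu pointers. The hunks
above show three access flavours: rcu_access_pointer() where only a NULL
test is needed (no dereference, so no RCU read-side section required),
rcu_dereference_protected(..., lockdep_is_held(&net_family_lock)) on the
update side, and rcu_assign_pointer() to publish or clear a slot; the
lookup fast path is expected to pair rcu_read_lock() with rcu_dereference().
A minimal sketch of the same pattern for a small, spinlock-guarded
registration table; the names (struct ops, ops_table, ops_call) are
illustrative.

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

#define MAX_OPS 8

struct ops { int (*handler)(void); };

static const struct ops __rcu *ops_table[MAX_OPS];
static DEFINE_SPINLOCK(ops_lock);

static int ops_register(int idx, const struct ops *ops)
{
        int err = 0;

        spin_lock(&ops_lock);
        if (rcu_dereference_protected(ops_table[idx],
                                      lockdep_is_held(&ops_lock)))
                err = -EEXIST;
        else
                rcu_assign_pointer(ops_table[idx], ops);
        spin_unlock(&ops_lock);
        return err;
}

static void ops_unregister(int idx)
{
        spin_lock(&ops_lock);
        rcu_assign_pointer(ops_table[idx], NULL);
        spin_unlock(&ops_lock);
        synchronize_rcu();      /* wait out in-flight readers before the
                                 * caller frees or unloads the ops */
}

static int ops_call(int idx)
{
        const struct ops *ops;
        int ret = -EAFNOSUPPORT;

        /* Cheap existence check without dereferencing the pointer. */
        if (rcu_access_pointer(ops_table[idx]) == NULL)
                return ret;

        rcu_read_lock();
        ops = rcu_dereference(ops_table[idx]);
        if (ops)
                ret = ops->handler();
        rcu_read_unlock();
        return ret;
}
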
index 3c95304a08174f550f64f36346ce031419f6a4c8..7ff31c60186ab0ae0ff42f7d739894b38f6db8dc 100644 (file)
@@ -316,7 +316,8 @@ static void unix_write_space(struct sock *sk)
        if (unix_writable(sk)) {
                wq = rcu_dereference(sk->sk_wq);
                if (wq_has_sleeper(wq))
-                       wake_up_interruptible_sync(&wq->wait);
+                       wake_up_interruptible_sync_poll(&wq->wait,
+                               POLLOUT | POLLWRNORM | POLLWRBAND);
                sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        }
        rcu_read_unlock();
@@ -1710,7 +1711,8 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
                goto out_unlock;
        }
 
-       wake_up_interruptible_sync(&u->peer_wait);
+       wake_up_interruptible_sync_poll(&u->peer_wait,
+                                       POLLOUT | POLLWRNORM | POLLWRBAND);
 
        if (msg->msg_name)
                unix_copy_addr(msg, skb->sk);
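
The af_unix.c hunks above switch the write-space and datagram-receive
wakeups to wake_up_interruptible_sync_poll(), which hands the event mask to
the waiters so that poll/epoll sleepers keyed on other events are not
woken; the unix_dgram_poll() hunk that follows then skips the comparatively
expensive peer-queue writability checks whenever the caller's poll key
shows it never asked for POLLOUT. A hedged sketch of that keyed-wakeup
pairing (my_waitq, my_write_space, my_poll and the readiness flags are
illustrative stand-ins, not socket code):

#include <linux/poll.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_waitq);

/* Output space became available: wake only the waiters whose poll key
 * overlaps the write events. */
static void my_write_space(void)
{
        if (waitqueue_active(&my_waitq))
                wake_up_interruptible_sync_poll(&my_waitq,
                                POLLOUT | POLLWRNORM | POLLWRBAND);
}

/* Simplified stand-in for an f_op->poll() method: register on the wait
 * queue, then report readiness, skipping the costly writability checks
 * when the caller's key shows they were never requested. */
static unsigned int my_poll(struct file *file, poll_table *wait,
                            bool rx_ready, bool tx_ready)
{
        unsigned int mask = 0;

        poll_wait(file, &my_waitq, wait);

        if (rx_ready)
                mask |= POLLIN | POLLRDNORM;

        /* No write status requested: avoid the expensive OUT tests. */
        if (wait && !(wait->key & (POLLOUT | POLLWRNORM | POLLWRBAND)))
                return mask;

        if (tx_ready)
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

        return mask;
}
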
@@ -2072,13 +2074,12 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
                mask |= POLLERR;
        if (sk->sk_shutdown & RCV_SHUTDOWN)
-               mask |= POLLRDHUP;
+               mask |= POLLRDHUP | POLLIN | POLLRDNORM;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= POLLHUP;
 
        /* readable? */
-       if (!skb_queue_empty(&sk->sk_receive_queue) ||
-           (sk->sk_shutdown & RCV_SHUTDOWN))
+       if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= POLLIN | POLLRDNORM;
 
        /* Connection-based need to check for termination and startup */
@@ -2090,20 +2091,19 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
                        return mask;
        }
 
-       /* writable? */
-       writable = unix_writable(sk);
-       if (writable) {
-               other = unix_peer_get(sk);
-               if (other) {
-                       if (unix_peer(other) != sk) {
-                               sock_poll_wait(file, &unix_sk(other)->peer_wait,
-                                         wait);
-                               if (unix_recvq_full(other))
-                                       writable = 0;
-                       }
+       /* No write status requested, avoid expensive OUT tests. */
+       if (wait && !(wait->key & (POLLWRBAND | POLLWRNORM | POLLOUT)))
+               return mask;
 
-                       sock_put(other);
+       writable = unix_writable(sk);
+       other = unix_peer_get(sk);
+       if (other) {
+               if (unix_peer(other) != sk) {
+                       sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
+                       if (unix_recvq_full(other))
+                               writable = 0;
                }
+               sock_put(other);
        }
 
        if (writable)