]> bbs.cooldavid.org Git - net-next-2.6.git/blobdiff - drivers/net/vxge/vxge-main.c
drivers/net/vxge/vxge-main.c: Remove unnecessary casts of pci_get_drvdata
[net-next-2.6.git] / drivers / net / vxge / vxge-main.c
index fc8b2d7a0919d1cd9709f8c586f315bc5ec96e82..5cba4a684f08f6c108e269097fce6101474841c8 100644 (file)
@@ -7,9 +7,9 @@
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
-* vxge-main.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
+* vxge-main.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *              Virtualized Server Adapter.
-* Copyright(c) 2002-2009 Neterion Inc.
+* Copyright(c) 2002-2010 Exar Corp.
 *
 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables:
@@ -41,6 +41,8 @@
 *
 ******************************************************************************/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/if_vlan.h>
 #include <linux/pci.h>
 #include <linux/slab.h>
@@ -48,6 +50,8 @@
 #include <net/ip.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/firmware.h>
+#include <linux/net_tstamp.h>
 #include "vxge-main.h"
 #include "vxge-reg.h"
 
@@ -80,6 +84,15 @@ module_param_array(bw_percentage, uint, NULL, 0);
 
 static struct vxge_drv_config *driver_config;
 
+static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
+                                            struct macInfo *mac);
+static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
+                                            struct macInfo *mac);
+static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac);
+static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
+static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
+static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
+
 static inline int is_vxge_card_up(struct vxgedev *vdev)
 {
        return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
@@ -87,7 +100,6 @@ static inline int is_vxge_card_up(struct vxgedev *vdev)
 
 static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
 {
-       unsigned long flags = 0;
        struct sk_buff **skb_ptr = NULL;
        struct sk_buff **temp;
 #define NR_SKB_COMPLETED 128
@@ -98,15 +110,16 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
                more = 0;
                skb_ptr = completed;
 
-               if (spin_trylock_irqsave(&fifo->tx_lock, flags)) {
+               if (__netif_tx_trylock(fifo->txq)) {
                        vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
                                                NR_SKB_COMPLETED, &more);
-                       spin_unlock_irqrestore(&fifo->tx_lock, flags);
+                       __netif_tx_unlock(fifo->txq);
                }
+
                /* free SKBs */
                for (temp = completed; temp != skb_ptr; temp++)
                        dev_kfree_skb_irq(*temp);
-       } while (more) ;
+       } while (more);
 }
 
 static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
@@ -130,99 +143,25 @@ static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
        }
 }
 
-/*
- * MultiQ manipulation helper functions
- */
-void vxge_stop_all_tx_queue(struct vxgedev *vdev)
-{
-       int i;
-       struct net_device *dev = vdev->ndev;
-
-       if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
-               for (i = 0; i < vdev->no_of_vpath; i++)
-                       vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_STOP;
-       }
-       netif_tx_stop_all_queues(dev);
-}
-
-void vxge_stop_tx_queue(struct vxge_fifo *fifo)
-{
-       struct net_device *dev = fifo->ndev;
-
-       struct netdev_queue *txq = NULL;
-       if (fifo->tx_steering_type == TX_MULTIQ_STEERING)
-               txq = netdev_get_tx_queue(dev, fifo->driver_id);
-       else {
-               txq = netdev_get_tx_queue(dev, 0);
-               fifo->queue_state = VPATH_QUEUE_STOP;
-       }
-
-       netif_tx_stop_queue(txq);
-}
-
-void vxge_start_all_tx_queue(struct vxgedev *vdev)
-{
-       int i;
-       struct net_device *dev = vdev->ndev;
-
-       if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
-               for (i = 0; i < vdev->no_of_vpath; i++)
-                       vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_START;
-       }
-       netif_tx_start_all_queues(dev);
-}
-
-static void vxge_wake_all_tx_queue(struct vxgedev *vdev)
-{
-       int i;
-       struct net_device *dev = vdev->ndev;
-
-       if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
-               for (i = 0; i < vdev->no_of_vpath; i++)
-                       vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_START;
-       }
-       netif_tx_wake_all_queues(dev);
-}
-
-void vxge_wake_tx_queue(struct vxge_fifo *fifo, struct sk_buff *skb)
-{
-       struct net_device *dev = fifo->ndev;
-
-       int vpath_no = fifo->driver_id;
-       struct netdev_queue *txq = NULL;
-       if (fifo->tx_steering_type == TX_MULTIQ_STEERING) {
-               txq = netdev_get_tx_queue(dev, vpath_no);
-               if (netif_tx_queue_stopped(txq))
-                       netif_tx_wake_queue(txq);
-       } else {
-               txq = netdev_get_tx_queue(dev, 0);
-               if (fifo->queue_state == VPATH_QUEUE_STOP)
-                       if (netif_tx_queue_stopped(txq)) {
-                               fifo->queue_state = VPATH_QUEUE_START;
-                               netif_tx_wake_queue(txq);
-                       }
-       }
-}
-
 /*
  * vxge_callback_link_up
  *
  * This function is called during interrupt context to notify link up state
  * change.
  */
-void
+static void
 vxge_callback_link_up(struct __vxge_hw_device *hldev)
 {
        struct net_device *dev = hldev->ndev;
-       struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+       struct vxgedev *vdev = netdev_priv(dev);
 
        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
                vdev->ndev->name, __func__, __LINE__);
-       printk(KERN_NOTICE "%s: Link Up\n", vdev->ndev->name);
+       netdev_notice(vdev->ndev, "Link Up\n");
        vdev->stats.link_up++;
 
        netif_carrier_on(vdev->ndev);
-       vxge_wake_all_tx_queue(vdev);
+       netif_tx_wake_all_queues(vdev->ndev);
 
        vxge_debug_entryexit(VXGE_TRACE,
                "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
@@ -234,19 +173,19 @@ vxge_callback_link_up(struct __vxge_hw_device *hldev)
  * This function is called during interrupt context to notify link down state
  * change.
  */
-void
+static void
 vxge_callback_link_down(struct __vxge_hw_device *hldev)
 {
        struct net_device *dev = hldev->ndev;
-       struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+       struct vxgedev *vdev = netdev_priv(dev);
 
        vxge_debug_entryexit(VXGE_TRACE,
                "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
-       printk(KERN_NOTICE "%s: Link Down\n", vdev->ndev->name);
+       netdev_notice(vdev->ndev, "Link Down\n");
 
        vdev->stats.link_down++;
        netif_carrier_off(vdev->ndev);
-       vxge_stop_all_tx_queue(vdev);
+       netif_tx_stop_all_queues(vdev->ndev);
 
        vxge_debug_entryexit(VXGE_TRACE,
                "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
@@ -426,12 +365,12 @@ static inline void vxge_post(int *dtr_cnt, void **first_dtr,
  * If the interrupt is because of a received frame or if the receive ring
  * contains fresh as yet un-processed frames, this function is called.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
                 u8 t_code, void *userdata)
 {
        struct vxge_ring *ring = (struct vxge_ring *)userdata;
-       struct  net_device *dev = ring->ndev;
+       struct net_device *dev = ring->ndev;
        unsigned int dma_sizes;
        void *first_dtr = NULL;
        int dtr_cnt = 0;
@@ -573,7 +512,24 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
                    ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
-                       skb->ip_summed = CHECKSUM_NONE;
+                       skb_checksum_none_assert(skb);
+
+
+               if (ring->rx_hwts) {
+                       struct skb_shared_hwtstamps *skb_hwts;
+                       u32 ns = *(u32 *)(skb->head + pkt_length);
+
+                       skb_hwts = skb_hwtstamps(skb);
+                       skb_hwts->hwtstamp = ns_to_ktime(ns);
+                       skb_hwts->syststamp.tv64 = 0;
+               }
+
+               /* rth_hash_type and rth_it_hit are non-zero regardless of
+                * whether rss is enabled.  Only the rth_value is zero/non-zero
+                * if rss is disabled/enabled, so key off of that.
+                */
+               if (ext_info.rth_value)
+                       skb->rxhash = ext_info.rth_value;
 
                vxge_rx_complete(ring, skb, ext_info.vlan,
                        pkt_length, &ext_info);
@@ -603,7 +559,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
  * freed and frees all skbs whose data have already DMA'ed into the NICs
  * internal memory.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
                enum vxge_hw_fifo_tcode t_code, void *userdata,
                struct sk_buff ***skb_ptr, int nr_skb, int *more)
@@ -677,7 +633,8 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
                                &dtr, &t_code) == VXGE_HW_OK);
 
        *skb_ptr = done_skb;
-       vxge_wake_tx_queue(fifo, skb);
+       if (netif_tx_queue_stopped(fifo->txq))
+               netif_tx_wake_queue(fifo->txq);
 
        vxge_debug_entryexit(VXGE_TRACE,
                                "%s: %s:%d  Exiting...",
@@ -686,8 +643,7 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
 }
 
 /* select a vpath to transmit the packet */
-static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb,
-       int *do_lock)
+static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb)
 {
        u16 queue_len, counter = 0;
        if (skb->protocol == htons(ETH_P_IP)) {
@@ -706,12 +662,6 @@ static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb,
                                vdev->vpath_selector[queue_len - 1];
                        if (counter >= queue_len)
                                counter = queue_len - 1;
-
-                       if (ip->protocol == IPPROTO_UDP) {
-#ifdef NETIF_F_LLTX
-                               *do_lock = 0;
-#endif
-                       }
                }
        }
        return counter;
@@ -738,7 +688,7 @@ static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
        struct vxge_vpath *vpath = NULL;
        struct __vxge_hw_device *hldev;
 
-       hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+       hldev = pci_get_drvdata(vdev->pdev);
 
        mac_address = (u8 *)&mac_addr;
        memcpy(mac_address, mac_header, ETH_ALEN);
@@ -808,8 +758,6 @@ static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
  *
  * This function is the Tx entry point of the driver. Neterion NIC supports
  * certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
- * NOTE: when device cant queue the pkt, just the trans_start variable will
- * not be upadted.
 */
 static netdev_tx_t
 vxge_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -826,9 +774,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
        struct vxge_tx_priv *txdl_priv = NULL;
        struct __vxge_hw_fifo *fifo_hw;
        int offload_type;
-       unsigned long flags = 0;
        int vpath_no = 0;
-       int do_spin_tx_lock = 1;
 
        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
                        dev->name, __func__, __LINE__);
@@ -841,7 +787,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_OK;
        }
 
-       vdev = (struct vxgedev *)netdev_priv(dev);
+       vdev = netdev_priv(dev);
 
        if (unlikely(!is_vxge_card_up(vdev))) {
                vxge_debug_tx(VXGE_ERR,
@@ -864,7 +810,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
        if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
                vpath_no = skb_get_queue_mapping(skb);
        else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
-               vpath_no = vxge_get_vpath_no(vdev, skb, &do_spin_tx_lock);
+               vpath_no = vxge_get_vpath_no(vdev, skb);
 
        vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);
 
@@ -874,46 +820,29 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
        fifo = &vdev->vpaths[vpath_no].fifo;
        fifo_hw = fifo->handle;
 
-       if (do_spin_tx_lock)
-               spin_lock_irqsave(&fifo->tx_lock, flags);
-       else {
-               if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
-                       return NETDEV_TX_LOCKED;
-       }
+       if (netif_tx_queue_stopped(fifo->txq))
+               return NETDEV_TX_BUSY;
 
-       if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING) {
-               if (netif_subqueue_stopped(dev, skb)) {
-                       spin_unlock_irqrestore(&fifo->tx_lock, flags);
-                       return NETDEV_TX_BUSY;
-               }
-       } else if (unlikely(fifo->queue_state == VPATH_QUEUE_STOP)) {
-               if (netif_queue_stopped(dev)) {
-                       spin_unlock_irqrestore(&fifo->tx_lock, flags);
-                       return NETDEV_TX_BUSY;
-               }
-       }
        avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
        if (avail == 0) {
                vxge_debug_tx(VXGE_ERR,
                        "%s: No free TXDs available", dev->name);
                fifo->stats.txd_not_free++;
-               vxge_stop_tx_queue(fifo);
-               goto _exit2;
+               goto _exit0;
        }
 
        /* Last TXD?  Stop tx queue to avoid dropping packets.  TX
         * completion will resume the queue.
         */
        if (avail == 1)
-               vxge_stop_tx_queue(fifo);
+               netif_tx_stop_queue(fifo->txq);
 
        status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
        if (unlikely(status != VXGE_HW_OK)) {
                vxge_debug_tx(VXGE_ERR,
                   "%s: Out of descriptors .", dev->name);
                fifo->stats.txd_out_of_desc++;
-               vxge_stop_tx_queue(fifo);
-               goto _exit2;
+               goto _exit0;
        }
 
        vxge_debug_tx(VXGE_TRACE,
@@ -921,7 +850,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
                dev->name, __func__, __LINE__,
                fifo_hw, dtr, dtr_priv);
 
-       if (vdev->vlgrp && vlan_tx_tag_present(skb)) {
+       if (vlan_tx_tag_present(skb)) {
                u16 vlan_tag = vlan_tx_tag_get(skb);
                vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
        }
@@ -933,9 +862,8 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
 
        if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
                vxge_hw_fifo_txdl_free(fifo_hw, dtr);
-               vxge_stop_tx_queue(fifo);
                fifo->stats.pci_map_fail++;
-               goto _exit2;
+               goto _exit0;
        }
 
        txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
@@ -958,13 +886,12 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
                if (!frag->size)
                        continue;
 
-               dma_pointer =
-                       (u64)pci_map_page(fifo->pdev, frag->page,
+               dma_pointer = (u64) pci_map_page(fifo->pdev, frag->page,
                                frag->page_offset, frag->size,
                                PCI_DMA_TODEVICE);
 
                if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer)))
-                       goto _exit0;
+                       goto _exit2;
                vxge_debug_tx(VXGE_TRACE,
                        "%s: %s:%d frag = %d dma_pointer = 0x%llx",
                                dev->name, __func__, __LINE__, i,
@@ -979,11 +906,9 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
        offload_type = vxge_offload_type(skb);
 
        if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
-
                int mss = vxge_tcp_mss(skb);
                if (mss) {
-                       vxge_debug_tx(VXGE_TRACE,
-                               "%s: %s:%d mss = %d",
+                       vxge_debug_tx(VXGE_TRACE, "%s: %s:%d mss = %d",
                                dev->name, __func__, __LINE__, mss);
                        vxge_hw_fifo_txdl_mss_set(dtr, mss);
                } else {
@@ -1001,19 +926,13 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
                                        VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);
 
        vxge_hw_fifo_txdl_post(fifo_hw, dtr);
-#ifdef NETIF_F_LLTX
-       dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
-#endif
-       spin_unlock_irqrestore(&fifo->tx_lock, flags);
 
-       VXGE_COMPLETE_VPATH_TX(fifo);
        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d  Exiting...",
                dev->name, __func__, __LINE__);
        return NETDEV_TX_OK;
 
-_exit0:
+_exit2:
        vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);
-
 _exit1:
        j = 0;
        frag = &skb_shinfo(skb)->frags[0];
@@ -1028,10 +947,9 @@ _exit1:
        }
 
        vxge_hw_fifo_txdl_free(fifo_hw, dtr);
-_exit2:
+_exit0:
+       netif_tx_stop_queue(fifo->txq);
        dev_kfree_skb(skb);
-       spin_unlock_irqrestore(&fifo->tx_lock, flags);
-       VXGE_COMPLETE_VPATH_TX(fifo);
 
        return NETDEV_TX_OK;
 }
@@ -1121,7 +1039,8 @@ static void vxge_set_multicast(struct net_device *dev)
        struct netdev_hw_addr *ha;
        struct vxgedev *vdev;
        int i, mcast_cnt = 0;
-       struct __vxge_hw_device  *hldev;
+       struct __vxge_hw_device *hldev;
+       struct vxge_vpath *vpath;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct macInfo mac_info;
        int vpath_idx = 0;
@@ -1133,7 +1052,7 @@ static void vxge_set_multicast(struct net_device *dev)
        vxge_debug_entryexit(VXGE_TRACE,
                "%s:%d", __func__, __LINE__);
 
-       vdev = (struct vxgedev *)netdev_priv(dev);
+       vdev = netdev_priv(dev);
        hldev = (struct __vxge_hw_device  *)vdev->devh;
 
        if (unlikely(!is_vxge_card_up(vdev)))
@@ -1141,46 +1060,48 @@ static void vxge_set_multicast(struct net_device *dev)
 
        if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
                for (i = 0; i < vdev->no_of_vpath; i++) {
-                       vxge_assert(vdev->vpaths[i].is_open);
-                       status = vxge_hw_vpath_mcast_enable(
-                                               vdev->vpaths[i].handle);
+                       vpath = &vdev->vpaths[i];
+                       vxge_assert(vpath->is_open);
+                       status = vxge_hw_vpath_mcast_enable(vpath->handle);
+                       if (status != VXGE_HW_OK)
+                               vxge_debug_init(VXGE_ERR, "failed to enable "
+                                               "multicast, status %d", status);
                        vdev->all_multi_flg = 1;
                }
-       } else if ((dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
+       } else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
                for (i = 0; i < vdev->no_of_vpath; i++) {
-                       vxge_assert(vdev->vpaths[i].is_open);
-                       status = vxge_hw_vpath_mcast_disable(
-                                               vdev->vpaths[i].handle);
-                       vdev->all_multi_flg = 1;
+                       vpath = &vdev->vpaths[i];
+                       vxge_assert(vpath->is_open);
+                       status = vxge_hw_vpath_mcast_disable(vpath->handle);
+                       if (status != VXGE_HW_OK)
+                               vxge_debug_init(VXGE_ERR, "failed to disable "
+                                               "multicast, status %d", status);
+                       vdev->all_multi_flg = 0;
                }
        }
 
-       if (status != VXGE_HW_OK)
-               vxge_debug_init(VXGE_ERR,
-                       "failed to %s multicast, status %d",
-                       dev->flags & IFF_ALLMULTI ?
-                       "enable" : "disable", status);
 
        if (!vdev->config.addr_learn_en) {
-               if (dev->flags & IFF_PROMISC) {
-                       for (i = 0; i < vdev->no_of_vpath; i++) {
-                               vxge_assert(vdev->vpaths[i].is_open);
+               for (i = 0; i < vdev->no_of_vpath; i++) {
+                       vpath = &vdev->vpaths[i];
+                       vxge_assert(vpath->is_open);
+
+                       if (dev->flags & IFF_PROMISC)
                                status = vxge_hw_vpath_promisc_enable(
-                                               vdev->vpaths[i].handle);
-                       }
-               } else {
-                       for (i = 0; i < vdev->no_of_vpath; i++) {
-                               vxge_assert(vdev->vpaths[i].is_open);
+                                       vpath->handle);
+                       else
                                status = vxge_hw_vpath_promisc_disable(
-                                               vdev->vpaths[i].handle);
-                       }
+                                       vpath->handle);
+                       if (status != VXGE_HW_OK)
+                               vxge_debug_init(VXGE_ERR, "failed to %s promisc"
+                                       ", status %d", dev->flags&IFF_PROMISC ?
+                                       "enable" : "disable", status);
                }
        }
 
        memset(&mac_info, 0, sizeof(struct macInfo));
        /* Update individual M_CAST address list */
        if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
-
                mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
                list_head = &vdev->vpaths[0].mac_addr_list;
                if ((netdev_mc_count(dev) +
@@ -1190,15 +1111,8 @@ static void vxge_set_multicast(struct net_device *dev)
 
                /* Delete previous MC's */
                for (i = 0; i < mcast_cnt; i++) {
-                       if (!list_empty(list_head))
-                               mac_entry = (struct vxge_mac_addrs *)
-                                       list_first_entry(list_head,
-                                               struct vxge_mac_addrs,
-                                               item);
-
                        list_for_each_safe(entry, next, list_head) {
-
-                               mac_entry = (struct vxge_mac_addrs *) entry;
+                               mac_entry = (struct vxge_mac_addrs *)entry;
                                /* Copy the mac address to delete */
                                mac_address = (u8 *)&mac_entry->macaddr;
                                memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
@@ -1240,10 +1154,8 @@ _set_all_mcast:
                mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
                /* Delete previous MC's */
                for (i = 0; i < mcast_cnt; i++) {
-
                        list_for_each_safe(entry, next, list_head) {
-
-                               mac_entry = (struct vxge_mac_addrs *) entry;
+                               mac_entry = (struct vxge_mac_addrs *)entry;
                                /* Copy the mac address to delete */
                                mac_address = (u8 *)&mac_entry->macaddr;
                                memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
@@ -1262,9 +1174,10 @@ _set_all_mcast:
 
                /* Enable all multicast */
                for (i = 0; i < vdev->no_of_vpath; i++) {
-                       vxge_assert(vdev->vpaths[i].is_open);
-                       status = vxge_hw_vpath_mcast_enable(
-                                               vdev->vpaths[i].handle);
+                       vpath = &vdev->vpaths[i];
+                       vxge_assert(vpath->is_open);
+
+                       status = vxge_hw_vpath_mcast_enable(vpath->handle);
                        if (status != VXGE_HW_OK) {
                                vxge_debug_init(VXGE_ERR,
                                        "%s:%d Enabling all multicasts failed",
@@ -1289,14 +1202,14 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
 {
        struct sockaddr *addr = p;
        struct vxgedev *vdev;
-       struct __vxge_hw_device  *hldev;
+       struct __vxge_hw_device *hldev;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct macInfo mac_info_new, mac_info_old;
        int vpath_idx = 0;
 
        vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
 
-       vdev = (struct vxgedev *)netdev_priv(dev);
+       vdev = netdev_priv(dev);
        hldev = vdev->devh;
 
        if (!is_valid_ether_addr(addr->sa_data))
@@ -1361,7 +1274,7 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
  *
  * Enables the interrupts for the vpath
 */
-void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
+static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
 {
        struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
        int msix_id = 0;
@@ -1394,11 +1307,16 @@ void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
  *
  * Disables the interrupts for the vpath
 */
-void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
+static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
 {
        struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
+       struct __vxge_hw_device *hldev;
        int msix_id;
 
+       hldev = pci_get_drvdata(vdev->pdev);
+
+       vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id);
+
        vxge_hw_vpath_intr_disable(vpath->handle);
 
        if (vdev->config.intr_type == INTA)
@@ -1425,6 +1343,7 @@ void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
 static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
 {
        enum vxge_hw_status status = VXGE_HW_OK;
+       struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
        int ret = 0;
 
        /* check if device is down already */
@@ -1435,12 +1354,10 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
        if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
                return 0;
 
-       if (vdev->vpaths[vp_id].handle) {
-               if (vxge_hw_vpath_reset(vdev->vpaths[vp_id].handle)
-                               == VXGE_HW_OK) {
+       if (vpath->handle) {
+               if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
                        if (is_vxge_card_up(vdev) &&
-                               vxge_hw_vpath_recover_from_reset(
-                                       vdev->vpaths[vp_id].handle)
+                               vxge_hw_vpath_recover_from_reset(vpath->handle)
                                        != VXGE_HW_OK) {
                                vxge_debug_init(VXGE_ERR,
                                        "vxge_hw_vpath_recover_from_reset"
@@ -1456,11 +1373,20 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
        } else
                return VXGE_HW_FAIL;
 
-       vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
-       vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
+       vxge_restore_vpath_mac_addr(vpath);
+       vxge_restore_vpath_vid_table(vpath);
 
        /* Enable all broadcast */
-       vxge_hw_vpath_bcast_enable(vdev->vpaths[vp_id].handle);
+       vxge_hw_vpath_bcast_enable(vpath->handle);
+
+       /* Enable all multicast */
+       if (vdev->all_multi_flg) {
+               status = vxge_hw_vpath_mcast_enable(vpath->handle);
+               if (status != VXGE_HW_OK)
+                       vxge_debug_init(VXGE_ERR,
+                               "%s:%d Enabling multicast failed",
+                               __func__, __LINE__);
+       }
 
        /* Enable the interrupts */
        vxge_vpath_intr_enable(vdev, vp_id);
@@ -1468,17 +1394,18 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
        smp_wmb();
 
        /* Enable the flow of traffic through the vpath */
-       vxge_hw_vpath_enable(vdev->vpaths[vp_id].handle);
+       vxge_hw_vpath_enable(vpath->handle);
 
        smp_wmb();
-       vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[vp_id].handle);
-       vdev->vpaths[vp_id].ring.last_status = VXGE_HW_OK;
+       vxge_hw_vpath_rx_doorbell_init(vpath->handle);
+       vpath->ring.last_status = VXGE_HW_OK;
 
        /* Vpath reset done */
        clear_bit(vp_id, &vdev->vp_reset);
 
        /* Start the vpath queue */
-       vxge_wake_tx_queue(&vdev->vpaths[vp_id].fifo, NULL);
+       if (netif_tx_queue_stopped(vpath->fifo.txq))
+               netif_tx_wake_queue(vpath->fifo.txq);
 
        return ret;
 }
@@ -1512,18 +1439,19 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
                        vxge_debug_init(VXGE_ERR,
                                "%s: execution mode is debug, returning..",
                                vdev->ndev->name);
-               clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
-               vxge_stop_all_tx_queue(vdev);
-               return 0;
+                       clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
+                       netif_tx_stop_all_queues(vdev->ndev);
+                       return 0;
                }
        }
 
        if (event == VXGE_LL_FULL_RESET) {
+               vxge_hw_device_wait_receive_idle(vdev->devh);
                vxge_hw_device_intr_disable(vdev->devh);
 
                switch (vdev->cric_err_event) {
                case VXGE_HW_EVENT_UNKNOWN:
-                       vxge_stop_all_tx_queue(vdev);
+                       netif_tx_stop_all_queues(vdev->ndev);
                        vxge_debug_init(VXGE_ERR,
                                "fatal: %s: Disabling device due to"
                                "unknown error",
@@ -1544,7 +1472,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
                case VXGE_HW_EVENT_VPATH_ERR:
                        break;
                case VXGE_HW_EVENT_CRITICAL_ERR:
-                       vxge_stop_all_tx_queue(vdev);
+                       netif_tx_stop_all_queues(vdev->ndev);
                        vxge_debug_init(VXGE_ERR,
                                "fatal: %s: Disabling device due to"
                                "serious error",
@@ -1554,7 +1482,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
                        ret = -EPERM;
                        goto out;
                case VXGE_HW_EVENT_SERR:
-                       vxge_stop_all_tx_queue(vdev);
+                       netif_tx_stop_all_queues(vdev->ndev);
                        vxge_debug_init(VXGE_ERR,
                                "fatal: %s: Disabling device due to"
                                "serious error",
@@ -1566,7 +1494,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
                        ret = -EPERM;
                        goto out;
                case VXGE_HW_EVENT_SLOT_FREEZE:
-                       vxge_stop_all_tx_queue(vdev);
+                       netif_tx_stop_all_queues(vdev->ndev);
                        vxge_debug_init(VXGE_ERR,
                                "fatal: %s: Disabling device due to"
                                "slot freeze",
@@ -1580,7 +1508,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
        }
 
        if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET))
-               vxge_stop_all_tx_queue(vdev);
+               netif_tx_stop_all_queues(vdev->ndev);
 
        if (event == VXGE_LL_FULL_RESET) {
                status = vxge_reset_all_vpaths(vdev);
@@ -1640,7 +1568,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
                        vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
                }
 
-               vxge_wake_all_tx_queue(vdev);
+               netif_tx_wake_all_queues(vdev->ndev);
        }
 
 out:
@@ -1659,10 +1587,9 @@ out:
  *
  * driver may reset the chip on events of serr, eccerr, etc
  */
static int vxge_reset(struct vxgedev *vdev)
{
	/* Request a full adapter reset; unlike the old void-ish variant,
	 * propagate do_vxge_reset()'s status to the caller.
	 */
	return do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
}
 
 /**
@@ -1705,8 +1632,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
        int budget_org = budget;
        struct vxge_ring *ring;
 
-       struct __vxge_hw_device  *hldev = (struct __vxge_hw_device *)
-               pci_get_drvdata(vdev->pdev);
+       struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev);
 
        for (i = 0; i < vdev->no_of_vpath; i++) {
                ring = &vdev->vpaths[i].ring;
@@ -1742,11 +1668,11 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
  */
 static void vxge_netpoll(struct net_device *dev)
 {
-       struct __vxge_hw_device  *hldev;
+       struct __vxge_hw_device *hldev;
        struct vxgedev *vdev;
 
-       vdev = (struct vxgedev *)netdev_priv(dev);
-       hldev = (struct __vxge_hw_device  *)pci_get_drvdata(vdev->pdev);
+       vdev = netdev_priv(dev);
+       hldev = pci_get_drvdata(vdev->pdev);
 
        vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
 
@@ -1786,15 +1712,6 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
                mtable[index] = index % vdev->no_of_vpath;
        }
 
-       /* Fill RTH hash types */
-       hash_types.hash_type_tcpipv4_en   = vdev->config.rth_hash_type_tcpipv4;
-       hash_types.hash_type_ipv4_en      = vdev->config.rth_hash_type_ipv4;
-       hash_types.hash_type_tcpipv6_en   = vdev->config.rth_hash_type_tcpipv6;
-       hash_types.hash_type_ipv6_en      = vdev->config.rth_hash_type_ipv6;
-       hash_types.hash_type_tcpipv6ex_en =
-                                       vdev->config.rth_hash_type_tcpipv6ex;
-       hash_types.hash_type_ipv6ex_en    = vdev->config.rth_hash_type_ipv6ex;
-
        /* set indirection table, bucket-to-vpath mapping */
        status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
                                                vdev->no_of_vpath,
@@ -1807,12 +1724,21 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
                return status;
        }
 
+       /* Fill RTH hash types */
+       hash_types.hash_type_tcpipv4_en   = vdev->config.rth_hash_type_tcpipv4;
+       hash_types.hash_type_ipv4_en      = vdev->config.rth_hash_type_ipv4;
+       hash_types.hash_type_tcpipv6_en   = vdev->config.rth_hash_type_tcpipv6;
+       hash_types.hash_type_ipv6_en      = vdev->config.rth_hash_type_ipv6;
+       hash_types.hash_type_tcpipv6ex_en =
+                                       vdev->config.rth_hash_type_tcpipv6ex;
+       hash_types.hash_type_ipv6ex_en    = vdev->config.rth_hash_type_ipv6ex;
+
        /*
-       * Because the itable_set() method uses the active_table field
-       * for the target virtual path the RTH config should be updated
-       * for all VPATHs. The h/w only uses the lowest numbered VPATH
-       * when steering frames.
-       */
+        * Because the itable_set() method uses the active_table field
+        * for the target virtual path the RTH config should be updated
+        * for all VPATHs. The h/w only uses the lowest numbered VPATH
+        * when steering frames.
+        */
         for (index = 0; index < vdev->no_of_vpath; index++) {
                status = vxge_hw_vpath_rts_rth_set(
                                vdev->vpaths[index].handle,
@@ -1831,7 +1757,7 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
        return status;
 }
 
-int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
+static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
 {
        struct vxge_mac_addrs *new_mac_entry;
        u8 *mac_address = NULL;
@@ -1864,7 +1790,8 @@ int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
 }
 
 /* Add a mac address to DA table */
-enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
+static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
+                                            struct macInfo *mac)
 {
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_vpath *vpath;
@@ -1889,11 +1816,11 @@ enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
        return status;
 }
 
-int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
+static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
 {
        struct list_head *entry, *next;
        u64 del_mac = 0;
-       u8 *mac_address = (u8 *) (&del_mac);
+       u8 *mac_address = (u8 *)(&del_mac);
 
        /* Copy the mac address to delete from the list */
        memcpy(mac_address, mac->macaddr, ETH_ALEN);
@@ -1914,7 +1841,8 @@ int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
        return FALSE;
 }
 /* delete a mac address from DA table */
-enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
+static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
+                                            struct macInfo *mac)
 {
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_vpath *vpath;
@@ -1961,7 +1889,7 @@ static vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath,
 }
 
 /* Store all vlan ids from the list to the vid table */
-enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
+static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
 {
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxgedev *vdev = vpath->vdev;
@@ -1969,7 +1897,7 @@ enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
 
        if (vdev->vlgrp && vpath->is_open) {
 
-               for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+               for (vid = 0; vid < VLAN_N_VID; vid++) {
                        if (!vlan_group_get_device(vdev->vlgrp, vid))
                                continue;
                        /* Add these vlan to the vid table */
@@ -1981,7 +1909,7 @@ enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
 }
 
 /* Store all mac addresses from the list to the DA table */
-enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
+static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
 {
        enum vxge_hw_status status = VXGE_HW_OK;
        struct macInfo mac_info;
@@ -2025,17 +1953,17 @@ enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
 /* reset vpaths */
 enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
 {
-       int i;
        enum vxge_hw_status status = VXGE_HW_OK;
+       struct vxge_vpath *vpath;
+       int i;
 
-       for (i = 0; i < vdev->no_of_vpath; i++)
-               if (vdev->vpaths[i].handle) {
-                       if (vxge_hw_vpath_reset(vdev->vpaths[i].handle)
-                                       == VXGE_HW_OK) {
+       for (i = 0; i < vdev->no_of_vpath; i++) {
+               vpath = &vdev->vpaths[i];
+               if (vpath->handle) {
+                       if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
                                if (is_vxge_card_up(vdev) &&
                                        vxge_hw_vpath_recover_from_reset(
-                                               vdev->vpaths[i].handle)
-                                               != VXGE_HW_OK) {
+                                               vpath->handle) != VXGE_HW_OK) {
                                        vxge_debug_init(VXGE_ERR,
                                                "vxge_hw_vpath_recover_"
                                                "from_reset failed for vpath: "
@@ -2049,83 +1977,109 @@ enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
                                        return status;
                        }
                }
+       }
+
        return status;
 }
 
 /* close vpaths */
-void vxge_close_vpaths(struct vxgedev *vdev, int index)
+static void vxge_close_vpaths(struct vxgedev *vdev, int index)
 {
+       struct vxge_vpath *vpath;
        int i;
+
        for (i = index; i < vdev->no_of_vpath; i++) {
-               if (vdev->vpaths[i].handle && vdev->vpaths[i].is_open) {
-                       vxge_hw_vpath_close(vdev->vpaths[i].handle);
+               vpath = &vdev->vpaths[i];
+
+               if (vpath->handle && vpath->is_open) {
+                       vxge_hw_vpath_close(vpath->handle);
                        vdev->stats.vpaths_open--;
                }
-               vdev->vpaths[i].is_open = 0;
-               vdev->vpaths[i].handle  = NULL;
+               vpath->is_open = 0;
+               vpath->handle = NULL;
        }
 }
 
/* open vpaths
 *
 * Opens every configured virtual path: programs Titan-1A specific
 * interrupt-moderation overrides, registers fifo/ring callbacks, binds
 * each fifo to a netdev TX queue, and records the opened handles.
 * Returns VXGE_HW_OK on success; on any open failure it closes all
 * previously opened vpaths and returns -EPERM.
 */
static int vxge_open_vpaths(struct vxgedev *vdev)
{
	struct vxge_hw_vpath_attr attr;
	enum vxge_hw_status status;
	struct vxge_vpath *vpath;
	u32 vp_id = 0;
	int i;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];
		vxge_assert(vpath->is_configured);

		/* Non-Titan1 (i.e. Titan-1A) silicon needs different RTI/TTI
		 * utilization-range and flow-control defaults; the TTI UFC
		 * thresholds scale with the configured MTU.
		 */
		if (!vdev->titan1) {
			struct vxge_hw_vp_config *vcfg;
			vcfg = &vdev->devh->config.vp_config[vpath->device_id];

			vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A;
			vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B;
			vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C;
			vcfg->tti.uec_a = TTI_T1A_TX_UFC_A;
			vcfg->tti.uec_b = TTI_T1A_TX_UFC_B;
			vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu);
			vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu);
			vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL;
			vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL;
		}

		/* Fill the open attributes: per-vpath fifo (TX) and ring (RX)
		 * completion/termination callbacks with this vpath's private
		 * structures as userdata.
		 */
		attr.vp_id = vpath->device_id;
		attr.fifo_attr.callback = vxge_xmit_compl;
		attr.fifo_attr.txdl_term = vxge_tx_term;
		attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv);
		attr.fifo_attr.userdata = &vpath->fifo;

		attr.ring_attr.callback = vxge_rx_1b_compl;
		attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
		attr.ring_attr.rxd_term = vxge_rx_term;
		attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv);
		attr.ring_attr.userdata = &vpath->ring;

		vpath->ring.ndev = vdev->ndev;
		vpath->ring.pdev = vdev->pdev;
		status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
		if (status == VXGE_HW_OK) {
			/* NOTE(review): the HW layer appears to return the
			 * fifo/ring handles through the userdata pointers set
			 * above — confirm against vxge_hw_vpath_open().
			 */
			vpath->fifo.handle =
			    (struct __vxge_hw_fifo *)attr.fifo_attr.userdata;
			vpath->ring.handle =
			    (struct __vxge_hw_ring *)attr.ring_attr.userdata;
			vpath->fifo.tx_steering_type =
				vdev->config.tx_steering_type;
			vpath->fifo.ndev = vdev->ndev;
			vpath->fifo.pdev = vdev->pdev;
			/* With TX steering each vpath gets its own netdev TX
			 * queue; without it every fifo shares queue 0.
			 */
			if (vdev->config.tx_steering_type)
				vpath->fifo.txq =
					netdev_get_tx_queue(vdev->ndev, i);
			else
				vpath->fifo.txq =
					netdev_get_tx_queue(vdev->ndev, 0);
			vpath->fifo.indicate_max_pkts =
				vdev->config.fifo_indicate_max_pkts;
			vpath->ring.rx_vector_no = 0;
			vpath->ring.rx_csum = vdev->rx_csum;
			vpath->ring.rx_hwts = vdev->rx_hwts;
			vpath->is_open = 1;
			vdev->vp_handles[i] = vpath->handle;
			vpath->ring.gro_enable = vdev->config.gro_enable;
			vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
			vdev->stats.vpaths_open++;
		} else {
			/* One failed open aborts the whole sequence and tears
			 * down everything opened so far (from index 0).
			 */
			vdev->stats.vpath_open_fail++;
			vxge_debug_init(VXGE_ERR,
				"%s: vpath: %d failed to open "
				"with status: %d",
			    vdev->ndev->name, vpath->device_id,
				status);
			vxge_close_vpaths(vdev, 0);
			return -EPERM;
		}

		/* Mark this vpath id as deployed in the device-wide bitmap. */
		vp_id = vpath->handle->vpath->vp_id;
		vdev->vpaths_deployed |= vxge_mBIT(vp_id);
	}
	return VXGE_HW_OK;
}
@@ -2147,18 +2101,18 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
        struct __vxge_hw_device *hldev;
        u64 reason;
        enum vxge_hw_status status;
-       struct vxgedev *vdev = (struct vxgedev *) dev_id;;
+       struct vxgedev *vdev = (struct vxgedev *)dev_id;
 
        vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
 
        dev = vdev->ndev;
-       hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
+       hldev = pci_get_drvdata(vdev->pdev);
 
        if (pci_channel_offline(vdev->pdev))
                return IRQ_NONE;
 
        if (unlikely(!is_vxge_card_up(vdev)))
-               return IRQ_NONE;
+               return IRQ_HANDLED;
 
        status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode,
                        &reason);
@@ -2256,8 +2210,8 @@ start:
        /* Alarm MSIX Vectors count */
        vdev->intr_cnt++;
 
-       vdev->entries = kzalloc(vdev->intr_cnt * sizeof(struct msix_entry),
-                                               GFP_KERNEL);
+       vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry),
+                               GFP_KERNEL);
        if (!vdev->entries) {
                vxge_debug_init(VXGE_ERR,
                        "%s: memory allocation failed",
@@ -2266,9 +2220,9 @@ start:
                goto alloc_entries_failed;
        }
 
-       vdev->vxge_entries =
-               kzalloc(vdev->intr_cnt * sizeof(struct vxge_msix_entry),
-                               GFP_KERNEL);
+       vdev->vxge_entries = kcalloc(vdev->intr_cnt,
+                                    sizeof(struct vxge_msix_entry),
+                                    GFP_KERNEL);
        if (!vdev->vxge_entries) {
                vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
                        VXGE_DRIVER_NAME);
@@ -2299,7 +2253,6 @@ start:
        vdev->vxge_entries[j].in_use = 0;
 
        ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
-
        if (ret > 0) {
                vxge_debug_init(VXGE_ERR,
                        "%s: MSI-X enable failed for %d vectors, ret: %d",
@@ -2345,17 +2298,16 @@ static int vxge_enable_msix(struct vxgedev *vdev)
        ret = vxge_alloc_msix(vdev);
        if (!ret) {
                for (i = 0; i < vdev->no_of_vpath; i++) {
+                       struct vxge_vpath *vpath = &vdev->vpaths[i];
 
-                       /* If fifo or ring are not enabled
-                          the MSIX vector for that should be set to 0
-                          Hence initializeing this array to all 0s.
-                       */
-                       vdev->vpaths[i].ring.rx_vector_no =
-                               (vdev->vpaths[i].device_id *
-                                       VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
+                       /* If fifo or ring are not enabled, the MSIX vector for
+                        * it should be set to 0.
+                        */
+                       vpath->ring.rx_vector_no = (vpath->device_id *
+                                               VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
 
-                       vxge_hw_vpath_msix_set(vdev->vpaths[i].handle,
-                                       tim_msix_id, VXGE_ALARM_MSIX_ID);
+                       vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
+                                              VXGE_ALARM_MSIX_ID);
                }
        }
 
@@ -2388,8 +2340,8 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
 
 static void vxge_rem_isr(struct vxgedev *vdev)
 {
-       struct __vxge_hw_device  *hldev;
-       hldev = (struct __vxge_hw_device  *) pci_get_drvdata(vdev->pdev);
+       struct __vxge_hw_device *hldev;
+       hldev = pci_get_drvdata(vdev->pdev);
 
 #ifdef CONFIG_PCI_MSI
        if (vdev->config.intr_type == MSI_X) {
@@ -2570,9 +2522,10 @@ static void vxge_poll_vp_reset(unsigned long data)
 static void vxge_poll_vp_lockup(unsigned long data)
 {
        struct vxgedev *vdev = (struct vxgedev *)data;
-       int i;
-       struct vxge_ring *ring;
        enum vxge_hw_status status = VXGE_HW_OK;
+       struct vxge_vpath *vpath;
+       struct vxge_ring *ring;
+       int i;
 
        for (i = 0; i < vdev->no_of_vpath; i++) {
                ring = &vdev->vpaths[i].ring;
@@ -2586,13 +2539,13 @@ static void vxge_poll_vp_lockup(unsigned long data)
 
                                /* schedule vpath reset */
                                if (!test_and_set_bit(i, &vdev->vp_reset)) {
+                                       vpath = &vdev->vpaths[i];
 
                                        /* disable interrupts for this vpath */
                                        vxge_vpath_intr_disable(vdev, i);
 
                                        /* stop the queue for this vpath */
-                                       vxge_stop_tx_queue(&vdev->vpaths[i].
-                                                               fifo);
+                                       netif_tx_stop_queue(vpath->fifo.txq);
                                        continue;
                                }
                        }
@@ -2615,20 +2568,21 @@ static void vxge_poll_vp_lockup(unsigned long data)
  * Return value: '0' on success and an appropriate (-)ve integer as
  * defined in errno.h file on failure.
  */
-int
+static int
 vxge_open(struct net_device *dev)
 {
        enum vxge_hw_status status;
        struct vxgedev *vdev;
        struct __vxge_hw_device *hldev;
+       struct vxge_vpath *vpath;
        int ret = 0;
        int i;
        u64 val64, function_mode;
        vxge_debug_entryexit(VXGE_TRACE,
                "%s: %s:%d", dev->name, __func__, __LINE__);
 
-       vdev = (struct vxgedev *)netdev_priv(dev);
-       hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+       vdev = netdev_priv(dev);
+       hldev = pci_get_drvdata(vdev->pdev);
        function_mode = vdev->config.device_hw_info.function_mode;
 
        /* make sure you have link off by default every time Nic is
@@ -2654,20 +2608,21 @@ vxge_open(struct net_device *dev)
                goto out1;
        }
 
-
        if (vdev->config.intr_type != MSI_X) {
                netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
                        vdev->config.napi_weight);
                napi_enable(&vdev->napi);
-               for (i = 0; i < vdev->no_of_vpath; i++)
-                       vdev->vpaths[i].ring.napi_p = &vdev->napi;
+               for (i = 0; i < vdev->no_of_vpath; i++) {
+                       vpath = &vdev->vpaths[i];
+                       vpath->ring.napi_p = &vdev->napi;
+               }
        } else {
                for (i = 0; i < vdev->no_of_vpath; i++) {
-                       netif_napi_add(dev, &vdev->vpaths[i].ring.napi,
+                       vpath = &vdev->vpaths[i];
+                       netif_napi_add(dev, &vpath->ring.napi,
                            vxge_poll_msix, vdev->config.napi_weight);
-                       napi_enable(&vdev->vpaths[i].ring.napi);
-                       vdev->vpaths[i].ring.napi_p =
-                               &vdev->vpaths[i].ring.napi;
+                       napi_enable(&vpath->ring.napi);
+                       vpath->ring.napi_p = &vpath->ring.napi;
                }
        }
 
@@ -2682,11 +2637,14 @@ vxge_open(struct net_device *dev)
                        goto out2;
                }
        }
+       printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
+              hldev->config.rth_en ? "enabled" : "disabled");
 
        for (i = 0; i < vdev->no_of_vpath; i++) {
+               vpath = &vdev->vpaths[i];
+
                /* set initial mtu before enabling the device */
-               status = vxge_hw_vpath_mtu_set(vdev->vpaths[i].handle,
-                                               vdev->mtu);
+               status = vxge_hw_vpath_mtu_set(vpath->handle, vdev->mtu);
                if (status != VXGE_HW_OK) {
                        vxge_debug_init(VXGE_ERR,
                                "%s: fatal: can not set new MTU", dev->name);
@@ -2700,10 +2658,21 @@ vxge_open(struct net_device *dev)
                "%s: MTU is %d", vdev->ndev->name, vdev->mtu);
        VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);
 
-       /* Reprogram the DA table with populated mac addresses */
-       for (i = 0; i < vdev->no_of_vpath; i++) {
-               vxge_restore_vpath_mac_addr(&vdev->vpaths[i]);
-               vxge_restore_vpath_vid_table(&vdev->vpaths[i]);
+       /* Restore the DA, VID table and also multicast and promiscuous mode
+        * states
+        */
+       if (vdev->all_multi_flg) {
+               for (i = 0; i < vdev->no_of_vpath; i++) {
+                       vpath = &vdev->vpaths[i];
+                       vxge_restore_vpath_mac_addr(vpath);
+                       vxge_restore_vpath_vid_table(vpath);
+
+                       status = vxge_hw_vpath_mcast_enable(vpath->handle);
+                       if (status != VXGE_HW_OK)
+                               vxge_debug_init(VXGE_ERR,
+                                       "%s:%d Enabling multicast failed",
+                                       __func__, __LINE__);
+               }
        }
 
        /* Enable vpath to sniff all unicast/multicast traffic that not
@@ -2732,14 +2701,14 @@ vxge_open(struct net_device *dev)
 
        /* Enabling Bcast and mcast for all vpath */
        for (i = 0; i < vdev->no_of_vpath; i++) {
-               status = vxge_hw_vpath_bcast_enable(vdev->vpaths[i].handle);
+               vpath = &vdev->vpaths[i];
+               status = vxge_hw_vpath_bcast_enable(vpath->handle);
                if (status != VXGE_HW_OK)
                        vxge_debug_init(VXGE_ERR,
                                "%s : Can not enable bcast for vpath "
                                "id %d", dev->name, i);
                if (vdev->config.addr_learn_en) {
-                       status =
-                           vxge_hw_vpath_mcast_enable(vdev->vpaths[i].handle);
+                       status = vxge_hw_vpath_mcast_enable(vpath->handle);
                        if (status != VXGE_HW_OK)
                                vxge_debug_init(VXGE_ERR,
                                        "%s : Can not enable mcast for vpath "
@@ -2755,9 +2724,10 @@ vxge_open(struct net_device *dev)
                vxge_os_timer(vdev->vp_reset_timer,
                        vxge_poll_vp_reset, vdev, (HZ/2));
 
-       if (vdev->vp_lockup_timer.function == NULL)
-               vxge_os_timer(vdev->vp_lockup_timer,
-                       vxge_poll_vp_lockup, vdev, (HZ/2));
+       /* There is no need to check for RxD leak and RxD lookup on Titan1A */
+       if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
+               vxge_os_timer(vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
+                             HZ / 2);
 
        set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
 
@@ -2765,7 +2735,7 @@ vxge_open(struct net_device *dev)
 
        if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
                netif_carrier_on(vdev->ndev);
-               printk(KERN_NOTICE "%s: Link Up\n", vdev->ndev->name);
+               netdev_notice(vdev->ndev, "Link Up\n");
                vdev->stats.link_up++;
        }
 
@@ -2774,12 +2744,14 @@ vxge_open(struct net_device *dev)
        smp_wmb();
 
        for (i = 0; i < vdev->no_of_vpath; i++) {
-               vxge_hw_vpath_enable(vdev->vpaths[i].handle);
+               vpath = &vdev->vpaths[i];
+
+               vxge_hw_vpath_enable(vpath->handle);
                smp_wmb();
-               vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
+               vxge_hw_vpath_rx_doorbell_init(vpath->handle);
        }
 
-       vxge_start_all_tx_queue(vdev);
+       netif_tx_start_all_queues(vdev->ndev);
        goto out0;
 
 out2:
@@ -2803,7 +2775,7 @@ out0:
 }
 
 /* Loop throught the mac address list and delete all the entries */
-void vxge_free_mac_add_list(struct vxge_vpath *vpath)
+static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
 {
 
        struct list_head *entry, *next;
@@ -2827,7 +2799,7 @@ static void vxge_napi_del_all(struct vxgedev *vdev)
        }
 }
 
-int do_vxge_close(struct net_device *dev, int do_io)
+static int do_vxge_close(struct net_device *dev, int do_io)
 {
        enum vxge_hw_status status;
        struct vxgedev *vdev;
@@ -2837,8 +2809,8 @@ int do_vxge_close(struct net_device *dev, int do_io)
        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
                dev->name, __func__, __LINE__);
 
-       vdev = (struct vxgedev *)netdev_priv(dev);
-       hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+       vdev = netdev_priv(dev);
+       hldev = pci_get_drvdata(vdev->pdev);
 
        if (unlikely(!is_vxge_card_up(vdev)))
                return 0;
@@ -2848,7 +2820,6 @@ int do_vxge_close(struct net_device *dev, int do_io)
        while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
                msleep(50);
 
-       clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
        if (do_io) {
                /* Put the vpath back in normal mode */
                vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
@@ -2888,10 +2859,17 @@ int do_vxge_close(struct net_device *dev, int do_io)
 
                smp_wmb();
        }
-       del_timer_sync(&vdev->vp_lockup_timer);
+
+       if (vdev->titan1)
+               del_timer_sync(&vdev->vp_lockup_timer);
 
        del_timer_sync(&vdev->vp_reset_timer);
 
+       if (do_io)
+               vxge_hw_device_wait_receive_idle(hldev);
+
+       clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
+
        /* Disable napi */
        if (vdev->config.intr_type != MSI_X)
                napi_disable(&vdev->napi);
@@ -2901,15 +2879,13 @@ int do_vxge_close(struct net_device *dev, int do_io)
        }
 
        netif_carrier_off(vdev->ndev);
-       printk(KERN_NOTICE "%s: Link Down\n", vdev->ndev->name);
-       vxge_stop_all_tx_queue(vdev);
+       netdev_notice(vdev->ndev, "Link Down\n");
+       netif_tx_stop_all_queues(vdev->ndev);
 
        /* Note that at this point xmit() is stopped by upper layer */
        if (do_io)
                vxge_hw_device_intr_disable(vdev->devh);
 
-       mdelay(1000);
-
        vxge_rem_isr(vdev);
 
        vxge_napi_del_all(vdev);
@@ -2938,7 +2914,7 @@ int do_vxge_close(struct net_device *dev, int do_io)
  * Return value: '0' on success and an appropriate (-)ve integer as
  * defined in errno.h file on failure.
  */
-int
+static int
 vxge_close(struct net_device *dev)
 {
        do_vxge_close(dev, 1);
@@ -2996,26 +2972,18 @@ static int vxge_change_mtu(struct net_device *dev, int new_mtu)
 }
 
 /**
- * vxge_get_stats
+ * vxge_get_stats64
  * @dev: pointer to the device structure
+ * @stats: pointer to struct rtnl_link_stats64
  *
- * Updates the device statistics structure. This function updates the device
- * statistics structure in the net_device structure and returns a pointer
- * to the same.
  */
-static struct net_device_stats *
-vxge_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *
+vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
 {
-       struct vxgedev *vdev;
-       struct net_device_stats *net_stats;
+       struct vxgedev *vdev = netdev_priv(dev);
        int k;
 
-       vdev = netdev_priv(dev);
-
-       net_stats = &vdev->stats.net_stats;
-
-       memset(net_stats, 0, sizeof(struct net_device_stats));
-
+       /* net_stats already zeroed by caller */
        for (k = 0; k < vdev->no_of_vpath; k++) {
                net_stats->rx_packets += vdev->vpaths[k].ring.stats.rx_frms;
                net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes;
@@ -3032,6 +3000,101 @@ vxge_get_stats(struct net_device *dev)
        return net_stats;
 }
 
+static enum vxge_hw_status vxge_timestamp_config(struct vxgedev *vdev,
+                                                int enable)
+{
+       enum vxge_hw_status status;
+       u64 val64;
+
+       /* Timestamp is passed to the driver via the FCS, therefore we
+        * must disable the FCS stripping by the adapter.  Since this is
+        * required for the driver to load (due to a hardware bug),
+        * there is no need to do anything special here.
+        */
+       if (enable)
+               val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
+                       VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
+                       VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);
+       else
+               val64 = 0;
+
+       status = vxge_hw_mgmt_reg_write(vdev->devh,
+                                       vxge_hw_mgmt_reg_type_mrpcim,
+                                       0,
+                                       offsetof(struct vxge_hw_mrpcim_reg,
+                                                xmac_timestamp),
+                                       val64);
+       vxge_hw_device_flush_io(vdev->devh);
+       return status;
+}
+
+static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
+{
+       struct hwtstamp_config config;
+       enum vxge_hw_status status;
+       int i;
+
+       if (copy_from_user(&config, data, sizeof(config)))
+               return -EFAULT;
+
+       /* reserved for future extensions */
+       if (config.flags)
+               return -EINVAL;
+
+       /* Transmit HW Timestamp not supported */
+       switch (config.tx_type) {
+       case HWTSTAMP_TX_OFF:
+               break;
+       case HWTSTAMP_TX_ON:
+       default:
+               return -ERANGE;
+       }
+
+       switch (config.rx_filter) {
+       case HWTSTAMP_FILTER_NONE:
+               status = vxge_timestamp_config(vdev, 0);
+               if (status != VXGE_HW_OK)
+                       return -EFAULT;
+
+               vdev->rx_hwts = 0;
+               config.rx_filter = HWTSTAMP_FILTER_NONE;
+               break;
+
+       case HWTSTAMP_FILTER_ALL:
+       case HWTSTAMP_FILTER_SOME:
+       case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+       case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+       case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+               status = vxge_timestamp_config(vdev, 1);
+               if (status != VXGE_HW_OK)
+                       return -EFAULT;
+
+               vdev->rx_hwts = 1;
+               config.rx_filter = HWTSTAMP_FILTER_ALL;
+               break;
+
+       default:
+               return -ERANGE;
+       }
+
+       for (i = 0; i < vdev->no_of_vpath; i++)
+               vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts;
+
+       if (copy_to_user(data, &config, sizeof(config)))
+               return -EFAULT;
+
+       return 0;
+}
+
 /**
  * vxge_ioctl
  * @dev: Device pointer.
@@ -3044,7 +3107,20 @@ vxge_get_stats(struct net_device *dev)
  */
 static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
-       return -EOPNOTSUPP;
+       struct vxgedev *vdev = netdev_priv(dev);
+       int ret;
+
+       switch (cmd) {
+       case SIOCSHWTSTAMP:
+               ret = vxge_hwtstamp_ioctl(vdev, rq->ifr_data);
+               if (ret)
+                       return ret;
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
 }
 
 /**
@@ -3062,7 +3138,7 @@ vxge_tx_watchdog(struct net_device *dev)
 
        vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
 
-       vdev = (struct vxgedev *)netdev_priv(dev);
+       vdev = netdev_priv(dev);
 
        vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;
 
@@ -3090,7 +3166,7 @@ vxge_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
 
        vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
 
-       vdev = (struct vxgedev *)netdev_priv(dev);
+       vdev = netdev_priv(dev);
 
        vpath = &vdev->vpaths[0];
        if ((NULL == grp) && (vpath->is_open)) {
@@ -3139,7 +3215,7 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
        struct vxge_vpath *vpath;
        int vp_id;
 
-       vdev = (struct vxgedev *)netdev_priv(dev);
+       vdev = netdev_priv(dev);
 
        /* Add these vlan to the vid table */
        for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
@@ -3166,7 +3242,7 @@ vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 
        vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
 
-       vdev = (struct vxgedev *)netdev_priv(dev);
+       vdev = netdev_priv(dev);
 
        vlan_group_set_device(vdev->vlgrp, vid, NULL);
 
@@ -3184,7 +3260,7 @@ vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 static const struct net_device_ops vxge_netdev_ops = {
        .ndo_open               = vxge_open,
        .ndo_stop               = vxge_close,
-       .ndo_get_stats          = vxge_get_stats,
+       .ndo_get_stats64        = vxge_get_stats64,
        .ndo_start_xmit         = vxge_xmit,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_multicast_list = vxge_set_multicast,
@@ -3203,19 +3279,32 @@ static const struct net_device_ops vxge_netdev_ops = {
 #endif
 };
 
-int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
-                                  struct vxge_config *config,
-                                  int high_dma, int no_of_vpath,
-                                  struct vxgedev **vdev_out)
+static int __devinit vxge_device_revision(struct vxgedev *vdev)
+{
+       int ret;
+       u8 revision;
+
+       ret = pci_read_config_byte(vdev->pdev, PCI_REVISION_ID, &revision);
+       if (ret)
+               return -EIO;
+
+       vdev->titan1 = (revision == VXGE_HW_TITAN1_PCI_REVISION);
+       return 0;
+}
+
+static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
+                                         struct vxge_config *config,
+                                         int high_dma, int no_of_vpath,
+                                         struct vxgedev **vdev_out)
 {
        struct net_device *ndev;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxgedev *vdev;
-       int i, ret = 0, no_of_queue = 1;
+       int ret = 0, no_of_queue = 1;
        u64 stat;
 
        *vdev_out = NULL;
-       if (config->tx_steering_type == TX_MULTIQ_STEERING)
+       if (config->tx_steering_type)
                no_of_queue = no_of_vpath;
 
        ndev = alloc_etherdev_mq(sizeof(struct vxgedev),
@@ -3241,6 +3330,11 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
        vdev->pdev = hldev->pdev;
        memcpy(&vdev->config, config, sizeof(struct vxge_config));
        vdev->rx_csum = 1;      /* Enable Rx CSUM by default. */
+       vdev->rx_hwts = 0;
+
+       ret = vxge_device_revision(vdev);
+       if (ret < 0)
+               goto _out1;
 
        SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
 
@@ -3254,7 +3348,12 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
 
        ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
 
-       initialize_ethtool_ops(ndev);
+       vxge_initialize_ethtool_ops(ndev);
+
+       if (vdev->config.rth_steering != NO_STEERING) {
+               ndev->features |= NETIF_F_RXHASH;
+               hldev->config.rth_en = VXGE_HW_RTH_ENABLE;
+       }
 
        /* Allocate memory for vpath */
        vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
@@ -3284,16 +3383,6 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
        if (vdev->config.gro_enable)
                ndev->features |= NETIF_F_GRO;
 
-       if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
-               ndev->real_num_tx_queues = no_of_vpath;
-
-#ifdef NETIF_F_LLTX
-       ndev->features |= NETIF_F_LLTX;
-#endif
-
-       for (i = 0; i < no_of_vpath; i++)
-               spin_lock_init(&vdev->vpaths[i].fifo.tx_lock);
-
        if (register_netdev(ndev)) {
                vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
                        "%s: %s : device registration failed!",
@@ -3315,6 +3404,7 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
                "%s: Ethernet device registered",
                ndev->name);
 
+       hldev->ndev = ndev;
        *vdev_out = vdev;
 
        /* Resetting the Device stats */
@@ -3349,36 +3439,29 @@ _out0:
  *
  * This function will unregister and free network device
  */
-void
-vxge_device_unregister(struct __vxge_hw_device *hldev)
+static void vxge_device_unregister(struct __vxge_hw_device *hldev)
 {
        struct vxgedev *vdev;
        struct net_device *dev;
        char buf[IFNAMSIZ];
-#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
-       (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
-       u32 level_trace;
-#endif
 
        dev = hldev->ndev;
        vdev = netdev_priv(dev);
-#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
-       (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
-       level_trace = vdev->level_trace;
-#endif
-       vxge_debug_entryexit(level_trace,
-               "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
 
-       memcpy(buf, vdev->ndev->name, IFNAMSIZ);
+       vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name,
+                            __func__, __LINE__);
+
+       memcpy(buf, dev->name, IFNAMSIZ);
 
        /* in 2.6 will call stop() if device is up */
        unregister_netdev(dev);
 
        flush_scheduled_work();
 
-       vxge_debug_init(level_trace, "%s: ethernet device unregistered", buf);
-       vxge_debug_entryexit(level_trace,
-               "%s: %s:%d  Exiting...", buf, __func__, __LINE__);
+       vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
+                       buf);
+       vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d  Exiting...", buf,
+                            __func__, __LINE__);
 }
 
 /*
@@ -3392,7 +3475,8 @@ vxge_callback_crit_err(struct __vxge_hw_device *hldev,
                        enum vxge_hw_event type, u64 vp_id)
 {
        struct net_device *dev = hldev->ndev;
-       struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+       struct vxgedev *vdev = netdev_priv(dev);
+       struct vxge_vpath *vpath = NULL;
        int vpath_idx;
 
        vxge_debug_entryexit(vdev->level_trace,
@@ -3403,9 +3487,11 @@ vxge_callback_crit_err(struct __vxge_hw_device *hldev,
         */
        vdev->cric_err_event = type;
 
-       for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++)
-               if (vdev->vpaths[vpath_idx].device_id == vp_id)
+       for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
+               vpath = &vdev->vpaths[vpath_idx];
+               if (vpath->device_id == vp_id)
                        break;
+       }
 
        if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
                if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
@@ -3442,8 +3528,7 @@ vxge_callback_crit_err(struct __vxge_hw_device *hldev,
                                vxge_vpath_intr_disable(vdev, vpath_idx);
 
                                /* stop the queue for this vpath */
-                               vxge_stop_tx_queue(&vdev->vpaths[vpath_idx].
-                                                       fifo);
+                               netif_tx_stop_queue(vpath->fifo.txq);
                        }
                }
        }
@@ -3899,8 +3984,7 @@ static int vxge_pm_resume(struct pci_dev *pdev)
 static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
 {
-       struct __vxge_hw_device  *hldev =
-               (struct __vxge_hw_device  *) pci_get_drvdata(pdev);
+       struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
        struct net_device *netdev = hldev->ndev;
 
        netif_device_detach(netdev);
@@ -3929,16 +4013,13 @@ static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
  */
 static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
 {
-       struct __vxge_hw_device  *hldev =
-               (struct __vxge_hw_device  *) pci_get_drvdata(pdev);
+       struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
        struct net_device *netdev = hldev->ndev;
 
        struct vxgedev *vdev = netdev_priv(netdev);
 
        if (pci_enable_device(pdev)) {
-               printk(KERN_ERR "%s: "
-                       "Cannot re-enable device after reset\n",
-                       VXGE_DRIVER_NAME);
+               netdev_err(netdev, "Cannot re-enable device after reset\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }
 
@@ -3957,15 +4038,13 @@ static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
  */
 static void vxge_io_resume(struct pci_dev *pdev)
 {
-       struct __vxge_hw_device  *hldev =
-               (struct __vxge_hw_device  *) pci_get_drvdata(pdev);
+       struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
        struct net_device *netdev = hldev->ndev;
 
        if (netif_running(netdev)) {
                if (vxge_open(netdev)) {
-                       printk(KERN_ERR "%s: "
-                               "Can't bring device back up after reset\n",
-                               VXGE_DRIVER_NAME);
+                       netdev_err(netdev,
+                                  "Can't bring device back up after reset\n");
                        return;
                }
        }
@@ -4003,6 +4082,142 @@ static inline u32 vxge_get_num_vfs(u64 function_mode)
        return num_functions;
 }
 
+int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
+{
+       struct __vxge_hw_device *hldev = vdev->devh;
+       u32 maj, min, bld, cmaj, cmin, cbld;
+       enum vxge_hw_status status;
+       const struct firmware *fw;
+       int ret;
+
+       ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
+       if (ret) {
+               vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found",
+                               VXGE_DRIVER_NAME, fw_name);
+               goto out;
+       }
+
+       /* Load the new firmware onto the adapter */
+       status = vxge_update_fw_image(hldev, fw->data, fw->size);
+       if (status != VXGE_HW_OK) {
+               vxge_debug_init(VXGE_ERR,
+                               "%s: FW image download to adapter failed '%s'.",
+                               VXGE_DRIVER_NAME, fw_name);
+               ret = -EIO;
+               goto out;
+       }
+
+       /* Read the version of the new firmware */
+       status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld);
+       if (status != VXGE_HW_OK) {
+               vxge_debug_init(VXGE_ERR,
+                               "%s: Upgrade read version failed '%s'.",
+                               VXGE_DRIVER_NAME, fw_name);
+               ret = -EIO;
+               goto out;
+       }
+
+       cmaj = vdev->config.device_hw_info.fw_version.major;
+       cmin = vdev->config.device_hw_info.fw_version.minor;
+       cbld = vdev->config.device_hw_info.fw_version.build;
+       /* It's possible the version in /lib/firmware is not the latest version.
+        * If so, we could get into a loop of trying to upgrade to the latest
+        * and flashing the older version.
+        */
+       if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) &&
+           !override) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n",
+              maj, min, bld);
+
+       /* Flash the adapter with the new firmware */
+       status = vxge_hw_flash_fw(hldev);
+       if (status != VXGE_HW_OK) {
+               vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.",
+                               VXGE_DRIVER_NAME, fw_name);
+               ret = -EIO;
+               goto out;
+       }
+
+       printk(KERN_NOTICE "Upgrade of firmware successful!  Adapter must be "
+              "hard reset before using, thus requiring a system reboot or a "
+              "hotplug event.\n");
+out:
+       release_firmware(fw);
+       return ret;
+}
+
+static int vxge_probe_fw_update(struct vxgedev *vdev)
+{
+       u32 maj, min, bld;
+       int ret, gpxe = 0;
+       char *fw_name;
+
+       maj = vdev->config.device_hw_info.fw_version.major;
+       min = vdev->config.device_hw_info.fw_version.minor;
+       bld = vdev->config.device_hw_info.fw_version.build;
+
+       if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER)
+               return 0;
+
+       /* Ignore the build number when determining if the current firmware is
+        * "too new" to load the driver
+        */
+       if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) {
+               vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known "
+                               "version, unable to load driver\n",
+                               VXGE_DRIVER_NAME);
+               return -EINVAL;
+       }
+
+       /* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to
+        * work with this driver.
+        */
+       if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) {
+               vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be "
+                               "upgraded\n", VXGE_DRIVER_NAME, maj, min, bld);
+               return -EINVAL;
+       }
+
+       /* If file not specified, determine gPXE or not */
+       if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) {
+               int i;
+               for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++)
+                       if (vdev->devh->eprom_versions[i]) {
+                               gpxe = 1;
+                               break;
+                       }
+       }
+       if (gpxe)
+               fw_name = "vxge/X3fw-pxe.ncf";
+       else
+               fw_name = "vxge/X3fw.ncf";
+
+       ret = vxge_fw_upgrade(vdev, fw_name, 0);
+       /* -EINVAL/-ENOENT are non-fatal: keep probing on the current firmware.
+        * Any other result -- including success, which leaves the adapter
+        * needing a hard reset -- aborts the probe with -EIO. */
+       if (ret != -EINVAL && ret != -ENOENT)
+               return -EIO;
+       else
+               ret = 0;
+
+       if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
+           VXGE_FW_VER(maj, min, 0)) {
+               vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
+                               " be used with this driver.\n"
+                               "Please get the latest version from "
+                               "ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE",
+                               VXGE_DRIVER_NAME, maj, min, bld);
+               return -EINVAL;
+       }
+
+       return ret;
+}
+
 /**
  * vxge_probe
  * @pdev : structure containing the PCI related information of the device.
@@ -4017,13 +4232,13 @@ static inline u32 vxge_get_num_vfs(u64 function_mode)
 static int __devinit
 vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 {
-       struct __vxge_hw_device  *hldev;
+       struct __vxge_hw_device *hldev;
        enum vxge_hw_status status;
        int ret;
        int high_dma = 0;
        u64 vpath_mask = 0;
        struct vxgedev *vdev;
-       struct vxge_config ll_config;
+       struct vxge_config *ll_config = NULL;
        struct vxge_hw_device_config *device_config = NULL;
        struct vxge_hw_device_attr attr;
        int i, j, no_of_vpath = 0, max_vpath_supported = 0;
@@ -4082,17 +4297,24 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
                goto _exit0;
        }
 
-       memset(&ll_config, 0, sizeof(struct vxge_config));
-       ll_config.tx_steering_type = TX_MULTIQ_STEERING;
-       ll_config.intr_type = MSI_X;
-       ll_config.napi_weight = NEW_NAPI_WEIGHT;
-       ll_config.rth_steering = RTH_STEERING;
+       ll_config = kzalloc(sizeof(*ll_config), GFP_KERNEL);
+       if (!ll_config) {
+               ret = -ENOMEM;
+               vxge_debug_init(VXGE_ERR,
+                       "ll_config : malloc failed %s %d",
+                       __FILE__, __LINE__);
+               goto _exit0;
+       }
+       ll_config->tx_steering_type = TX_MULTIQ_STEERING;
+       ll_config->intr_type = MSI_X;
+       ll_config->napi_weight = NEW_NAPI_WEIGHT;
+       ll_config->rth_steering = RTH_STEERING;
 
        /* get the default configuration parameters */
        vxge_hw_device_config_default_get(device_config);
 
        /* initialize configuration parameters */
-       vxge_device_config_init(device_config, &ll_config.intr_type);
+       vxge_device_config_init(device_config, &ll_config->intr_type);
 
        ret = pci_enable_device(pdev);
        if (ret) {
@@ -4145,7 +4367,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
                (unsigned long long)pci_resource_start(pdev, 0));
 
        status = vxge_hw_device_hw_info_get(attr.bar0,
-                       &ll_config.device_hw_info);
+                       &ll_config->device_hw_info);
        if (status != VXGE_HW_OK) {
                vxge_debug_init(VXGE_ERR,
                        "%s: Reading of hardware info failed."
@@ -4154,17 +4376,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
                goto _exit3;
        }
 
-       if (ll_config.device_hw_info.fw_version.major !=
-               VXGE_DRIVER_FW_VERSION_MAJOR) {
-               vxge_debug_init(VXGE_ERR,
-                       "%s: Incorrect firmware version."
-                       "Please upgrade the firmware to version 1.x.x",
-                       VXGE_DRIVER_NAME);
-               ret = -EINVAL;
-               goto _exit3;
-       }
-
-       vpath_mask = ll_config.device_hw_info.vpath_mask;
+       vpath_mask = ll_config->device_hw_info.vpath_mask;
        if (vpath_mask == 0) {
                vxge_debug_ll_config(VXGE_TRACE,
                        "%s: No vpaths available in device", VXGE_DRIVER_NAME);
@@ -4176,10 +4388,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
                "%s:%d  Vpath mask = %llx", __func__, __LINE__,
                (unsigned long long)vpath_mask);
 
-       function_mode = ll_config.device_hw_info.function_mode;
-       host_type = ll_config.device_hw_info.host_type;
+       function_mode = ll_config->device_hw_info.function_mode;
+       host_type = ll_config->device_hw_info.host_type;
        is_privileged = __vxge_hw_device_is_privilaged(host_type,
-               ll_config.device_hw_info.func_id);
+               ll_config->device_hw_info.func_id);
 
        /* Check how many vpaths are available */
        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
@@ -4193,7 +4405,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 
        /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
        if (is_sriov(function_mode) && (max_config_dev > 1) &&
-               (ll_config.intr_type != INTA) &&
+               (ll_config->intr_type != INTA) &&
                (is_privileged == VXGE_HW_OK)) {
                ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs)
                        ? (max_config_dev - 1) : num_vfs);
@@ -4206,7 +4418,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
         * Configure vpaths and get driver configured number of vpaths
         * which is less than or equal to the maximum vpaths per function.
         */
-       no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, &ll_config);
+       no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config);
        if (!no_of_vpath) {
                vxge_debug_ll_config(VXGE_ERR,
                        "%s: No more vpaths to configure", VXGE_DRIVER_NAME);
@@ -4227,11 +4439,37 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
                        goto _exit3;
        }
 
+       if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
+                       ll_config->device_hw_info.fw_version.minor,
+                       ll_config->device_hw_info.fw_version.build) >=
+           VXGE_EPROM_FW_VER) {
+               struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES] = {0};
+
+               status = vxge_hw_vpath_eprom_img_ver_get(hldev, img);
+               if (status != VXGE_HW_OK) {
+                       vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed",
+                                       VXGE_DRIVER_NAME);
+                       /* This is a non-fatal error, continue */
+               }
+
+               for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
+                       hldev->eprom_versions[i] = img[i].version;
+                       if (!img[i].is_valid)
+                               break;
+                       vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
+                                       "%d.%d.%d.%d\n", VXGE_DRIVER_NAME, i,
+                                       VXGE_EPROM_IMG_MAJOR(img[i].version),
+                                       VXGE_EPROM_IMG_MINOR(img[i].version),
+                                       VXGE_EPROM_IMG_FIX(img[i].version),
+                                       VXGE_EPROM_IMG_BUILD(img[i].version));
+               }
+       }
+
        /* if FCS stripping is not disabled in MAC fail driver load */
-       if (vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask) != VXGE_HW_OK) {
-               vxge_debug_init(VXGE_ERR,
-                       "%s: FCS stripping is not disabled in MAC"
-                       " failing driver load", VXGE_DRIVER_NAME);
+       status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask);
+       if (status != VXGE_HW_OK) {
+               vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC"
+                               " failing driver load", VXGE_DRIVER_NAME);
                ret = -EINVAL;
                goto _exit4;
        }
@@ -4241,32 +4479,36 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
        /* set private device info */
        pci_set_drvdata(pdev, hldev);
 
-       ll_config.gro_enable = VXGE_GRO_ALWAYS_AGGREGATE;
-       ll_config.fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
-       ll_config.addr_learn_en = addr_learn_en;
-       ll_config.rth_algorithm = RTH_ALG_JENKINS;
-       ll_config.rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4;
-       ll_config.rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE;
-       ll_config.rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
-       ll_config.rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
-       ll_config.rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
-       ll_config.rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
-       ll_config.rth_bkt_sz = RTH_BUCKET_SIZE;
-       ll_config.tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
-       ll_config.rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
-
-       if (vxge_device_register(hldev, &ll_config, high_dma, no_of_vpath,
-               &vdev)) {
+       ll_config->gro_enable = VXGE_GRO_ALWAYS_AGGREGATE;
+       ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
+       ll_config->addr_learn_en = addr_learn_en;
+       ll_config->rth_algorithm = RTH_ALG_JENKINS;
+       ll_config->rth_hash_type_tcpipv4 = 1;
+       ll_config->rth_hash_type_ipv4 = 0;
+       ll_config->rth_hash_type_tcpipv6 = 0;
+       ll_config->rth_hash_type_ipv6 = 0;
+       ll_config->rth_hash_type_tcpipv6ex = 0;
+       ll_config->rth_hash_type_ipv6ex = 0;
+       ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
+       ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
+       ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
+
+       ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
+                                  &vdev);
+       if (ret) {
                ret = -EINVAL;
                goto _exit4;
        }
 
+       ret = vxge_probe_fw_update(vdev);
+       if (ret)
+               goto _exit5;
+
        vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
        VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
                vxge_hw_device_trace_level_get(hldev));
 
        /* set private HW device info */
-       hldev->ndev = vdev->ndev;
        vdev->mtu = VXGE_HW_DEFAULT_MTU;
        vdev->bar0 = attr.bar0;
        vdev->max_vpath_supported = max_vpath_supported;
@@ -4281,12 +4523,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 
                vdev->vpaths[j].is_configured = 1;
                vdev->vpaths[j].device_id = i;
-               vdev->vpaths[j].fifo.driver_id = j;
                vdev->vpaths[j].ring.driver_id = j;
                vdev->vpaths[j].vdev = vdev;
                vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
                memcpy((u8 *)vdev->vpaths[j].macaddr,
-                               (u8 *)ll_config.device_hw_info.mac_addrs[i],
+                               ll_config->device_hw_info.mac_addrs[i],
                                ETH_ALEN);
 
                /* Initialize the mac address list header */
@@ -4307,18 +4548,18 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 
        macaddr = (u8 *)vdev->vpaths[0].macaddr;
 
-       ll_config.device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
-       ll_config.device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
-       ll_config.device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
+       ll_config->device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
+       ll_config->device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
+       ll_config->device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
 
        vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
-               vdev->ndev->name, ll_config.device_hw_info.serial_number);
+               vdev->ndev->name, ll_config->device_hw_info.serial_number);
 
        vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
-               vdev->ndev->name, ll_config.device_hw_info.part_number);
+               vdev->ndev->name, ll_config->device_hw_info.part_number);
 
        vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
-               vdev->ndev->name, ll_config.device_hw_info.product_desc);
+               vdev->ndev->name, ll_config->device_hw_info.product_desc);
 
        vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
                vdev->ndev->name, macaddr);
@@ -4328,11 +4569,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 
        vxge_debug_init(VXGE_TRACE,
                "%s: Firmware version : %s Date : %s", vdev->ndev->name,
-               ll_config.device_hw_info.fw_version.version,
-               ll_config.device_hw_info.fw_date.date);
+               ll_config->device_hw_info.fw_version.version,
+               ll_config->device_hw_info.fw_date.date);
 
        if (new_device) {
-               switch (ll_config.device_hw_info.function_mode) {
+               switch (ll_config->device_hw_info.function_mode) {
                case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
                        vxge_debug_init(VXGE_TRACE,
                        "%s: Single Function Mode Enabled", vdev->ndev->name);
@@ -4355,7 +4596,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
        vxge_print_parm(vdev, vpath_mask);
 
        /* Store the fw version for ethttool option */
-       strcpy(vdev->fw_version, ll_config.device_hw_info.fw_version.version);
+       strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
        memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
        memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN);
 
@@ -4369,7 +4610,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
                                "%s: mac_addr_list : memory allocation failed",
                                vdev->ndev->name);
                        ret = -EPERM;
-                       goto _exit5;
+                       goto _exit6;
                }
                macaddr = (u8 *)&entry->macaddr;
                memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
@@ -4394,7 +4635,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
         * present to prevent such a failure.
         */
 
-       if (ll_config.device_hw_info.function_mode ==
+       if (ll_config->device_hw_info.function_mode ==
                VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
                if (vdev->config.intr_type == INTA)
                        vxge_hw_device_unmask_all(hldev);
@@ -4406,12 +4647,13 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
        VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
                vxge_hw_device_trace_level_get(hldev));
 
+       kfree(ll_config);
        return 0;
 
-_exit5:
+_exit6:
        for (i = 0; i < vdev->no_of_vpath; i++)
                vxge_free_mac_add_list(&vdev->vpaths[i]);
-
+_exit5:
        vxge_device_unregister(hldev);
 _exit4:
        pci_disable_sriov(pdev);
@@ -4423,6 +4665,7 @@ _exit2:
 _exit1:
        pci_disable_device(pdev);
 _exit0:
+       kfree(ll_config);
        kfree(device_config);
        driver_config->config_dev_cnt--;
        pci_set_drvdata(pdev, NULL);
@@ -4435,34 +4678,25 @@ _exit0:
  * Description: This function is called by the Pci subsystem to release a
  * PCI device and free up all resource held up by the device.
  */
-static void __devexit
-vxge_remove(struct pci_dev *pdev)
+static void __devexit vxge_remove(struct pci_dev *pdev)
 {
-       struct __vxge_hw_device  *hldev;
+       struct __vxge_hw_device *hldev;
        struct vxgedev *vdev = NULL;
        struct net_device *dev;
        int i = 0;
-#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
-       (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
-       u32 level_trace;
-#endif
 
-       hldev = (struct __vxge_hw_device  *) pci_get_drvdata(pdev);
+       hldev = pci_get_drvdata(pdev);
 
        if (hldev == NULL)
                return;
+
        dev = hldev->ndev;
        vdev = netdev_priv(dev);
 
-#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
-       (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
-       level_trace = vdev->level_trace;
-#endif
-       vxge_debug_entryexit(level_trace,
-               "%s:%d", __func__, __LINE__);
+       vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
 
-       vxge_debug_init(level_trace,
-               "%s : removing PCI device...", __func__);
+       vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
+                       __func__);
        vxge_device_unregister(hldev);
 
        for (i = 0; i < vdev->no_of_vpath; i++) {
@@ -4480,16 +4714,16 @@ vxge_remove(struct pci_dev *pdev)
        /* we are safe to free it now */
        free_netdev(dev);
 
-       vxge_debug_init(level_trace,
-               "%s:%d  Device unregistered", __func__, __LINE__);
+       vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
+                       __func__, __LINE__);
 
        vxge_hw_device_terminate(hldev);
 
        pci_disable_device(pdev);
        pci_release_regions(pdev);
        pci_set_drvdata(pdev, NULL);
-       vxge_debug_entryexit(level_trace,
-               "%s:%d  Exiting...", __func__, __LINE__);
+       vxge_debug_entryexit(vdev->level_trace, "%s:%d  Exiting...", __func__,
+                            __LINE__);
 }
 
 static struct pci_error_handlers vxge_err_handler = {
@@ -4514,13 +4748,9 @@ static int __init
 vxge_starter(void)
 {
        int ret = 0;
-       char version[32];
-       snprintf(version, 32, "%s", DRV_VERSION);
 
-       printk(KERN_INFO "%s: Copyright(c) 2002-2009 Neterion Inc\n",
-               VXGE_DRIVER_NAME);
-       printk(KERN_INFO "%s: Driver version: %s\n",
-                       VXGE_DRIVER_NAME, version);
+       pr_info("Copyright(c) 2002-2010 Exar Corp.\n");
+       pr_info("Driver version: %s\n", DRV_VERSION);
 
        verify_bandwidth();