]> bbs.cooldavid.org Git - net-next-2.6.git/commitdiff
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/holtmann/bluet...
authorJohn W. Linville <linville@tuxdriver.com>
Thu, 5 Aug 2010 19:54:28 +0000 (15:54 -0400)
committerJohn W. Linville <linville@tuxdriver.com>
Thu, 5 Aug 2010 19:54:28 +0000 (15:54 -0400)
229 files changed:
MAINTAINERS
arch/um/drivers/net_kern.c
drivers/atm/nicstar.c
drivers/net/3c59x.c
drivers/net/Kconfig
drivers/net/Makefile
drivers/net/benet/be.h
drivers/net/benet/be_cmds.c
drivers/net/benet/be_cmds.h
drivers/net/benet/be_ethtool.c
drivers/net/benet/be_hw.h
drivers/net/benet/be_main.c
drivers/net/bnx2x/Makefile [new file with mode: 0644]
drivers/net/bnx2x/bnx2x.h [moved from drivers/net/bnx2x.h with 84% similarity]
drivers/net/bnx2x/bnx2x_cmn.c [new file with mode: 0644]
drivers/net/bnx2x/bnx2x_cmn.h [new file with mode: 0644]
drivers/net/bnx2x/bnx2x_dump.h [moved from drivers/net/bnx2x_dump.h with 100% similarity]
drivers/net/bnx2x/bnx2x_ethtool.c [new file with mode: 0644]
drivers/net/bnx2x/bnx2x_fw_defs.h [moved from drivers/net/bnx2x_fw_defs.h with 100% similarity]
drivers/net/bnx2x/bnx2x_fw_file_hdr.h [moved from drivers/net/bnx2x_fw_file_hdr.h with 100% similarity]
drivers/net/bnx2x/bnx2x_hsi.h [moved from drivers/net/bnx2x_hsi.h with 100% similarity]
drivers/net/bnx2x/bnx2x_init.h [moved from drivers/net/bnx2x_init.h with 100% similarity]
drivers/net/bnx2x/bnx2x_init_ops.h [moved from drivers/net/bnx2x_init_ops.h with 100% similarity]
drivers/net/bnx2x/bnx2x_link.c [moved from drivers/net/bnx2x_link.c with 100% similarity]
drivers/net/bnx2x/bnx2x_link.h [moved from drivers/net/bnx2x_link.h with 100% similarity]
drivers/net/bnx2x/bnx2x_main.c [moved from drivers/net/bnx2x_main.c with 56% similarity]
drivers/net/bnx2x/bnx2x_reg.h [moved from drivers/net/bnx2x_reg.h with 100% similarity]
drivers/net/bnx2x/bnx2x_stats.c [new file with mode: 0644]
drivers/net/bnx2x/bnx2x_stats.h [new file with mode: 0644]
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_sysfs.c
drivers/net/caif/caif_spi.c
drivers/net/can/Kconfig
drivers/net/can/Makefile
drivers/net/can/flexcan.c [new file with mode: 0644]
drivers/net/can/usb/Kconfig
drivers/net/can/usb/Makefile
drivers/net/can/usb/esd_usb2.c [new file with mode: 0644]
drivers/net/cnic.c
drivers/net/cxgb3/t3_hw.c
drivers/net/cxgb4/cxgb4.h
drivers/net/cxgb4/cxgb4_main.c
drivers/net/cxgb4/cxgb4_uld.h
drivers/net/cxgb4/sge.c
drivers/net/cxgb4/t4_hw.c
drivers/net/cxgb4/t4_hw.h
drivers/net/cxgb4/t4_msg.h
drivers/net/cxgb4/t4_regs.h
drivers/net/cxgb4/t4fw_api.h
drivers/net/davinci_emac.c
drivers/net/dnet.c
drivers/net/e1000/e1000.h
drivers/net/e1000/e1000_ethtool.c
drivers/net/e1000/e1000_main.c
drivers/net/e1000e/e1000.h
drivers/net/e1000e/hw.h
drivers/net/e1000e/netdev.c
drivers/net/ethoc.c
drivers/net/fec.c
drivers/net/forcedeth.c
drivers/net/hp100.c
drivers/net/igb/e1000_82575.c
drivers/net/igb/e1000_defines.h
drivers/net/igb/igb_main.c
drivers/net/igbvf/netdev.c
drivers/net/irda/smsc-ircc2.c
drivers/net/ixgbe/ixgbe_ethtool.c
drivers/net/ixgbe/ixgbe_main.c
drivers/net/ixgbevf/ixgbevf_main.c
drivers/net/ks8842.c
drivers/net/ksz884x.c
drivers/net/macvlan.c
drivers/net/macvtap.c
drivers/net/mv643xx_eth.c
drivers/net/phy/marvell.c
drivers/net/qla3xxx.c
drivers/net/qlcnic/qlcnic.h
drivers/net/qlcnic/qlcnic_ethtool.c
drivers/net/qlcnic/qlcnic_main.c
drivers/net/qlge/qlge.h
drivers/net/qlge/qlge_dbg.c
drivers/net/r6040.c
drivers/net/s2io.c
drivers/net/s2io.h
drivers/net/sky2.c
drivers/net/stmmac/common.h
drivers/net/stmmac/dwmac1000.h
drivers/net/stmmac/dwmac1000_core.c
drivers/net/stmmac/dwmac100_core.c
drivers/net/stmmac/enh_desc.c
drivers/net/stmmac/stmmac_main.c
drivers/net/tg3.c
drivers/net/tg3.h
drivers/net/tulip/tulip_core.c
drivers/net/tun.c
drivers/net/ucc_geth.c
drivers/net/usb/hso.c
drivers/net/usb/usbnet.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/net/vxge/vxge-main.c
drivers/net/wan/farsync.c
drivers/net/wimax/i2400m/i2400m-usb.h
drivers/net/wimax/i2400m/usb.c
drivers/net/wireless/ath/ath9k/ar9002_calib.c
drivers/net/wireless/ath/ath9k/ar9003_calib.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
drivers/net/wireless/ath/ath9k/ar9003_paprd.c
drivers/net/wireless/ath/ath9k/ar9003_phy.c
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/calib.c
drivers/net/wireless/ath/ath9k/calib.h
drivers/net/wireless/ath/ath9k/htc.h
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/hw.h
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ipw2x00/ipw2100.c
drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
drivers/net/wireless/iwlwifi/iwl-agn-rs.c
drivers/net/wireless/iwlwifi/iwl-agn-tx.c
drivers/net/wireless/iwlwifi/iwl-core.c
drivers/net/wireless/iwlwifi/iwl-debug.h
drivers/net/wireless/p54/p54pci.c
drivers/net/wireless/ray_cs.c
drivers/net/wireless/rt2x00/rt2x00pci.c
drivers/net/wireless/wl12xx/wl1271_spi.c
drivers/s390/net/claw.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_core_sys.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3.h
drivers/s390/net/qeth_l3_main.c
drivers/s390/net/qeth_l3_sys.c
drivers/vhost/net.c
drivers/vhost/vhost.c
drivers/vhost/vhost.h
include/linux/Kbuild
include/linux/can/platform/flexcan.h [new file with mode: 0644]
include/linux/cgroup.h
include/linux/etherdevice.h
include/linux/if_macvlan.h
include/linux/ks8842.h
include/linux/netdevice.h
include/linux/netfilter/Kbuild
include/linux/netfilter/nfnetlink_log.h
include/linux/netfilter/xt_CHECKSUM.h [new file with mode: 0644]
include/linux/netfilter/xt_cpu.h [new file with mode: 0644]
include/linux/netfilter/xt_ipvs.h [new file with mode: 0644]
include/linux/netfilter/xt_quota.h
include/linux/pci_ids.h
include/linux/rtnetlink.h
include/linux/skbuff.h
include/net/ip_vs.h
include/net/irda/irda.h
include/net/irda/irlap_frame.h
include/net/mac80211.h
include/net/netfilter/nf_conntrack_extend.h
include/net/netfilter/nf_nat_protocol.h
include/net/netfilter/nfnetlink_log.h
include/net/tc_act/tc_mirred.h
kernel/cgroup.c
net/Kconfig
net/bridge/br_device.c
net/bridge/br_fdb.c
net/bridge/br_input.c
net/bridge/br_multicast.c
net/bridge/br_stp_bpdu.c
net/caif/cfrfml.c
net/can/raw.c
net/core/dev.c
net/core/drop_monitor.c
net/core/net-sysfs.c
net/core/netpoll.c
net/core/pktgen.c
net/core/skbuff.c
net/ipv4/ip_output.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/netfilter/ipt_REJECT.c
net/ipv4/netfilter/nf_nat_core.c
net/ipv4/netfilter/nf_nat_proto_common.c
net/ipv4/netfilter/nf_nat_proto_dccp.c
net/ipv4/netfilter/nf_nat_proto_gre.c
net/ipv4/netfilter/nf_nat_proto_icmp.c
net/ipv4/netfilter/nf_nat_proto_sctp.c
net/ipv4/netfilter/nf_nat_proto_tcp.c
net/ipv4/netfilter/nf_nat_proto_udp.c
net/ipv4/netfilter/nf_nat_proto_udplite.c
net/ipv4/netfilter/nf_nat_proto_unknown.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv6/addrconf.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/mac80211/cfg.c
net/mac80211/main.c
net/mac80211/scan.c
net/netfilter/Kconfig
net/netfilter/Makefile
net/netfilter/ipvs/Kconfig
net/netfilter/ipvs/ip_vs_app.c
net/netfilter/ipvs/ip_vs_conn.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_ftp.c
net/netfilter/ipvs/ip_vs_proto.c
net/netfilter/ipvs/ip_vs_proto_sctp.c
net/netfilter/ipvs/ip_vs_proto_tcp.c
net/netfilter/ipvs/ip_vs_proto_udp.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_extend.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/xt_CHECKSUM.c [new file with mode: 0644]
net/netfilter/xt_TPROXY.c
net/netfilter/xt_cpu.c [new file with mode: 0644]
net/netfilter/xt_ipvs.c [new file with mode: 0644]
net/netfilter/xt_quota.c
net/netlink/af_netlink.c
net/netlink/genetlink.c
net/rose/rose_route.c
net/sched/act_mirred.c
net/sched/act_nat.c
net/sched/cls_u32.c

index b1b9298435586c648a52334039df0012848b039e..b04b97fe32175abedd065a7dbe8434026e3f6be8 100644 (file)
@@ -1365,7 +1365,7 @@ BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
 M:     Eilon Greenstein <eilong@broadcom.com>
 L:     netdev@vger.kernel.org
 S:     Supported
-F:     drivers/net/bnx2x*
+F:     drivers/net/bnx2x/
 
 BROADCOM TG3 GIGABIT ETHERNET DRIVER
 M:     Matt Carlson <mcarlson@broadcom.com>
index f05372694233a55734500b8aa3578a0338f00d43..2ab233ba32c1564f8323884017108b0d53978366 100644 (file)
 #include "net_kern.h"
 #include "net_user.h"
 
-static inline void set_ether_mac(struct net_device *dev, unsigned char *addr)
-{
-       memcpy(dev->dev_addr, addr, ETH_ALEN);
-}
-
 #define DRIVER_NAME "uml-netdev"
 
 static DEFINE_SPINLOCK(opened_lock);
@@ -266,7 +261,7 @@ static int uml_net_set_mac(struct net_device *dev, void *addr)
        struct sockaddr *hwaddr = addr;
 
        spin_lock_irq(&lp->lock);
-       set_ether_mac(dev, hwaddr->sa_data);
+       eth_mac_addr(dev, hwaddr->sa_data);
        spin_unlock_irq(&lp->lock);
 
        return 0;
@@ -380,7 +375,6 @@ static const struct net_device_ops uml_netdev_ops = {
        .ndo_tx_timeout         = uml_net_tx_timeout,
        .ndo_set_mac_address    = uml_net_set_mac,
        .ndo_change_mtu         = uml_net_change_mtu,
-       .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
 };
 
@@ -478,7 +472,7 @@ static void eth_configure(int n, void *init, char *mac,
            ((*transport->user->init)(&lp->user, dev) != 0))
                goto out_unregister;
 
-       set_ether_mac(dev, device->mac);
+       eth_mac_addr(dev, device->mac);
        dev->mtu = transport->user->mtu;
        dev->netdev_ops = &uml_netdev_ops;
        dev->ethtool_ops = &uml_net_ethtool_ops;
index 729a149b6b2ba90ae8f081e4345135e7523c6e9b..2f3516b7f118cd59e7b9156ba1b004a6caee6c86 100644 (file)
@@ -154,7 +154,6 @@ static void which_list(ns_dev * card, struct sk_buff *skb);
 #endif
 static void ns_poll(unsigned long arg);
 static int ns_parse_mac(char *mac, unsigned char *esi);
-static short ns_h2i(char c);
 static void ns_phy_put(struct atm_dev *dev, unsigned char value,
                       unsigned long addr);
 static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr);
@@ -2824,9 +2823,9 @@ static int ns_parse_mac(char *mac, unsigned char *esi)
                return -1;
        j = 0;
        for (i = 0; i < 6; i++) {
-               if ((byte1 = ns_h2i(mac[j++])) < 0)
+               if ((byte1 = hex_to_bin(mac[j++])) < 0)
                        return -1;
-               if ((byte0 = ns_h2i(mac[j++])) < 0)
+               if ((byte0 = hex_to_bin(mac[j++])) < 0)
                        return -1;
                esi[i] = (unsigned char)(byte1 * 16 + byte0);
                if (i < 5) {
@@ -2837,16 +2836,6 @@ static int ns_parse_mac(char *mac, unsigned char *esi)
        return 0;
 }
 
-static short ns_h2i(char c)
-{
-       if (c >= '0' && c <= '9')
-               return (short)(c - '0');
-       if (c >= 'A' && c <= 'F')
-               return (short)(c - 'A' + 10);
-       if (c >= 'a' && c <= 'f')
-               return (short)(c - 'a' + 10);
-       return -1;
-}
 
 static void ns_phy_put(struct atm_dev *dev, unsigned char value,
                       unsigned long addr)
index 069a03f717d304e1c9e2b1e0387181b612118661..c754d88e5ec92d0af82d80094a4576256a50dc61 100644 (file)
@@ -1020,10 +1020,16 @@ static int __devinit vortex_init_one(struct pci_dev *pdev,
        ioaddr = pci_iomap(pdev, pci_bar, 0);
        if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */
                ioaddr = pci_iomap(pdev, 0, 0);
+       if (!ioaddr) {
+               pci_disable_device(pdev);
+               rc = -ENOMEM;
+               goto out;
+       }
 
        rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq,
                           ent->driver_data, unit);
        if (rc < 0) {
+               pci_iounmap(pdev, ioaddr);
                pci_disable_device(pdev);
                goto out;
        }
@@ -1387,7 +1393,7 @@ static int __devinit vortex_probe1(struct device *gendev,
                mii_preamble_required++;
                if (vp->drv_flags & EXTRA_PREAMBLE)
                        mii_preamble_required++;
-               mdio_sync(ioaddr, 32);
+               mdio_sync(vp, 32);
                mdio_read(dev, 24, MII_BMSR);
                for (phy = 0; phy < 32 && phy_idx < 1; phy++) {
                        int mii_status, phyx;
@@ -2912,6 +2918,36 @@ static void vortex_get_drvinfo(struct net_device *dev,
        }
 }
 
+static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+       struct vortex_private *vp = netdev_priv(dev);
+
+       spin_lock_irq(&vp->lock);
+       wol->supported = WAKE_MAGIC;
+
+       wol->wolopts = 0;
+       if (vp->enable_wol)
+               wol->wolopts |= WAKE_MAGIC;
+       spin_unlock_irq(&vp->lock);
+}
+
+static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+       struct vortex_private *vp = netdev_priv(dev);
+       if (wol->wolopts & ~WAKE_MAGIC)
+               return -EINVAL;
+
+       spin_lock_irq(&vp->lock);
+       if (wol->wolopts & WAKE_MAGIC)
+               vp->enable_wol = 1;
+       else
+               vp->enable_wol = 0;
+       acpi_set_WOL(dev);
+       spin_unlock_irq(&vp->lock);
+
+       return 0;
+}
+
 static const struct ethtool_ops vortex_ethtool_ops = {
        .get_drvinfo            = vortex_get_drvinfo,
        .get_strings            = vortex_get_strings,
@@ -2923,6 +2959,8 @@ static const struct ethtool_ops vortex_ethtool_ops = {
        .set_settings           = vortex_set_settings,
        .get_link               = ethtool_op_get_link,
        .nway_reset             = vortex_nway_reset,
+       .get_wol                = vortex_get_wol,
+       .set_wol                = vortex_set_wol,
 };
 
 #ifdef CONFIG_PCI
index 8c13df7fdc16110c6d4d9551f8ef2acecbf745a7..ebe68395ecf8c98cd292052d6e78cf5ad9f4ff31 100644 (file)
@@ -1751,7 +1751,7 @@ config TLAN
 
 config KS8842
        tristate "Micrel KSZ8841/42 with generic bus interface"
-       depends on HAS_IOMEM
+       depends on HAS_IOMEM && DMA_ENGINE
        help
         This platform driver is for KSZ8841(1-port) / KS8842(2-port)
         ethernet switch chip (managed, VLAN, QoS) from Micrel or
index ce555819c8fceb7f386ae84c9b7fb0a924b72758..56e8c27f77cebe9ef3b9ac3c1284f7e2975f8448 100644 (file)
@@ -84,8 +84,7 @@ obj-$(CONFIG_FEALNX) += fealnx.o
 obj-$(CONFIG_TIGON3) += tg3.o
 obj-$(CONFIG_BNX2) += bnx2.o
 obj-$(CONFIG_CNIC) += cnic.o
-obj-$(CONFIG_BNX2X) += bnx2x.o
-bnx2x-objs := bnx2x_main.o bnx2x_link.o
+obj-$(CONFIG_BNX2X) += bnx2x/
 spidernet-y += spider_net.o spider_net_ethtool.o
 obj-$(CONFIG_SPIDER_NET) += spidernet.o sungem_phy.o
 obj-$(CONFIG_GELIC_NET) += ps3_gelic.o
index f17428caecf1974a390e789a896056ad77ecdf2d..99197bd54da558ef26cf8a62a54af19aaa02e0f8 100644 (file)
@@ -33,7 +33,7 @@
 
 #include "be_hw.h"
 
-#define DRV_VER                        "2.102.147u"
+#define DRV_VER                        "2.103.175u"
 #define DRV_NAME               "be2net"
 #define BE_NAME                        "ServerEngines BladeEngine2 10Gbps NIC"
 #define BE3_NAME               "ServerEngines BladeEngine3 10Gbps NIC"
@@ -220,7 +220,16 @@ struct be_rx_obj {
        struct be_rx_page_info page_info_tbl[RX_Q_LEN];
 };
 
+struct be_vf_cfg {
+       unsigned char vf_mac_addr[ETH_ALEN];
+       u32 vf_if_handle;
+       u32 vf_pmac_id;
+       u16 vf_vlan_tag;
+       u32 vf_tx_rate;
+};
+
 #define BE_NUM_MSIX_VECTORS            2       /* 1 each for Tx and Rx */
+#define BE_INVALID_PMAC_ID             0xffffffff
 struct be_adapter {
        struct pci_dev *pdev;
        struct net_device *netdev;
@@ -276,9 +285,11 @@ struct be_adapter {
        u32 port_num;
        bool promiscuous;
        bool wol;
-       u32 cap;
+       u32 function_mode;
        u32 rx_fc;              /* Rx flow control */
        u32 tx_fc;              /* Tx flow control */
+       bool ue_detected;
+       bool stats_ioctl_sent;
        int link_speed;
        u8 port_type;
        u8 transceiver;
@@ -288,8 +299,7 @@ struct be_adapter {
        struct completion flash_compl;
 
        bool sriov_enabled;
-       u32 vf_if_handle[BE_MAX_VF];
-       u32 vf_pmac_id[BE_MAX_VF];
+       struct be_vf_cfg vf_cfg[BE_MAX_VF];
        u8 base_eq_id;
        u8 is_virtfn;
 };
index 344e062b7f25edad262ac4edc3acc2f29ac9ea36..3d305494a6066fb987510abebe34a1332f80c114 100644 (file)
@@ -75,8 +75,10 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
                        be_dws_le_to_cpu(&resp->hw_stats,
                                                sizeof(resp->hw_stats));
                        netdev_stats_update(adapter);
+                       adapter->stats_ioctl_sent = false;
                }
-       } else if (compl_status != MCC_STATUS_NOT_SUPPORTED) {
+       } else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) &&
+                  (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) {
                extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
                                CQE_STATUS_EXTD_MASK;
                dev_warn(&adapter->pdev->dev,
@@ -205,6 +207,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
 
                if (msecs > 4000) {
                        dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
+                       be_dump_ue(adapter);
                        return -1;
                }
 
@@ -949,6 +952,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
        sge->len = cpu_to_le32(nonemb_cmd->size);
 
        be_mcc_notify(adapter);
+       adapter->stats_ioctl_sent = true;
 
 err:
        spin_unlock_bh(&adapter->mcc_lock);
@@ -1257,7 +1261,7 @@ err:
 }
 
 /* Uses mbox */
-int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
+int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *mode)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_query_fw_cfg *req;
@@ -1278,7 +1282,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
        if (!status) {
                struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
                *port_num = le32_to_cpu(resp->phys_port);
-               *cap = le32_to_cpu(resp->function_cap);
+               *mode = le32_to_cpu(resp->function_mode);
        }
 
        spin_unlock(&adapter->mbox_lock);
@@ -1730,3 +1734,36 @@ err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
 }
+
+int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
+{
+       struct be_mcc_wrb *wrb;
+       struct be_cmd_req_set_qos *req;
+       int status;
+
+       spin_lock_bh(&adapter->mcc_lock);
+
+       wrb = wrb_from_mccq(adapter);
+       if (!wrb) {
+               status = -EBUSY;
+               goto err;
+       }
+
+       req = embedded_payload(wrb);
+
+       be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+                               OPCODE_COMMON_SET_QOS);
+
+       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+                       OPCODE_COMMON_SET_QOS, sizeof(*req));
+
+       req->hdr.domain = domain;
+       req->valid_bits = BE_QOS_BITS_NIC;
+       req->max_bps_nic = bps;
+
+       status = be_mcc_notify_wait(adapter);
+
+err:
+       spin_unlock_bh(&adapter->mcc_lock);
+       return status;
+}
index 912a0586f060ef5499a6e924ea49c0ed0826ca27..bdc10a28cfda9feb11ffdecee141a6e333a9e92d 100644 (file)
@@ -124,6 +124,7 @@ struct be_mcc_mailbox {
 #define OPCODE_COMMON_CQ_CREATE                                12
 #define OPCODE_COMMON_EQ_CREATE                                13
 #define OPCODE_COMMON_MCC_CREATE                       21
+#define OPCODE_COMMON_SET_QOS                          28
 #define OPCODE_COMMON_SEEPROM_READ                     30
 #define OPCODE_COMMON_NTWK_RX_FILTER                   34
 #define OPCODE_COMMON_GET_FW_VERSION                   35
@@ -748,7 +749,7 @@ struct be_cmd_resp_query_fw_cfg {
        u32 be_config_number;
        u32 asic_revision;
        u32 phys_port;
-       u32 function_cap;
+       u32 function_mode;
        u32 rsvd[26];
 };
 
@@ -894,6 +895,22 @@ struct be_cmd_resp_get_phy_info {
        u32 future_use[4];
 };
 
+/*********************** Set QOS ***********************/
+
+#define BE_QOS_BITS_NIC                                1
+
+struct be_cmd_req_set_qos {
+       struct be_cmd_req_hdr hdr;
+       u32 valid_bits;
+       u32 max_bps_nic;
+       u32 rsvd[7];
+};
+
+struct be_cmd_resp_set_qos {
+       struct be_cmd_resp_hdr hdr;
+       u32 rsvd;
+};
+
 extern int be_pci_fnum_get(struct be_adapter *adapter);
 extern int be_cmd_POST(struct be_adapter *adapter);
 extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -974,4 +991,6 @@ extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
                                u8 loopback_type, u8 enable);
 extern int be_cmd_get_phy_info(struct be_adapter *adapter,
                struct be_dma_mem *cmd);
+extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
+extern void be_dump_ue(struct be_adapter *adapter);
 
index c0ade242895d36b56412b94dfb3b85fcf9d7114c..cd16243c7c364a858d849ac3f70ac10bbbfcb966 100644 (file)
@@ -322,10 +322,11 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
        int status;
        u16 intf_type;
 
-       if (adapter->link_speed < 0) {
+       if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) {
                status = be_cmd_link_status_query(adapter, &link_up,
                                                &mac_speed, &link_speed);
 
+               be_link_status_update(adapter, link_up);
                /* link_speed is in units of 10 Mbps */
                if (link_speed) {
                        ecmd->speed = link_speed*10;
index 06839676e3c474fa598f86f7ab93a5130b99453d..6c8f9bb8bfe67cfec9618e4690a6a068ee32855f 100644 (file)
 #define PCICFG_PM_CONTROL_OFFSET               0x44
 #define PCICFG_PM_CONTROL_MASK                 0x108   /* bits 3 & 8 */
 
+/********* Online Control Registers *******/
+#define PCICFG_ONLINE0                         0xB0
+#define PCICFG_ONLINE1                         0xB4
+
+/********* UE Status and Mask Registers ***/
+#define PCICFG_UE_STATUS_LOW                   0xA0
+#define PCICFG_UE_STATUS_HIGH                  0xA4
+#define PCICFG_UE_STATUS_LOW_MASK              0xA8
+#define PCICFG_UE_STATUS_HI_MASK               0xAC
+
 /********* ISR0 Register offset **********/
 #define CEV_ISR0_OFFSET                        0xC18
 #define CEV_ISR_SIZE                           4
index e6ca92334d6d0bbb72de8ddc03930388986efaf5..74e146f470c60e9df5ff01806623a0aaaaa0ec82 100644 (file)
@@ -40,6 +40,76 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { 0 }
 };
 MODULE_DEVICE_TABLE(pci, be_dev_ids);
+/* UE Status Low CSR */
+static char *ue_status_low_desc[] = {
+       "CEV",
+       "CTX",
+       "DBUF",
+       "ERX",
+       "Host",
+       "MPU",
+       "NDMA",
+       "PTC ",
+       "RDMA ",
+       "RXF ",
+       "RXIPS ",
+       "RXULP0 ",
+       "RXULP1 ",
+       "RXULP2 ",
+       "TIM ",
+       "TPOST ",
+       "TPRE ",
+       "TXIPS ",
+       "TXULP0 ",
+       "TXULP1 ",
+       "UC ",
+       "WDMA ",
+       "TXULP2 ",
+       "HOST1 ",
+       "P0_OB_LINK ",
+       "P1_OB_LINK ",
+       "HOST_GPIO ",
+       "MBOX ",
+       "AXGMAC0",
+       "AXGMAC1",
+       "JTAG",
+       "MPU_INTPEND"
+};
+/* UE Status High CSR */
+static char *ue_status_hi_desc[] = {
+       "LPCMEMHOST",
+       "MGMT_MAC",
+       "PCS0ONLINE",
+       "MPU_IRAM",
+       "PCS1ONLINE",
+       "PCTL0",
+       "PCTL1",
+       "PMEM",
+       "RR",
+       "TXPB",
+       "RXPP",
+       "XAUI",
+       "TXP",
+       "ARM",
+       "IPC",
+       "HOST2",
+       "HOST3",
+       "HOST4",
+       "HOST5",
+       "HOST6",
+       "HOST7",
+       "HOST8",
+       "HOST9",
+       "NETC"
+       "Unknown",
+       "Unknown",
+       "Unknown",
+       "Unknown",
+       "Unknown",
+       "Unknown",
+       "Unknown",
+       "Unknown"
+};
 
 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
 {
@@ -552,11 +622,18 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
  * If the user configures more, place BE in vlan promiscuous mode.
  */
-static int be_vid_config(struct be_adapter *adapter)
+static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
 {
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
+       u32 if_handle;
+
+       if (vf) {
+               if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
+               vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
+               status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
+       }
 
        if (adapter->vlans_added <= adapter->max_vlans)  {
                /* Construct VLAN Table to give to HW */
@@ -572,6 +649,7 @@ static int be_vid_config(struct be_adapter *adapter)
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }
+
        return status;
 }
 
@@ -592,27 +670,28 @@ static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
+       adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;
 
        adapter->vlan_tag[vid] = 1;
-       adapter->vlans_added++;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
-               be_vid_config(adapter);
+               be_vid_config(adapter, false, 0);
 }
 
 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
+       adapter->vlans_added--;
+       vlan_group_set_device(adapter->vlan_grp, vid, NULL);
+
        if (!be_physfn(adapter))
                return;
 
        adapter->vlan_tag[vid] = 0;
-       vlan_group_set_device(adapter->vlan_grp, vid, NULL);
-       adapter->vlans_added--;
        if (adapter->vlans_added <= adapter->max_vlans)
-               be_vid_config(adapter);
+               be_vid_config(adapter, false, 0);
 }
 
 static void be_set_multicast_list(struct net_device *netdev)
@@ -656,14 +735,93 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;
 
-       status = be_cmd_pmac_del(adapter, adapter->vf_if_handle[vf],
-                               adapter->vf_pmac_id[vf]);
+       if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
+               status = be_cmd_pmac_del(adapter,
+                                       adapter->vf_cfg[vf].vf_if_handle,
+                                       adapter->vf_cfg[vf].vf_pmac_id);
 
-       status = be_cmd_pmac_add(adapter, mac, adapter->vf_if_handle[vf],
-                               &adapter->vf_pmac_id[vf]);
-       if (!status)
+       status = be_cmd_pmac_add(adapter, mac,
+                               adapter->vf_cfg[vf].vf_if_handle,
+                               &adapter->vf_cfg[vf].vf_pmac_id);
+
+       if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
+       else
+               memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
+
+       return status;
+}
+
+static int be_get_vf_config(struct net_device *netdev, int vf,
+                       struct ifla_vf_info *vi)
+{
+       struct be_adapter *adapter = netdev_priv(netdev);
+
+       if (!adapter->sriov_enabled)
+               return -EPERM;
+
+       if (vf >= num_vfs)
+               return -EINVAL;
+
+       vi->vf = vf;
+       vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
+       vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
+       vi->qos = 0;
+       memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
+
+       return 0;
+}
+
+static int be_set_vf_vlan(struct net_device *netdev,
+                       int vf, u16 vlan, u8 qos)
+{
+       struct be_adapter *adapter = netdev_priv(netdev);
+       int status = 0;
+
+       if (!adapter->sriov_enabled)
+               return -EPERM;
+
+       if ((vf >= num_vfs) || (vlan > 4095))
+               return -EINVAL;
+
+       if (vlan) {
+               adapter->vf_cfg[vf].vf_vlan_tag = vlan;
+               adapter->vlans_added++;
+       } else {
+               adapter->vf_cfg[vf].vf_vlan_tag = 0;
+               adapter->vlans_added--;
+       }
+
+       status = be_vid_config(adapter, true, vf);
+
+       if (status)
+               dev_info(&adapter->pdev->dev,
+                               "VLAN %d config on VF %d failed\n", vlan, vf);
+       return status;
+}
+
+static int be_set_vf_tx_rate(struct net_device *netdev,
+                       int vf, int rate)
+{
+       struct be_adapter *adapter = netdev_priv(netdev);
+       int status = 0;
+
+       if (!adapter->sriov_enabled)
+               return -EPERM;
+
+       if ((vf >= num_vfs) || (rate < 0))
+               return -EINVAL;
+
+       if (rate > 10000)
+               rate = 10000;
+
+       adapter->vf_cfg[vf].vf_tx_rate = rate;
+       status = be_cmd_set_qos(adapter, rate / 10, vf);
+
+       if (status)
+               dev_info(&adapter->pdev->dev,
+                               "tx rate %d on VF %d failed\n", rate, vf);
        return status;
 }
 
@@ -875,7 +1033,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
 
        /* vlanf could be wrongly set in some cards.
         * ignore if vtm is not set */
-       if ((adapter->cap & 0x400) && !vtm)
+       if ((adapter->function_mode & 0x400) && !vtm)
                vlanf = 0;
 
        if (unlikely(vlanf)) {
@@ -915,7 +1073,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
 
        /* vlanf could be wrongly set in some cards.
         * ignore if vtm is not set */
-       if ((adapter->cap & 0x400) && !vtm)
+       if ((adapter->function_mode & 0x400) && !vtm)
                vlanf = 0;
 
        skb = napi_get_frags(&eq_obj->napi);
@@ -1585,12 +1743,66 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
        return 1;
 }
 
+static inline bool be_detect_ue(struct be_adapter *adapter)
+{
+       u32 online0 = 0, online1 = 0;
+
+       pci_read_config_dword(adapter->pdev, PCICFG_ONLINE0, &online0);
+
+       pci_read_config_dword(adapter->pdev, PCICFG_ONLINE1, &online1);
+
+       if (!online0 || !online1) {
+               adapter->ue_detected = true;
+               dev_err(&adapter->pdev->dev,
+                       "UE Detected!! online0=%d online1=%d\n",
+                       online0, online1);
+               return true;
+       }
+
+       return false;
+}
+
+void be_dump_ue(struct be_adapter *adapter)
+{
+       u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
+       u32 i;
+
+       pci_read_config_dword(adapter->pdev,
+                               PCICFG_UE_STATUS_LOW, &ue_status_lo);
+       pci_read_config_dword(adapter->pdev,
+                               PCICFG_UE_STATUS_HIGH, &ue_status_hi);
+       pci_read_config_dword(adapter->pdev,
+                               PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
+       pci_read_config_dword(adapter->pdev,
+                               PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
+
+       ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
+       ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
+
+       if (ue_status_lo) {
+               for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
+                       if (ue_status_lo & 1)
+                               dev_err(&adapter->pdev->dev,
+                               "UE: %s bit set\n", ue_status_low_desc[i]);
+               }
+       }
+       if (ue_status_hi) {
+               for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
+                       if (ue_status_hi & 1)
+                               dev_err(&adapter->pdev->dev,
+                               "UE: %s bit set\n", ue_status_hi_desc[i]);
+               }
+       }
+
+}
+
 static void be_worker(struct work_struct *work)
 {
        struct be_adapter *adapter =
                container_of(work, struct be_adapter, work.work);
 
-       be_cmd_get_stats(adapter, &adapter->stats.cmd);
+       if (!adapter->stats_ioctl_sent)
+               be_cmd_get_stats(adapter, &adapter->stats.cmd);
 
        /* Set EQ delay */
        be_rx_eqd_update(adapter);
@@ -1602,6 +1814,10 @@ static void be_worker(struct work_struct *work)
                adapter->rx_post_starved = false;
                be_post_rx_frags(adapter);
        }
+       if (!adapter->ue_detected) {
+               if (be_detect_ue(adapter))
+                       be_dump_ue(adapter);
+       }
 
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
 }
@@ -1629,10 +1845,11 @@ static void be_msix_enable(struct be_adapter *adapter)
 
 static void be_sriov_enable(struct be_adapter *adapter)
 {
-#ifdef CONFIG_PCI_IOV
-       int status;
        be_check_sriov_fn_type(adapter);
+#ifdef CONFIG_PCI_IOV
        if (be_physfn(adapter) && num_vfs) {
+               int status;
+
                status = pci_enable_sriov(adapter->pdev, num_vfs);
                adapter->sriov_enabled = status ? false : true;
        }
@@ -1822,7 +2039,7 @@ static int be_open(struct net_device *netdev)
        be_link_status_update(adapter, link_up);
 
        if (be_physfn(adapter)) {
-               status = be_vid_config(adapter);
+               status = be_vid_config(adapter, false, 0);
                if (status)
                        goto err;
 
@@ -1903,13 +2120,15 @@ static int be_setup(struct be_adapter *adapter)
                        cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
                                        | BE_IF_FLAGS_BROADCAST;
                        status = be_cmd_if_create(adapter, cap_flags, en_flags,
-                                       mac, true, &adapter->vf_if_handle[vf],
+                                       mac, true,
+                                       &adapter->vf_cfg[vf].vf_if_handle,
                                        NULL, vf+1);
                        if (status) {
                                dev_err(&adapter->pdev->dev,
                                "Interface Create failed for VF %d\n", vf);
                                goto if_destroy;
                        }
+                       adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
                        vf++;
                }
        } else if (!be_physfn(adapter)) {
@@ -1943,8 +2162,9 @@ tx_qs_destroy:
        be_tx_queues_destroy(adapter);
 if_destroy:
        for (vf = 0; vf < num_vfs; vf++)
-               if (adapter->vf_if_handle[vf])
-                       be_cmd_if_destroy(adapter, adapter->vf_if_handle[vf]);
+               if (adapter->vf_cfg[vf].vf_if_handle)
+                       be_cmd_if_destroy(adapter,
+                                       adapter->vf_cfg[vf].vf_if_handle);
        be_cmd_if_destroy(adapter, adapter->if_handle);
 do_none:
        return status;
@@ -2187,7 +2407,10 @@ static struct net_device_ops be_netdev_ops = {
        .ndo_vlan_rx_register   = be_vlan_register,
        .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
        .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
-       .ndo_set_vf_mac         = be_set_vf_mac
+       .ndo_set_vf_mac         = be_set_vf_mac,
+       .ndo_set_vf_vlan        = be_set_vf_vlan,
+       .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
+       .ndo_get_vf_config      = be_get_vf_config
 };
 
 static void be_netdev_init(struct net_device *netdev)
@@ -2406,7 +2629,7 @@ static int be_get_config(struct be_adapter *adapter)
                return status;
 
        status = be_cmd_query_fw_cfg(adapter,
-                               &adapter->port_num, &adapter->cap);
+                               &adapter->port_num, &adapter->function_mode);
        if (status)
                return status;
 
@@ -2426,7 +2649,7 @@ static int be_get_config(struct be_adapter *adapter)
                memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
        }
 
-       if (adapter->cap & 0x400)
+       if (adapter->function_mode & 0x400)
                adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
        else
                adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile
new file mode 100644 (file)
index 0000000..084afce
--- /dev/null
@@ -0,0 +1,7 @@
+#
+# Makefile for Broadcom 10-Gigabit ethernet driver
+#
+
+obj-$(CONFIG_BNX2X) += bnx2x.o
+
+bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o
similarity index 84%
rename from drivers/net/bnx2x.h
rename to drivers/net/bnx2x/bnx2x.h
index 8bd23687c530f2e244c7050091d14c05b8fa7901..53af9c93e75c3bca661abfb8dce7aabe96589f52 100644 (file)
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
+#define DRV_MODULE_VERSION      "1.52.53-3"
+#define DRV_MODULE_RELDATE      "2010/18/04"
+#define BNX2X_BC_VER            0x040200
+
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 #define BCM_VLAN                       1
 #endif
@@ -32,7 +36,7 @@
 
 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
 #define BCM_CNIC 1
-#include "cnic_if.h"
+#include "../cnic_if.h"
 #endif
 
 
 #endif
 
 #include <linux/mdio.h>
+#include <linux/pci.h>
 #include "bnx2x_reg.h"
 #include "bnx2x_fw_defs.h"
 #include "bnx2x_hsi.h"
 #include "bnx2x_link.h"
+#include "bnx2x_stats.h"
 
 /* error/debug prints */
 
@@ -106,6 +112,7 @@ do {                                                                 \
                dev_info(&bp->pdev->dev, __fmt, ##__args);       \
 } while (0)
 
+void bnx2x_panic_dump(struct bnx2x *bp);
 
 #ifdef BNX2X_STOP_ON_ERROR
 #define bnx2x_panic() do { \
@@ -248,43 +255,6 @@ union db_prod {
 #define NEXT_SGE_MASK_ELEM(el)         (((el) + 1) & RX_SGE_MASK_LEN_MASK)
 
 
-struct bnx2x_eth_q_stats {
-       u32 total_bytes_received_hi;
-       u32 total_bytes_received_lo;
-       u32 total_bytes_transmitted_hi;
-       u32 total_bytes_transmitted_lo;
-       u32 total_unicast_packets_received_hi;
-       u32 total_unicast_packets_received_lo;
-       u32 total_multicast_packets_received_hi;
-       u32 total_multicast_packets_received_lo;
-       u32 total_broadcast_packets_received_hi;
-       u32 total_broadcast_packets_received_lo;
-       u32 total_unicast_packets_transmitted_hi;
-       u32 total_unicast_packets_transmitted_lo;
-       u32 total_multicast_packets_transmitted_hi;
-       u32 total_multicast_packets_transmitted_lo;
-       u32 total_broadcast_packets_transmitted_hi;
-       u32 total_broadcast_packets_transmitted_lo;
-       u32 valid_bytes_received_hi;
-       u32 valid_bytes_received_lo;
-
-       u32 error_bytes_received_hi;
-       u32 error_bytes_received_lo;
-       u32 etherstatsoverrsizepkts_hi;
-       u32 etherstatsoverrsizepkts_lo;
-       u32 no_buff_discard_hi;
-       u32 no_buff_discard_lo;
-
-       u32 driver_xoff;
-       u32 rx_err_discard_pkt;
-       u32 rx_skb_alloc_failed;
-       u32 hw_csum_err;
-};
-
-#define BNX2X_NUM_Q_STATS              13
-#define Q_STATS_OFFSET32(stat_name) \
-                       (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
-
 struct bnx2x_fastpath {
 
        struct napi_struct      napi;
@@ -593,27 +563,6 @@ struct bnx2x_common {
 
 /* port */
 
-struct nig_stats {
-       u32 brb_discard;
-       u32 brb_packet;
-       u32 brb_truncate;
-       u32 flow_ctrl_discard;
-       u32 flow_ctrl_octets;
-       u32 flow_ctrl_packet;
-       u32 mng_discard;
-       u32 mng_octet_inp;
-       u32 mng_octet_out;
-       u32 mng_packet_inp;
-       u32 mng_packet_out;
-       u32 pbf_octets;
-       u32 pbf_packet;
-       u32 safc_inp;
-       u32 egress_mac_pkt0_lo;
-       u32 egress_mac_pkt0_hi;
-       u32 egress_mac_pkt1_lo;
-       u32 egress_mac_pkt1_hi;
-};
-
 struct bnx2x_port {
        u32                     pmf;
 
@@ -641,156 +590,6 @@ struct bnx2x_port {
 /* end of port */
 
 
-enum bnx2x_stats_event {
-       STATS_EVENT_PMF = 0,
-       STATS_EVENT_LINK_UP,
-       STATS_EVENT_UPDATE,
-       STATS_EVENT_STOP,
-       STATS_EVENT_MAX
-};
-
-enum bnx2x_stats_state {
-       STATS_STATE_DISABLED = 0,
-       STATS_STATE_ENABLED,
-       STATS_STATE_MAX
-};
-
-struct bnx2x_eth_stats {
-       u32 total_bytes_received_hi;
-       u32 total_bytes_received_lo;
-       u32 total_bytes_transmitted_hi;
-       u32 total_bytes_transmitted_lo;
-       u32 total_unicast_packets_received_hi;
-       u32 total_unicast_packets_received_lo;
-       u32 total_multicast_packets_received_hi;
-       u32 total_multicast_packets_received_lo;
-       u32 total_broadcast_packets_received_hi;
-       u32 total_broadcast_packets_received_lo;
-       u32 total_unicast_packets_transmitted_hi;
-       u32 total_unicast_packets_transmitted_lo;
-       u32 total_multicast_packets_transmitted_hi;
-       u32 total_multicast_packets_transmitted_lo;
-       u32 total_broadcast_packets_transmitted_hi;
-       u32 total_broadcast_packets_transmitted_lo;
-       u32 valid_bytes_received_hi;
-       u32 valid_bytes_received_lo;
-
-       u32 error_bytes_received_hi;
-       u32 error_bytes_received_lo;
-       u32 etherstatsoverrsizepkts_hi;
-       u32 etherstatsoverrsizepkts_lo;
-       u32 no_buff_discard_hi;
-       u32 no_buff_discard_lo;
-
-       u32 rx_stat_ifhcinbadoctets_hi;
-       u32 rx_stat_ifhcinbadoctets_lo;
-       u32 tx_stat_ifhcoutbadoctets_hi;
-       u32 tx_stat_ifhcoutbadoctets_lo;
-       u32 rx_stat_dot3statsfcserrors_hi;
-       u32 rx_stat_dot3statsfcserrors_lo;
-       u32 rx_stat_dot3statsalignmenterrors_hi;
-       u32 rx_stat_dot3statsalignmenterrors_lo;
-       u32 rx_stat_dot3statscarriersenseerrors_hi;
-       u32 rx_stat_dot3statscarriersenseerrors_lo;
-       u32 rx_stat_falsecarriererrors_hi;
-       u32 rx_stat_falsecarriererrors_lo;
-       u32 rx_stat_etherstatsundersizepkts_hi;
-       u32 rx_stat_etherstatsundersizepkts_lo;
-       u32 rx_stat_dot3statsframestoolong_hi;
-       u32 rx_stat_dot3statsframestoolong_lo;
-       u32 rx_stat_etherstatsfragments_hi;
-       u32 rx_stat_etherstatsfragments_lo;
-       u32 rx_stat_etherstatsjabbers_hi;
-       u32 rx_stat_etherstatsjabbers_lo;
-       u32 rx_stat_maccontrolframesreceived_hi;
-       u32 rx_stat_maccontrolframesreceived_lo;
-       u32 rx_stat_bmac_xpf_hi;
-       u32 rx_stat_bmac_xpf_lo;
-       u32 rx_stat_bmac_xcf_hi;
-       u32 rx_stat_bmac_xcf_lo;
-       u32 rx_stat_xoffstateentered_hi;
-       u32 rx_stat_xoffstateentered_lo;
-       u32 rx_stat_xonpauseframesreceived_hi;
-       u32 rx_stat_xonpauseframesreceived_lo;
-       u32 rx_stat_xoffpauseframesreceived_hi;
-       u32 rx_stat_xoffpauseframesreceived_lo;
-       u32 tx_stat_outxonsent_hi;
-       u32 tx_stat_outxonsent_lo;
-       u32 tx_stat_outxoffsent_hi;
-       u32 tx_stat_outxoffsent_lo;
-       u32 tx_stat_flowcontroldone_hi;
-       u32 tx_stat_flowcontroldone_lo;
-       u32 tx_stat_etherstatscollisions_hi;
-       u32 tx_stat_etherstatscollisions_lo;
-       u32 tx_stat_dot3statssinglecollisionframes_hi;
-       u32 tx_stat_dot3statssinglecollisionframes_lo;
-       u32 tx_stat_dot3statsmultiplecollisionframes_hi;
-       u32 tx_stat_dot3statsmultiplecollisionframes_lo;
-       u32 tx_stat_dot3statsdeferredtransmissions_hi;
-       u32 tx_stat_dot3statsdeferredtransmissions_lo;
-       u32 tx_stat_dot3statsexcessivecollisions_hi;
-       u32 tx_stat_dot3statsexcessivecollisions_lo;
-       u32 tx_stat_dot3statslatecollisions_hi;
-       u32 tx_stat_dot3statslatecollisions_lo;
-       u32 tx_stat_etherstatspkts64octets_hi;
-       u32 tx_stat_etherstatspkts64octets_lo;
-       u32 tx_stat_etherstatspkts65octetsto127octets_hi;
-       u32 tx_stat_etherstatspkts65octetsto127octets_lo;
-       u32 tx_stat_etherstatspkts128octetsto255octets_hi;
-       u32 tx_stat_etherstatspkts128octetsto255octets_lo;
-       u32 tx_stat_etherstatspkts256octetsto511octets_hi;
-       u32 tx_stat_etherstatspkts256octetsto511octets_lo;
-       u32 tx_stat_etherstatspkts512octetsto1023octets_hi;
-       u32 tx_stat_etherstatspkts512octetsto1023octets_lo;
-       u32 tx_stat_etherstatspkts1024octetsto1522octets_hi;
-       u32 tx_stat_etherstatspkts1024octetsto1522octets_lo;
-       u32 tx_stat_etherstatspktsover1522octets_hi;
-       u32 tx_stat_etherstatspktsover1522octets_lo;
-       u32 tx_stat_bmac_2047_hi;
-       u32 tx_stat_bmac_2047_lo;
-       u32 tx_stat_bmac_4095_hi;
-       u32 tx_stat_bmac_4095_lo;
-       u32 tx_stat_bmac_9216_hi;
-       u32 tx_stat_bmac_9216_lo;
-       u32 tx_stat_bmac_16383_hi;
-       u32 tx_stat_bmac_16383_lo;
-       u32 tx_stat_dot3statsinternalmactransmiterrors_hi;
-       u32 tx_stat_dot3statsinternalmactransmiterrors_lo;
-       u32 tx_stat_bmac_ufl_hi;
-       u32 tx_stat_bmac_ufl_lo;
-
-       u32 pause_frames_received_hi;
-       u32 pause_frames_received_lo;
-       u32 pause_frames_sent_hi;
-       u32 pause_frames_sent_lo;
-
-       u32 etherstatspkts1024octetsto1522octets_hi;
-       u32 etherstatspkts1024octetsto1522octets_lo;
-       u32 etherstatspktsover1522octets_hi;
-       u32 etherstatspktsover1522octets_lo;
-
-       u32 brb_drop_hi;
-       u32 brb_drop_lo;
-       u32 brb_truncate_hi;
-       u32 brb_truncate_lo;
-
-       u32 mac_filter_discard;
-       u32 xxoverflow_discard;
-       u32 brb_truncate_discard;
-       u32 mac_discard;
-
-       u32 driver_xoff;
-       u32 rx_err_discard_pkt;
-       u32 rx_skb_alloc_failed;
-       u32 hw_csum_err;
-
-       u32 nig_timer_max;
-};
-
-#define BNX2X_NUM_STATS                        43
-#define STATS_OFFSET32(stat_name) \
-                       (offsetof(struct bnx2x_eth_stats, stat_name) / 4)
-
 
 #ifdef BCM_CNIC
 #define MAX_CONTEXT                    15
@@ -1006,6 +805,8 @@ struct bnx2x {
 
        int                     multi_mode;
        int                     num_queues;
+       int                     disable_tpa;
+       int                     int_mode;
 
        u32                     rx_mode;
 #define BNX2X_RX_MODE_NONE             0
@@ -1062,6 +863,10 @@ struct bnx2x {
 
        /* used to synchronize stats collecting */
        int                     stats_state;
+
+       /* used for synchronization of concurrent threads statistics handling */
+       spinlock_t              stats_lock;
+
        /* used by dmae command loader */
        struct dmae_command     stats_dmae;
        int                     executer_idx;
@@ -1130,6 +935,10 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command);
 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
                               u32 addr, u32 len);
+void bnx2x_calc_fc_adv(struct bnx2x *bp);
+int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
+                 u32 data_hi, u32 data_lo, int common);
+void bnx2x_update_coalesce(struct bnx2x *bp);
 
 static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
                           int wait)
@@ -1371,6 +1180,18 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 #define BNX2X_VPD_LEN                  128
 #define VENDOR_ID_LEN                  4
 
+#ifdef BNX2X_MAIN
+#define BNX2X_EXTERN
+#else
+#define BNX2X_EXTERN extern
+#endif
+
+BNX2X_EXTERN int load_count[3]; /* 0-common, 1-port0, 2-port1 */
+
 /* MISC_REG_RESET_REG - this is here for the hsi to work don't touch */
 
+extern void bnx2x_set_ethtool_ops(struct net_device *netdev);
+
+void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
+
 #endif /* bnx2x.h */
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
new file mode 100644 (file)
index 0000000..02bf710
--- /dev/null
@@ -0,0 +1,2252 @@
+/* bnx2x_cmn.c: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+
+
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
+#include "bnx2x_cmn.h"
+
+#ifdef BCM_VLAN
+#include <linux/if_vlan.h>
+#endif
+
+static int bnx2x_poll(struct napi_struct *napi, int budget);
+
+/* free skb in the packet ring at pos idx
+ * return idx of last bd freed
+ */
+static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+                            u16 idx)
+{
+       struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
+       struct eth_tx_start_bd *tx_start_bd;
+       struct eth_tx_bd *tx_data_bd;
+       struct sk_buff *skb = tx_buf->skb;
+       u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
+       int nbd;
+
+       /* prefetch skb end pointer to speedup dev_kfree_skb() */
+       prefetch(&skb->end);
+
+       DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
+          idx, tx_buf, skb);
+
+       /* unmap first bd */
+       DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
+       tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
+       dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
+                        BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
+
+       nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
+#ifdef BNX2X_STOP_ON_ERROR
+       if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
+               BNX2X_ERR("BAD nbd!\n");
+               bnx2x_panic();
+       }
+#endif
+       new_cons = nbd + tx_buf->first_bd;
+
+       /* Get the next bd */
+       bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+
+       /* Skip a parse bd... */
+       --nbd;
+       bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+
+       /* ...and the TSO split header bd since they have no mapping */
+       if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
+               --nbd;
+               bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+       }
+
+       /* now free frags */
+       while (nbd > 0) {
+
+               DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
+               tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
+               dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
+                              BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
+               if (--nbd)
+                       bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+       }
+
+       /* release skb */
+       WARN_ON(!skb);
+       dev_kfree_skb(skb);
+       tx_buf->first_bd = 0;
+       tx_buf->skb = NULL;
+
+       return new_cons;
+}
+
+/* bnx2x_tx_int - service Tx completions for one fastpath queue.
+ *
+ * Walks from the driver's packet consumer up to the hardware consumer
+ * taken from the status block, freeing each completed packet, then
+ * publishes the new consumer values and, if the netdev queue was
+ * stopped and enough BDs are now free, wakes it again.
+ *
+ * Returns 0, or -1 if the chip has paniced (BNX2X_STOP_ON_ERROR only).
+ */
+int bnx2x_tx_int(struct bnx2x_fastpath *fp)
+{
+       struct bnx2x *bp = fp->bp;
+       struct netdev_queue *txq;
+       u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
+
+#ifdef BNX2X_STOP_ON_ERROR
+       if (unlikely(bp->panic))
+               return -1;
+#endif
+
+       txq = netdev_get_tx_queue(bp->dev, fp->index);
+       hw_cons = le16_to_cpu(*fp->tx_cons_sb);
+       sw_cons = fp->tx_pkt_cons;
+
+       while (sw_cons != hw_cons) {
+               u16 pkt_cons;
+
+               pkt_cons = TX_BD(sw_cons);
+
+               /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
+
+               DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
+                  hw_cons, sw_cons, pkt_cons);
+
+/*             if (NEXT_TX_IDX(sw_cons) != hw_cons) {
+                       rmb();
+                       prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
+               }
+*/
+               bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
+               sw_cons++;
+       }
+
+       fp->tx_pkt_cons = sw_cons;
+       fp->tx_bd_cons = bd_cons;
+
+       /* Need to make the tx_bd_cons update visible to start_xmit()
+        * before checking for netif_tx_queue_stopped().  Without the
+        * memory barrier, there is a small possibility that
+        * start_xmit() will miss it and cause the queue to be stopped
+        * forever.
+        */
+       smp_mb();
+
+       /* TBD need a thresh? */
+       if (unlikely(netif_tx_queue_stopped(txq))) {
+               /* Taking tx_lock() is needed to prevent reenabling the queue
+                * while it's empty. This could have happen if rx_action() gets
+                * suspended in bnx2x_tx_int() after the condition before
+                * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
+                *
+                * stops the queue->sees fresh tx_bd_cons->releases the queue->
+                * sends some packets consuming the whole queue again->
+                * stops the queue
+                */
+
+               __netif_tx_lock(txq, smp_processor_id());
+
+               if ((netif_tx_queue_stopped(txq)) &&
+                   (bp->state == BNX2X_STATE_OPEN) &&
+                   (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
+                       netif_tx_wake_queue(txq);
+
+               __netif_tx_unlock(txq);
+       }
+       return 0;
+}
+
+/* Record the highest SGE index seen so far for this fastpath.
+ * SUB_S16() presumably computes a signed 16-bit difference so the
+ * comparison stays correct across u16 index wrap-around — TODO confirm
+ * against the macro's definition in the header.
+ */
+static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
+                                            u16 idx)
+{
+       u16 last_max = fp->last_max_sge;
+
+       if (SUB_S16(idx, last_max) > 0)
+               fp->last_max_sge = idx;
+}
+
+/* bnx2x_update_sge_prod - recycle the SGE pages consumed by one CQE.
+ *
+ * Marks the SGE mask bits for every page the firmware used for this
+ * packet, then advances fp->rx_sge_prod over any fully-consumed mask
+ * elements so the pages become available to the hardware again.
+ */
+static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
+                                 struct eth_fast_path_rx_cqe *fp_cqe)
+{
+       struct bnx2x *bp = fp->bp;
+       /* number of SGE pages = bytes beyond the first BD, page-aligned */
+       u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
+                                    le16_to_cpu(fp_cqe->len_on_bd)) >>
+                     SGE_PAGE_SHIFT;
+       u16 last_max, last_elem, first_elem;
+       u16 delta = 0;
+       u16 i;
+
+       if (!sge_len)
+               return;
+
+       /* First mark all used pages */
+       for (i = 0; i < sge_len; i++)
+               SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
+
+       DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
+          sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
+
+       /* Here we assume that the last SGE index is the biggest */
+       prefetch((void *)(fp->sge_mask));
+       bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
+
+       last_max = RX_SGE(fp->last_max_sge);
+       last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
+       first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
+
+       /* If ring is not full */
+       if (last_elem + 1 != first_elem)
+               last_elem++;
+
+       /* Now update the prod: advance over mask elements that are
+        * completely consumed (all bits cleared), refilling each one */
+       for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
+               if (likely(fp->sge_mask[i]))
+                       break;
+
+               fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
+               delta += RX_SGE_MASK_ELEM_SZ;
+       }
+
+       if (delta > 0) {
+               fp->rx_sge_prod += delta;
+               /* clear page-end entries */
+               bnx2x_clear_sge_mask_next_elems(fp);
+       }
+
+       DP(NETIF_MSG_RX_STATUS,
+          "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
+          fp->last_max_sge, fp->rx_sge_prod);
+}
+
+/* bnx2x_tpa_start - open a TPA (LRO) aggregation bin.
+ *
+ * Swaps buffers: the spare skb kept in tpa_pool[queue] is mapped and
+ * placed at the producer slot of the Rx ring, while the skb at the
+ * consumer slot (holding the first part of the aggregated packet)
+ * moves into the pool, still mapped, until bnx2x_tpa_stop() completes
+ * the aggregation.
+ */
+static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
+                           struct sk_buff *skb, u16 cons, u16 prod)
+{
+       struct bnx2x *bp = fp->bp;
+       struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
+       struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
+       struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
+       dma_addr_t mapping;
+
+       /* move empty skb from pool to prod and map it */
+       /* NOTE(review): the dma_map_single() result is not checked with
+        * dma_mapping_error() — confirm this is acceptable here */
+       prod_rx_buf->skb = fp->tpa_pool[queue].skb;
+       mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
+                                bp->rx_buf_size, DMA_FROM_DEVICE);
+       dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
+
+       /* move partial skb from cons to pool (don't unmap yet) */
+       fp->tpa_pool[queue] = *cons_rx_buf;
+
+       /* mark bin state as start - print error if current state != stop */
+       if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
+               BNX2X_ERR("start of bin not in stop [%d]\n", queue);
+
+       fp->tpa_state[queue] = BNX2X_TPA_START;
+
+       /* point prod_bd to new skb */
+       prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+       prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+
+#ifdef BNX2X_STOP_ON_ERROR
+       fp->tpa_queue_used |= (1 << queue);
+#ifdef _ASM_GENERIC_INT_L64_H
+       DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
+#else
+       DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
+#endif
+          fp->tpa_queue_used);
+#endif
+}
+
+/* bnx2x_fill_frag_skb - attach the SGE pages of an aggregated packet
+ * to @skb as page fragments.
+ *
+ * For each SGE listed in the CQE: allocate a replacement page for the
+ * ring, unmap the old page and hand it to the skb via
+ * skb_fill_page_desc().  Also sets gso_size so the stack can re-segment
+ * the aggregated packet when forwarding.
+ *
+ * Returns 0 on success or a negative errno if a replacement page could
+ * not be allocated (the caller then drops the whole packet).
+ */
+static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+                              struct sk_buff *skb,
+                              struct eth_fast_path_rx_cqe *fp_cqe,
+                              u16 cqe_idx)
+{
+       struct sw_rx_page *rx_pg, old_rx_pg;
+       u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
+       u32 i, frag_len, frag_size, pages;
+       int err;
+       int j;
+
+       frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
+       pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
+
+       /* This is needed in order to enable forwarding support */
+       if (frag_size)
+               skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
+                                              max(frag_size, (u32)len_on_bd));
+
+#ifdef BNX2X_STOP_ON_ERROR
+       if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
+               BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
+                         pages, cqe_idx);
+               BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
+                         fp_cqe->pkt_len, len_on_bd);
+               bnx2x_panic();
+               return -EINVAL;
+       }
+#endif
+
+       /* Run through the SGL and compose the fragmented skb */
+       for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
+               u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
+
+               /* FW gives the indices of the SGE as if the ring is an array
+                  (meaning that "next" element will consume 2 indices) */
+               frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
+               rx_pg = &fp->rx_page_ring[sge_idx];
+               old_rx_pg = *rx_pg;
+
+               /* If we fail to allocate a substitute page, we simply stop
+                  where we are and drop the whole packet */
+               err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
+               if (unlikely(err)) {
+                       fp->eth_q_stats.rx_skb_alloc_failed++;
+                       return err;
+               }
+
+               /* Unmap the page as we are going to pass it to the stack */
+               dma_unmap_page(&bp->pdev->dev,
+                              dma_unmap_addr(&old_rx_pg, mapping),
+                              SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+
+               /* Add one frag and update the appropriate fields in the skb */
+               skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
+
+               skb->data_len += frag_len;
+               skb->truesize += frag_len;
+               skb->len += frag_len;
+
+               frag_size -= frag_len;
+       }
+
+       return 0;
+}
+
+/* bnx2x_tpa_stop - close a TPA aggregation bin and deliver the packet.
+ *
+ * Unmaps the skb held in tpa_pool[queue], fixes its IP checksum,
+ * attaches the SGE page fragments via bnx2x_fill_frag_skb() and passes
+ * it up through GRO (with VLAN acceleration when applicable).  A fresh
+ * skb is allocated to refill the pool slot; if that allocation fails
+ * the aggregated packet is dropped and the old buffer stays in the bin.
+ * In all cases the bin state returns to BNX2X_TPA_STOP.
+ */
+static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+                          u16 queue, int pad, int len, union eth_rx_cqe *cqe,
+                          u16 cqe_idx)
+{
+       struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
+       struct sk_buff *skb = rx_buf->skb;
+       /* alloc new skb */
+       struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+
+       /* Unmap skb in the pool anyway, as we are going to change
+          pool entry status to BNX2X_TPA_STOP even if new skb allocation
+          fails. */
+       dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
+                        bp->rx_buf_size, DMA_FROM_DEVICE);
+
+       if (likely(new_skb)) {
+               /* fix ip xsum and give it to the stack */
+               /* (no need to map the new skb) */
+#ifdef BCM_VLAN
+               int is_vlan_cqe =
+                       (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
+                        PARSING_FLAGS_VLAN);
+               int is_not_hwaccel_vlan_cqe =
+                       (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
+#endif
+
+               prefetch(skb);
+               prefetch(((char *)(skb)) + 128);
+
+#ifdef BNX2X_STOP_ON_ERROR
+               if (pad + len > bp->rx_buf_size) {
+                       BNX2X_ERR("skb_put is about to fail...  "
+                                 "pad %d  len %d  rx_buf_size %d\n",
+                                 pad, len, bp->rx_buf_size);
+                       bnx2x_panic();
+                       return;
+               }
+#endif
+
+               skb_reserve(skb, pad);
+               skb_put(skb, len);
+
+               skb->protocol = eth_type_trans(skb, bp->dev);
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+               {
+                       struct iphdr *iph;
+
+                       iph = (struct iphdr *)skb->data;
+#ifdef BCM_VLAN
+                       /* If there is no Rx VLAN offloading -
+                          take VLAN tag into an account */
+                       if (unlikely(is_not_hwaccel_vlan_cqe))
+                               iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
+#endif
+                       /* recompute the IP header checksum after aggregation */
+                       iph->check = 0;
+                       iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
+               }
+
+               if (!bnx2x_fill_frag_skb(bp, fp, skb,
+                                        &cqe->fast_path_cqe, cqe_idx)) {
+#ifdef BCM_VLAN
+                       if ((bp->vlgrp != NULL) && is_vlan_cqe &&
+                           (!is_not_hwaccel_vlan_cqe))
+                               vlan_gro_receive(&fp->napi, bp->vlgrp,
+                                                le16_to_cpu(cqe->fast_path_cqe.
+                                                            vlan_tag), skb);
+                       else
+#endif
+                               napi_gro_receive(&fp->napi, skb);
+               } else {
+                       DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
+                          " - dropping packet!\n");
+                       dev_kfree_skb(skb);
+               }
+
+
+               /* put new skb in bin */
+               fp->tpa_pool[queue].skb = new_skb;
+
+       } else {
+               /* else drop the packet and keep the buffer in the bin */
+               DP(NETIF_MSG_RX_STATUS,
+                  "Failed to allocate new skb - dropping packet!\n");
+               fp->eth_q_stats.rx_skb_alloc_failed++;
+       }
+
+       fp->tpa_state[queue] = BNX2X_TPA_STOP;
+}
+
+/* Set Toeplitz hash value in the skb using the value from the
+ * CQE (calculated by HW).  Only done when the netdev advertises
+ * NETIF_F_RXHASH and the CQE flags a valid RSS hash result.
+ */
+static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
+                                       struct sk_buff *skb)
+{
+       /* Set Toeplitz hash from CQE */
+       if ((bp->dev->features & NETIF_F_RXHASH) &&
+           (cqe->fast_path_cqe.status_flags &
+            ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
+               skb->rxhash =
+               le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
+}
+
+/* bnx2x_rx_int - service the Rx completion queue of one fastpath.
+ * @fp:     fastpath whose completion ring is processed
+ * @budget: NAPI budget - maximum number of network packets to pass up
+ *
+ * Walks the completion ring handling slowpath events, TPA start/stop
+ * aggregation CQEs and regular Rx packets, then publishes the updated
+ * producer indices to the chip.  Returns the number of Rx packets
+ * handed to the stack (<= @budget).
+ */
+int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
+{
+       struct bnx2x *bp = fp->bp;
+       u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
+       u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
+       int rx_pkt = 0;
+
+#ifdef BNX2X_STOP_ON_ERROR
+       if (unlikely(bp->panic))
+               return 0;
+#endif
+
+       /* CQ "next element" is of the size of the regular element,
+          that's why it's ok here */
+       hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
+       if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
+               hw_comp_cons++;
+
+       bd_cons = fp->rx_bd_cons;
+       bd_prod = fp->rx_bd_prod;
+       bd_prod_fw = bd_prod;
+       sw_comp_cons = fp->rx_comp_cons;
+       sw_comp_prod = fp->rx_comp_prod;
+
+       /* Memory barrier necessary as speculative reads of the rx
+        * buffer can be ahead of the index in the status block
+        */
+       rmb();
+
+       DP(NETIF_MSG_RX_STATUS,
+          "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
+          fp->index, hw_comp_cons, sw_comp_cons);
+
+       /* Consume CQEs until we catch up with the HW producer or run
+        * out of NAPI budget.
+        */
+       while (sw_comp_cons != hw_comp_cons) {
+               struct sw_rx_bd *rx_buf = NULL;
+               struct sk_buff *skb;
+               union eth_rx_cqe *cqe;
+               u8 cqe_fp_flags;
+               u16 len, pad;
+
+               comp_ring_cons = RCQ_BD(sw_comp_cons);
+               bd_prod = RX_BD(bd_prod);
+               bd_cons = RX_BD(bd_cons);
+
+               /* Prefetch the page containing the BD descriptor
+                  at producer's index. It will be needed when new skb is
+                  allocated */
+               prefetch((void *)(PAGE_ALIGN((unsigned long)
+                                            (&fp->rx_desc_ring[bd_prod])) -
+                                 PAGE_SIZE + 1));
+
+               cqe = &fp->rx_comp_ring[comp_ring_cons];
+               cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
+
+               DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
+                  "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
+                  cqe_fp_flags, cqe->fast_path_cqe.status_flags,
+                  le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
+                  le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
+                  le16_to_cpu(cqe->fast_path_cqe.pkt_len));
+
+               /* is this a slowpath msg? */
+               if (unlikely(CQE_TYPE(cqe_fp_flags))) {
+                       bnx2x_sp_event(fp, cqe);
+                       goto next_cqe;
+
+               /* this is an rx packet */
+               } else {
+                       rx_buf = &fp->rx_buf_ring[bd_cons];
+                       skb = rx_buf->skb;
+                       prefetch(skb);
+                       len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
+                       pad = cqe->fast_path_cqe.placement_offset;
+
+                       /* If CQE is marked both TPA_START and TPA_END
+                          it is a non-TPA CQE */
+                       if ((!fp->disable_tpa) &&
+                           (TPA_TYPE(cqe_fp_flags) !=
+                                       (TPA_TYPE_START | TPA_TYPE_END))) {
+                               u16 queue = cqe->fast_path_cqe.queue_index;
+
+                               if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
+                                       DP(NETIF_MSG_RX_STATUS,
+                                          "calling tpa_start on queue %d\n",
+                                          queue);
+
+                                       bnx2x_tpa_start(fp, queue, skb,
+                                                       bd_cons, bd_prod);
+
+                                       /* Set Toeplitz hash for an LRO skb */
+                                       bnx2x_set_skb_rxhash(bp, cqe, skb);
+
+                                       goto next_rx;
+                               }
+
+                               if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
+                                       DP(NETIF_MSG_RX_STATUS,
+                                          "calling tpa_stop on queue %d\n",
+                                          queue);
+
+                                       if (!BNX2X_RX_SUM_FIX(cqe))
+                                               BNX2X_ERR("STOP on none TCP "
+                                                         "data\n");
+
+                                       /* This is a size of the linear data
+                                          on this skb */
+                                       len = le16_to_cpu(cqe->fast_path_cqe.
+                                                               len_on_bd);
+                                       bnx2x_tpa_stop(bp, fp, queue, pad,
+                                                   len, cqe, comp_ring_cons);
+#ifdef BNX2X_STOP_ON_ERROR
+                                       if (bp->panic)
+                                               return 0;
+#endif
+
+                                       bnx2x_update_sge_prod(fp,
+                                                       &cqe->fast_path_cqe);
+                                       goto next_cqe;
+                               }
+                       }
+
+                       /* Sync only the pad + small-packet prefix back to
+                        * the device; a full unmap happens only if we keep
+                        * this skb (see below).
+                        */
+                       dma_sync_single_for_device(&bp->pdev->dev,
+                                       dma_unmap_addr(rx_buf, mapping),
+                                                  pad + RX_COPY_THRESH,
+                                                  DMA_FROM_DEVICE);
+                       prefetch(((char *)(skb)) + 128);
+
+                       /* is this an error packet? */
+                       /* NOTE: ETH_RX_ERROR_FALGS is the macro's actual
+                        * (misspelled) name in the bnx2x headers.
+                        */
+                       if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
+                               DP(NETIF_MSG_RX_ERR,
+                                  "ERROR  flags %x  rx packet %u\n",
+                                  cqe_fp_flags, sw_comp_cons);
+                               fp->eth_q_stats.rx_err_discard_pkt++;
+                               goto reuse_rx;
+                       }
+
+                       /* Since we don't have a jumbo ring
+                        * copy small packets if mtu > 1500
+                        */
+                       if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
+                           (len <= RX_COPY_THRESH)) {
+                               struct sk_buff *new_skb;
+
+                               new_skb = netdev_alloc_skb(bp->dev,
+                                                          len + pad);
+                               if (new_skb == NULL) {
+                                       DP(NETIF_MSG_RX_ERR,
+                                          "ERROR  packet dropped "
+                                          "because of alloc failure\n");
+                                       fp->eth_q_stats.rx_skb_alloc_failed++;
+                                       goto reuse_rx;
+                               }
+
+                               /* aligned copy */
+                               skb_copy_from_linear_data_offset(skb, pad,
+                                                   new_skb->data + pad, len);
+                               skb_reserve(new_skb, pad);
+                               skb_put(new_skb, len);
+
+                               bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
+
+                               skb = new_skb;
+
+                       } else
+                       if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
+                               dma_unmap_single(&bp->pdev->dev,
+                                       dma_unmap_addr(rx_buf, mapping),
+                                                bp->rx_buf_size,
+                                                DMA_FROM_DEVICE);
+                               skb_reserve(skb, pad);
+                               skb_put(skb, len);
+
+                       } else {
+                               DP(NETIF_MSG_RX_ERR,
+                                  "ERROR  packet dropped because "
+                                  "of alloc failure\n");
+                               fp->eth_q_stats.rx_skb_alloc_failed++;
+reuse_rx:
+                               bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
+                               goto next_rx;
+                       }
+
+                       skb->protocol = eth_type_trans(skb, bp->dev);
+
+                       /* Set Toeplitz hash for a none-LRO skb */
+                       bnx2x_set_skb_rxhash(bp, cqe, skb);
+
+                       skb->ip_summed = CHECKSUM_NONE;
+                       if (bp->rx_csum) {
+                               if (likely(BNX2X_RX_CSUM_OK(cqe)))
+                                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+                               else
+                                       fp->eth_q_stats.hw_csum_err++;
+                       }
+               }
+
+               skb_record_rx_queue(skb, fp->index);
+
+#ifdef BCM_VLAN
+               if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
+                   (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
+                    PARSING_FLAGS_VLAN))
+                       vlan_gro_receive(&fp->napi, bp->vlgrp,
+                               le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
+               else
+#endif
+                       napi_gro_receive(&fp->napi, skb);
+
+
+next_rx:
+               rx_buf->skb = NULL;
+
+               bd_cons = NEXT_RX_IDX(bd_cons);
+               bd_prod = NEXT_RX_IDX(bd_prod);
+               bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
+               rx_pkt++;
+next_cqe:
+               sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
+               sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
+
+               if (rx_pkt == budget)
+                       break;
+       } /* while */
+
+       fp->rx_bd_cons = bd_cons;
+       fp->rx_bd_prod = bd_prod_fw;
+       fp->rx_comp_cons = sw_comp_cons;
+       fp->rx_comp_prod = sw_comp_prod;
+
+       /* Update producers */
+       bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
+                            fp->rx_sge_prod);
+
+       fp->rx_pkt += rx_pkt;
+       fp->rx_calls++;
+
+       return rx_pkt;
+}
+
+/* MSI-X fastpath interrupt handler.  Acks the status block with
+ * IGU_INT_DISABLE (masking further interrupts for this SB) and
+ * schedules the queue's NAPI context, which performs the actual
+ * Rx/Tx processing.
+ */
+static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
+{
+       struct bnx2x_fastpath *fp = fp_cookie;
+       struct bnx2x *bp = fp->bp;
+
+       /* Return here if interrupt is disabled */
+       if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
+               DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
+               return IRQ_HANDLED;
+       }
+
+       DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
+          fp->index, fp->sb_id);
+       bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
+
+#ifdef BNX2X_STOP_ON_ERROR
+       if (unlikely(bp->panic))
+               return IRQ_HANDLED;
+#endif
+
+       /* Handle Rx and Tx according to MSI-X vector */
+       /* Warm the caches the poll routine will touch first */
+       prefetch(fp->rx_cons_sb);
+       prefetch(fp->tx_cons_sb);
+       prefetch(&fp->status_blk->u_status_block.status_block_index);
+       prefetch(&fp->status_blk->c_status_block.status_block_index);
+       napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+
+       return IRQ_HANDLED;
+}
+
+
+/* HW Lock for shared dual port PHYs */
+void bnx2x_acquire_phy_lock(struct bnx2x *bp)
+{
+       /* Always take the SW mutex; the HW MDIO lock is needed only on
+        * boards that share the PHY between ports.
+        */
+       mutex_lock(&bp->port.phy_mutex);
+
+       if (!bp->port.need_hw_lock)
+               return;
+
+       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
+}
+
+void bnx2x_release_phy_lock(struct bnx2x *bp)
+{
+       /* Drop the HW MDIO lock first (reverse of acquire order),
+        * then the SW mutex.
+        */
+       if (bp->port.need_hw_lock)
+               bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
+
+       mutex_unlock(&bp->port.phy_mutex);
+}
+
+/* Report the current link state (speed, duplex, flow control) to the
+ * kernel log and sync the netdev carrier state.  The "Link is Up" line
+ * is assembled incrementally with pr_cont(), so the print calls below
+ * must stay in this exact order.
+ */
+void bnx2x_link_report(struct bnx2x *bp)
+{
+       if (bp->flags & MF_FUNC_DIS) {
+               netif_carrier_off(bp->dev);
+               netdev_err(bp->dev, "NIC Link is Down\n");
+               return;
+       }
+
+       if (bp->link_vars.link_up) {
+               u16 line_speed;
+
+               if (bp->state == BNX2X_STATE_OPEN)
+                       netif_carrier_on(bp->dev);
+               netdev_info(bp->dev, "NIC Link is Up, ");
+
+               line_speed = bp->link_vars.line_speed;
+               if (IS_E1HMF(bp)) {
+                       u16 vn_max_rate;
+
+                       /* In multi-function mode cap the reported speed at
+                        * this function's configured max bandwidth.
+                        */
+                       vn_max_rate =
+                               ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
+                                FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
+                       if (vn_max_rate < line_speed)
+                               line_speed = vn_max_rate;
+               }
+               pr_cont("%d Mbps ", line_speed);
+
+               if (bp->link_vars.duplex == DUPLEX_FULL)
+                       pr_cont("full duplex");
+               else
+                       pr_cont("half duplex");
+
+               if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
+                       if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
+                               pr_cont(", receive ");
+                               if (bp->link_vars.flow_ctrl &
+                                   BNX2X_FLOW_CTRL_TX)
+                                       pr_cont("& transmit ");
+                       } else {
+                               pr_cont(", transmit ");
+                       }
+                       pr_cont("flow control ON");
+               }
+               pr_cont("\n");
+
+       } else { /* link_down */
+               netif_carrier_off(bp->dev);
+               netdev_err(bp->dev, "NIC Link is Down\n");
+       }
+}
+
+/* bnx2x_init_rx_rings - allocate and initialize all Rx rings.
+ * @bp: driver handle
+ *
+ * Pre-allocates the per-queue TPA (LRO) skb pools when TPA is enabled,
+ * chains the "next page" elements of the SGE, Rx BD and CQ rings, fills
+ * the rings with buffers and publishes the initial producer values to
+ * the chip.  Allocation failures disable TPA on the affected queue
+ * rather than failing the whole init.
+ *
+ * Fix: the TPA pool unmap cookie was cleared via &bp->fp->tpa_pool[i],
+ * which always addresses queue 0's pool; it must use the current
+ * queue's fastpath (fp).
+ */
+void bnx2x_init_rx_rings(struct bnx2x *bp)
+{
+       int func = BP_FUNC(bp);
+       int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
+                                             ETH_MAX_AGGREGATION_QUEUES_E1H;
+       u16 ring_prod, cqe_ring_prod;
+       int i, j;
+
+       /* ETH_OVREHEAD is the macro's actual (misspelled) name */
+       bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
+       DP(NETIF_MSG_IFUP,
+          "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
+
+       if (bp->flags & TPA_ENABLE_FLAG) {
+
+               for_each_queue(bp, j) {
+                       struct bnx2x_fastpath *fp = &bp->fp[j];
+
+                       for (i = 0; i < max_agg_queues; i++) {
+                               fp->tpa_pool[i].skb =
+                                  netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+                               if (!fp->tpa_pool[i].skb) {
+                                       BNX2X_ERR("Failed to allocate TPA "
+                                                 "skb pool for queue[%d] - "
+                                                 "disabling TPA on this "
+                                                 "queue!\n", j);
+                                       /* release the i entries allocated
+                                        * so far on this queue */
+                                       bnx2x_free_tpa_pool(bp, fp, i);
+                                       fp->disable_tpa = 1;
+                                       break;
+                               }
+                               /* clear the unmap cookie of THIS queue's
+                                * pool entry (was wrongly bp->fp[0]) */
+                               dma_unmap_addr_set(&fp->tpa_pool[i],
+                                                  mapping, 0);
+                               fp->tpa_state[i] = BNX2X_TPA_STOP;
+                       }
+               }
+       }
+
+       for_each_queue(bp, j) {
+               struct bnx2x_fastpath *fp = &bp->fp[j];
+
+               fp->rx_bd_cons = 0;
+               fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
+               fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
+
+               /* "next page" elements initialization */
+               /* SGE ring */
+               for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
+                       struct eth_rx_sge *sge;
+
+                       sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
+                       sge->addr_hi =
+                               cpu_to_le32(U64_HI(fp->rx_sge_mapping +
+                                       BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
+                       sge->addr_lo =
+                               cpu_to_le32(U64_LO(fp->rx_sge_mapping +
+                                       BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
+               }
+
+               bnx2x_init_sge_ring_bit_mask(fp);
+
+               /* RX BD ring */
+               for (i = 1; i <= NUM_RX_RINGS; i++) {
+                       struct eth_rx_bd *rx_bd;
+
+                       rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
+                       rx_bd->addr_hi =
+                               cpu_to_le32(U64_HI(fp->rx_desc_mapping +
+                                           BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
+                       rx_bd->addr_lo =
+                               cpu_to_le32(U64_LO(fp->rx_desc_mapping +
+                                           BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
+               }
+
+               /* CQ ring */
+               for (i = 1; i <= NUM_RCQ_RINGS; i++) {
+                       struct eth_rx_cqe_next_page *nextpg;
+
+                       nextpg = (struct eth_rx_cqe_next_page *)
+                               &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
+                       nextpg->addr_hi =
+                               cpu_to_le32(U64_HI(fp->rx_comp_mapping +
+                                          BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
+                       nextpg->addr_lo =
+                               cpu_to_le32(U64_LO(fp->rx_comp_mapping +
+                                          BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
+               }
+
+               /* Allocate SGEs and initialize the ring elements */
+               for (i = 0, ring_prod = 0;
+                    i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
+
+                       if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
+                               BNX2X_ERR("was only able to allocate "
+                                         "%d rx sges\n", i);
+                               BNX2X_ERR("disabling TPA for queue[%d]\n", j);
+                               /* Cleanup already allocated elements */
+                               bnx2x_free_rx_sge_range(bp, fp, ring_prod);
+                               bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
+                               fp->disable_tpa = 1;
+                               ring_prod = 0;
+                               break;
+                       }
+                       ring_prod = NEXT_SGE_IDX(ring_prod);
+               }
+               fp->rx_sge_prod = ring_prod;
+
+               /* Allocate BDs and initialize BD ring */
+               fp->rx_comp_cons = 0;
+               cqe_ring_prod = ring_prod = 0;
+               for (i = 0; i < bp->rx_ring_size; i++) {
+                       if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
+                               BNX2X_ERR("was only able to allocate "
+                                         "%d rx skbs on queue[%d]\n", i, j);
+                               fp->eth_q_stats.rx_skb_alloc_failed++;
+                               break;
+                       }
+                       ring_prod = NEXT_RX_IDX(ring_prod);
+                       cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
+                       WARN_ON(ring_prod <= i);
+               }
+
+               fp->rx_bd_prod = ring_prod;
+               /* must not have more available CQEs than BDs */
+               fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
+                                        cqe_ring_prod);
+               fp->rx_pkt = fp->rx_calls = 0;
+
+               /* Warning!
+                * this will generate an interrupt (to the TSTORM)
+                * must only be done after chip is initialized
+                */
+               bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
+                                    fp->rx_sge_prod);
+               if (j != 0)
+                       continue;
+
+               /* queue 0 only: program the CQ workaround address */
+               REG_WR(bp, BAR_USTRORM_INTMEM +
+                      USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
+                      U64_LO(fp->rx_comp_mapping));
+               REG_WR(bp, BAR_USTRORM_INTMEM +
+                      USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
+                      U64_HI(fp->rx_comp_mapping));
+       }
+}
+/* Release every packet still queued on the Tx rings of all queues. */
+static void bnx2x_free_tx_skbs(struct bnx2x *bp)
+{
+       int queue;
+
+       for_each_queue(bp, queue) {
+               struct bnx2x_fastpath *fp = &bp->fp[queue];
+               u16 bd_cons = fp->tx_bd_cons;
+               u16 prod = fp->tx_pkt_prod;
+               u16 cons = fp->tx_pkt_cons;
+
+               /* walk from the packet consumer up to the producer,
+                * freeing one packet per step */
+               for (; cons != prod; cons++)
+                       bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(cons));
+       }
+}
+
+/* Unmap and free every skb still held in the Rx BD rings; when TPA was
+ * active on a queue, also release its TPA skb pool.
+ */
+static void bnx2x_free_rx_skbs(struct bnx2x *bp)
+{
+       int queue, idx;
+
+       for_each_queue(bp, queue) {
+               struct bnx2x_fastpath *fp = &bp->fp[queue];
+
+               for (idx = 0; idx < NUM_RX_BD; idx++) {
+                       struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[idx];
+                       struct sk_buff *skb = rx_buf->skb;
+
+                       if (!skb)
+                               continue;
+
+                       dma_unmap_single(&bp->pdev->dev,
+                                        dma_unmap_addr(rx_buf, mapping),
+                                        bp->rx_buf_size, DMA_FROM_DEVICE);
+
+                       rx_buf->skb = NULL;
+                       dev_kfree_skb(skb);
+               }
+
+               if (fp->disable_tpa)
+                       continue;
+
+               bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
+                                   ETH_MAX_AGGREGATION_QUEUES_E1 :
+                                   ETH_MAX_AGGREGATION_QUEUES_E1H);
+       }
+}
+
+/* Free all driver-owned skbs on both the Tx and Rx rings. */
+void bnx2x_free_skbs(struct bnx2x *bp)
+{
+       bnx2x_free_tx_skbs(bp);
+       bnx2x_free_rx_skbs(bp);
+}
+
+/* Release all MSI-X vectors: vector 0 (slowpath) first, then one per
+ * fastpath queue.  The fastpath vectors start after the slowpath one,
+ * plus an extra slot when CNIC is compiled in.
+ */
+static void bnx2x_free_msix_irqs(struct bnx2x *bp)
+{
+       int queue, base = 1;
+
+       free_irq(bp->msix_table[0].vector, bp->dev);
+       DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
+          bp->msix_table[0].vector);
+
+#ifdef BCM_CNIC
+       base++;
+#endif
+       for_each_queue(bp, queue) {
+               DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
+                  "state %x\n", queue, bp->msix_table[queue + base].vector,
+                  bnx2x_fp(bp, queue, state));
+
+               free_irq(bp->msix_table[queue + base].vector, &bp->fp[queue]);
+       }
+}
+
+/* Tear down the interrupt mode in use.  With @disable_only the vectors
+ * are not freed (only MSI/MSI-X is disabled on the PCI device).
+ */
+void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
+{
+       if (bp->flags & USING_MSIX_FLAG) {
+               if (!disable_only)
+                       bnx2x_free_msix_irqs(bp);
+               pci_disable_msix(bp->pdev);
+               bp->flags &= ~USING_MSIX_FLAG;
+               return;
+       }
+
+       if (bp->flags & USING_MSI_FLAG) {
+               if (!disable_only)
+                       free_irq(bp->pdev->irq, bp->dev);
+               pci_disable_msi(bp->pdev);
+               bp->flags &= ~USING_MSI_FLAG;
+               return;
+       }
+
+       /* legacy INTx */
+       if (!disable_only)
+               free_irq(bp->pdev->irq, bp->dev);
+}
+
+/* Fill the MSI-X table (slowpath vector, optional CNIC vector, one per
+ * fastpath queue) and enable MSI-X.  pci_enable_msix() returns 0 on
+ * success, a negative errno on failure, or a POSITIVE count of vectors
+ * actually available when the request was too large - in that case we
+ * retry with the smaller count and shrink bp->num_queues to match.
+ * Returns 0 on success or the pci_enable_msix() error code.
+ */
+static int bnx2x_enable_msix(struct bnx2x *bp)
+{
+       int i, rc, offset = 1;
+       int igu_vec = 0;
+
+       bp->msix_table[0].entry = igu_vec;
+       DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
+
+#ifdef BCM_CNIC
+       igu_vec = BP_L_ID(bp) + offset;
+       bp->msix_table[1].entry = igu_vec;
+       DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
+       offset++;
+#endif
+       for_each_queue(bp, i) {
+               igu_vec = BP_L_ID(bp) + offset + i;
+               bp->msix_table[i + offset].entry = igu_vec;
+               DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
+                  "(fastpath #%u)\n", i + offset, igu_vec, i);
+       }
+
+       rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
+                            BNX2X_NUM_QUEUES(bp) + offset);
+
+       /*
+        * reconfigure number of tx/rx queues according to available
+        * MSI-X vectors
+        */
+       if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
+               /* vectors available for FP */
+               int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
+
+               DP(NETIF_MSG_IFUP,
+                  "Trying to use less MSI-X vectors: %d\n", rc);
+
+               /* retry with exactly the number of vectors the HW offers */
+               rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
+
+               if (rc) {
+                       DP(NETIF_MSG_IFUP,
+                          "MSI-X is not attainable  rc %d\n", rc);
+                       return rc;
+               }
+
+               bp->num_queues = min(bp->num_queues, fp_vec);
+
+               DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
+                                 bp->num_queues);
+       } else if (rc) {
+               DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
+               return rc;
+       }
+
+       bp->flags |= USING_MSIX_FLAG;
+
+       return 0;
+}
+
+/* Request all MSI-X vectors previously enabled by bnx2x_enable_msix():
+ * the slowpath vector first, then one per fastpath queue.  On any
+ * fastpath request failure all already-requested vectors are released.
+ * Returns 0 on success, -EBUSY on failure.
+ */
+static int bnx2x_req_msix_irqs(struct bnx2x *bp)
+{
+       int i, rc, offset = 1;
+
+       rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
+                        bp->dev->name, bp->dev);
+       if (rc) {
+               BNX2X_ERR("request sp irq failed\n");
+               return -EBUSY;
+       }
+
+#ifdef BCM_CNIC
+       offset++;
+#endif
+       for_each_queue(bp, i) {
+               struct bnx2x_fastpath *fp = &bp->fp[i];
+               /* per-queue IRQ name, e.g. "eth0-fp-0" */
+               snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
+                        bp->dev->name, i);
+
+               rc = request_irq(bp->msix_table[i + offset].vector,
+                                bnx2x_msix_fp_int, 0, fp->name, fp);
+               if (rc) {
+                       BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
+                       bnx2x_free_msix_irqs(bp);
+                       return -EBUSY;
+               }
+
+               fp->state = BNX2X_FP_STATE_IRQ;
+       }
+
+       i = BNX2X_NUM_QUEUES(bp);
+       netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
+              " ... fp[%d] %d\n",
+              bp->msix_table[0].vector,
+              0, bp->msix_table[offset].vector,
+              i - 1, bp->msix_table[offset + i - 1].vector);
+
+       return 0;
+}
+
+/* Switch the device into MSI mode; on success remember it in the
+ * driver flags.  Returns 0 on success, -1 otherwise.
+ */
+static int bnx2x_enable_msi(struct bnx2x *bp)
+{
+       if (pci_enable_msi(bp->pdev)) {
+               DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
+               return -1;
+       }
+
+       bp->flags |= USING_MSI_FLAG;
+       return 0;
+}
+
+/* Request the single device IRQ used for MSI or legacy INTx.  MSI
+ * vectors are exclusive; only the INTx line may be shared.
+ */
+static int bnx2x_req_irq(struct bnx2x *bp)
+{
+       unsigned long flags = (bp->flags & USING_MSI_FLAG) ? 0 : IRQF_SHARED;
+       int rc;
+
+       rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
+                        bp->dev->name, bp->dev);
+       if (rc == 0)
+               bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
+
+       return rc;
+}
+
+/* Enable NAPI polling on every active queue. */
+static void bnx2x_napi_enable(struct bnx2x *bp)
+{
+       int i;
+
+       for_each_queue(bp, i)
+               napi_enable(&bnx2x_fp(bp, i, napi));
+}
+
+/* Disable NAPI polling on every active queue (waits for any poll in
+ * progress per napi_disable() semantics).
+ */
+static void bnx2x_napi_disable(struct bnx2x *bp)
+{
+       int i;
+
+       for_each_queue(bp, i)
+               napi_disable(&bnx2x_fp(bp, i, napi));
+}
+
+/* Re-arm the interface after bnx2x_netif_stop().  Decrements the
+ * interrupt semaphore; only when it drops to zero (no other blocker
+ * outstanding) are NAPI, device interrupts and the Tx queues enabled.
+ */
+void bnx2x_netif_start(struct bnx2x *bp)
+{
+       int intr_sem;
+
+       /* atomic_dec_and_test() returns true when intr_sem reaches 0 */
+       intr_sem = atomic_dec_and_test(&bp->intr_sem);
+       smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
+
+       if (intr_sem) {
+               if (netif_running(bp->dev)) {
+                       bnx2x_napi_enable(bp);
+                       bnx2x_int_enable(bp);
+                       if (bp->state == BNX2X_STATE_OPEN)
+                               netif_tx_wake_all_queues(bp->dev);
+               }
+       }
+}
+
+/* Quiesce the interface: disable device interrupts (via
+ * bnx2x_int_disable_sync, HW disable controlled by @disable_hw),
+ * then stop NAPI polling and all Tx queues - in that order.
+ */
+void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
+{
+       bnx2x_int_disable_sync(bp, disable_hw);
+       bnx2x_napi_disable(bp);
+       netif_tx_disable(bp->dev);
+}
+/* Decide how many Rx/Tx queues to use for the configured interrupt
+ * mode: one queue for INTx/MSI, otherwise the multi-mode count with an
+ * MSI-X attempt (falling back to a single queue if MSI-X fails).  The
+ * result is mirrored into the netdev's real_num_tx_queues.  Returns
+ * the bnx2x_enable_msix() status (0 when MSI-X was not attempted).
+ */
+static int bnx2x_set_num_queues(struct bnx2x *bp)
+{
+       int rc = 0;
+
+       switch (bp->int_mode) {
+       case INT_MODE_INTx:
+       case INT_MODE_MSI:
+               bp->num_queues = 1;
+               DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
+               break;
+       default:
+               /* Set number of queues according to bp->multi_mode value */
+               bnx2x_set_num_queues_msix(bp);
+
+               DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
+                  bp->num_queues);
+
+               /* if we can't use MSI-X we only need one fp,
+                * so try to enable MSI-X with the requested number of fp's
+                * and fallback to MSI or legacy INTx with one fp
+                */
+               rc = bnx2x_enable_msix(bp);
+               if (rc)
+                       /* failed to enable MSI-X */
+                       bp->num_queues = 1;
+               break;
+       }
+       bp->dev->real_num_tx_queues = bp->num_queues;
+       return rc;
+}
+
+/* must be called with rtnl_lock */
+int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
+{
+       u32 load_code;
+       int i, rc;
+
+#ifdef BNX2X_STOP_ON_ERROR
+       if (unlikely(bp->panic))
+               return -EPERM;
+#endif
+
+       bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
+
+       rc = bnx2x_set_num_queues(bp);
+
+       if (bnx2x_alloc_mem(bp)) {
+               bnx2x_free_irq(bp, true);
+               return -ENOMEM;
+       }
+
+       for_each_queue(bp, i)
+               bnx2x_fp(bp, i, disable_tpa) =
+                                       ((bp->flags & TPA_ENABLE_FLAG) == 0);
+
+       for_each_queue(bp, i)
+               netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
+                              bnx2x_poll, 128);
+
+       bnx2x_napi_enable(bp);
+
+       if (bp->flags & USING_MSIX_FLAG) {
+               rc = bnx2x_req_msix_irqs(bp);
+               if (rc) {
+                       bnx2x_free_irq(bp, true);
+                       goto load_error1;
+               }
+       } else {
+               /* Fall to INTx if failed to enable MSI-X due to lack of
+                  memory (in bnx2x_set_num_queues()) */
+               if ((rc != -ENOMEM) && (bp->int_mode != INT_MODE_INTx))
+                       bnx2x_enable_msi(bp);
+               bnx2x_ack_int(bp);
+               rc = bnx2x_req_irq(bp);
+               if (rc) {
+                       BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
+                       bnx2x_free_irq(bp, true);
+                       goto load_error1;
+               }
+               if (bp->flags & USING_MSI_FLAG) {
+                       bp->dev->irq = bp->pdev->irq;
+                       netdev_info(bp->dev, "using MSI  IRQ %d\n",
+                                   bp->pdev->irq);
+               }
+       }
+
+       /* Send LOAD_REQUEST command to MCP
+          Returns the type of LOAD command:
+          if it is the first port to be initialized
+          common blocks should be initialized, otherwise - not
+       */
+       if (!BP_NOMCP(bp)) {
+               load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
+               if (!load_code) {
+                       BNX2X_ERR("MCP response failure, aborting\n");
+                       rc = -EBUSY;
+                       goto load_error2;
+               }
+               if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
+                       rc = -EBUSY; /* other port in diagnostic mode */
+                       goto load_error2;
+               }
+
+       } else {
+               int port = BP_PORT(bp);
+
+               DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
+                  load_count[0], load_count[1], load_count[2]);
+               load_count[0]++;
+               load_count[1 + port]++;
+               DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
+                  load_count[0], load_count[1], load_count[2]);
+               if (load_count[0] == 1)
+                       load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
+               else if (load_count[1 + port] == 1)
+                       load_code = FW_MSG_CODE_DRV_LOAD_PORT;
+               else
+                       load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
+       }
+
+       if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
+           (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
+               bp->port.pmf = 1;
+       else
+               bp->port.pmf = 0;
+       DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
+
+       /* Initialize HW */
+       rc = bnx2x_init_hw(bp, load_code);
+       if (rc) {
+               BNX2X_ERR("HW init failed, aborting\n");
+               bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
+               bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
+               bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+               goto load_error2;
+       }
+
+       /* Setup NIC internals and enable interrupts */
+       bnx2x_nic_init(bp, load_code);
+
+       if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
+           (bp->common.shmem2_base))
+               SHMEM2_WR(bp, dcc_support,
+                         (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
+                          SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
+
+       /* Send LOAD_DONE command to MCP */
+       if (!BP_NOMCP(bp)) {
+               load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
+               if (!load_code) {
+                       BNX2X_ERR("MCP response failure, aborting\n");
+                       rc = -EBUSY;
+                       goto load_error3;
+               }
+       }
+
+       bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
+
+       rc = bnx2x_setup_leading(bp);
+       if (rc) {
+               BNX2X_ERR("Setup leading failed!\n");
+#ifndef BNX2X_STOP_ON_ERROR
+               goto load_error3;
+#else
+               bp->panic = 1;
+               return -EBUSY;
+#endif
+       }
+
+       if (CHIP_IS_E1H(bp))
+               if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
+                       DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
+                       bp->flags |= MF_FUNC_DIS;
+               }
+
+       if (bp->state == BNX2X_STATE_OPEN) {
+#ifdef BCM_CNIC
+               /* Enable Timer scan */
+               REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
+#endif
+               for_each_nondefault_queue(bp, i) {
+                       rc = bnx2x_setup_multi(bp, i);
+                       if (rc)
+#ifdef BCM_CNIC
+                               goto load_error4;
+#else
+                               goto load_error3;
+#endif
+               }
+
+               if (CHIP_IS_E1(bp))
+                       bnx2x_set_eth_mac_addr_e1(bp, 1);
+               else
+                       bnx2x_set_eth_mac_addr_e1h(bp, 1);
+#ifdef BCM_CNIC
+               /* Set iSCSI L2 MAC */
+               mutex_lock(&bp->cnic_mutex);
+               if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
+                       bnx2x_set_iscsi_eth_mac_addr(bp, 1);
+                       bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
+                       bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
+                                     CNIC_SB_ID(bp));
+               }
+               mutex_unlock(&bp->cnic_mutex);
+#endif
+       }
+
+       if (bp->port.pmf)
+               bnx2x_initial_phy_init(bp, load_mode);
+
+       /* Start fast path */
+       switch (load_mode) {
+       case LOAD_NORMAL:
+               if (bp->state == BNX2X_STATE_OPEN) {
+                       /* Tx queue should be only reenabled */
+                       netif_tx_wake_all_queues(bp->dev);
+               }
+               /* Initialize the receive filter. */
+               bnx2x_set_rx_mode(bp->dev);
+               break;
+
+       case LOAD_OPEN:
+               netif_tx_start_all_queues(bp->dev);
+               if (bp->state != BNX2X_STATE_OPEN)
+                       netif_tx_disable(bp->dev);
+               /* Initialize the receive filter. */
+               bnx2x_set_rx_mode(bp->dev);
+               break;
+
+       case LOAD_DIAG:
+               /* Initialize the receive filter. */
+               bnx2x_set_rx_mode(bp->dev);
+               bp->state = BNX2X_STATE_DIAG;
+               break;
+
+       default:
+               break;
+       }
+
+       if (!bp->port.pmf)
+               bnx2x__link_status_update(bp);
+
+       /* start the timer */
+       mod_timer(&bp->timer, jiffies + bp->current_interval);
+
+#ifdef BCM_CNIC
+       bnx2x_setup_cnic_irq_info(bp);
+       if (bp->state == BNX2X_STATE_OPEN)
+               bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
+#endif
+       bnx2x_inc_load_cnt(bp);
+
+       return 0;
+
+#ifdef BCM_CNIC
+load_error4:
+       /* Disable Timer scan */
+       REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
+#endif
+load_error3:
+       bnx2x_int_disable_sync(bp, 1);
+       if (!BP_NOMCP(bp)) {
+               bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
+               bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+       }
+       bp->port.pmf = 0;
+       /* Free SKBs, SGEs, TPA pool and driver internals */
+       bnx2x_free_skbs(bp);
+       for_each_queue(bp, i)
+               bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+load_error2:
+       /* Release IRQs */
+       bnx2x_free_irq(bp, false);
+load_error1:
+       bnx2x_napi_disable(bp);
+       for_each_queue(bp, i)
+               netif_napi_del(&bnx2x_fp(bp, i, napi));
+       bnx2x_free_mem(bp);
+
+       return rc;
+}
+
+/* must be called with rtnl_lock */
+/*
+ * Bring the NIC down: notify CNIC, switch RX filtering to "drop all",
+ * stop NAPI/Tx and HW interrupts, release IRQs, optionally clean up the
+ * chip (skipped for UNLOAD_RECOVERY), and free SKBs/SGEs/driver memory.
+ *
+ * Returns 0 on success, or -EINVAL if the interface is already closed
+ * (in which case the recovery state is marked done and the reserved HW
+ * lock is released).
+ */
+int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
+{
+       int i;
+
+       if (bp->state == BNX2X_STATE_CLOSED) {
+               /* Interface has been removed - nothing to recover */
+               bp->recovery_state = BNX2X_RECOVERY_DONE;
+               bp->is_leader = 0;
+               bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
+               smp_wmb();
+
+               return -EINVAL;
+       }
+
+#ifdef BCM_CNIC
+       bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
+#endif
+       bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
+
+       /* Set "drop all" */
+       bp->rx_mode = BNX2X_RX_MODE_NONE;
+       bnx2x_set_storm_rx_mode(bp);
+
+       /* Disable HW interrupts, NAPI and Tx */
+       bnx2x_netif_stop(bp, 1);
+       netif_carrier_off(bp->dev);
+
+       del_timer_sync(&bp->timer);
+       /* Write a final "always alive" driver pulse to the MCP mailbox */
+       SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
+                (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
+       bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+       /* Release IRQs */
+       bnx2x_free_irq(bp, false);
+
+       /* Cleanup the chip if needed */
+       if (unload_mode != UNLOAD_RECOVERY)
+               bnx2x_chip_cleanup(bp, unload_mode);
+
+       bp->port.pmf = 0;
+
+       /* Free SKBs, SGEs, TPA pool and driver internals */
+       bnx2x_free_skbs(bp);
+       for_each_queue(bp, i)
+               bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+       for_each_queue(bp, i)
+               netif_napi_del(&bnx2x_fp(bp, i, napi));
+       bnx2x_free_mem(bp);
+
+       bp->state = BNX2X_STATE_CLOSED;
+
+       /* The last driver must disable a "close the gate" if there is no
+        * parity attention or "process kill" pending.
+        */
+       if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
+           bnx2x_reset_is_done(bp))
+               bnx2x_disable_close_the_gate(bp);
+
+       /* Reset MCP mail box sequence if there is on going recovery */
+       if (unload_mode == UNLOAD_RECOVERY)
+               bp->fw_seq = 0;
+
+       return 0;
+}
+/*
+ * Move the device between PCI power states via the PM control register.
+ * Only PCI_D0 and PCI_D3hot are supported; anything else returns -EINVAL.
+ * The D3hot transition is skipped (returns 0) when other clients still
+ * hold the device enabled, or on emulation/FPGA (slow) chips.
+ */
+int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
+{
+       u16 pmcsr;
+
+       pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
+
+       switch (state) {
+       case PCI_D0:
+               pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
+                                     ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
+                                      PCI_PM_CTRL_PME_STATUS));
+
+               if (pmcsr & PCI_PM_CTRL_STATE_MASK)
+                       /* delay required during transition out of D3hot */
+                       msleep(20);
+               break;
+
+       case PCI_D3hot:
+               /* If there are other clients above don't
+                  shut down the power */
+               if (atomic_read(&bp->pdev->enable_cnt) != 1)
+                       return 0;
+               /* Don't shut down the power for emulation and FPGA */
+               if (CHIP_REV_IS_SLOW(bp))
+                       return 0;
+
+               /* 3 is the D3hot encoding of the PM_CTRL state field */
+               pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+               pmcsr |= 3;
+
+               if (bp->wol)
+                       pmcsr |= PCI_PM_CTRL_PME_ENABLE;
+
+               pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
+                                     pmcsr);
+
+               /* No more memory access after this point until
+               * device is brought back to D0.
+               */
+               break;
+
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+
+
+/*
+ * net_device service functions
+ */
+
+/*
+ * NAPI poll handler for one fastpath ring: drain Tx completions, process
+ * up to @budget Rx packets, and re-arm the status block interrupt only
+ * once both rings are verifiably idle (see the rmb() discussion below).
+ * Returns the number of Rx packets processed.
+ */
+static int bnx2x_poll(struct napi_struct *napi, int budget)
+{
+       int work_done = 0;
+       struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
+                                                napi);
+       struct bnx2x *bp = fp->bp;
+
+       while (1) {
+#ifdef BNX2X_STOP_ON_ERROR
+               if (unlikely(bp->panic)) {
+                       napi_complete(napi);
+                       return 0;
+               }
+#endif
+
+               if (bnx2x_has_tx_work(fp))
+                       bnx2x_tx_int(fp);
+
+               if (bnx2x_has_rx_work(fp)) {
+                       work_done += bnx2x_rx_int(fp, budget - work_done);
+
+                       /* must not complete if we consumed full budget */
+                       if (work_done >= budget)
+                               break;
+               }
+
+               /* Fall out from the NAPI loop if needed */
+               if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+                       bnx2x_update_fpsb_idx(fp);
+               /* bnx2x_has_rx_work() reads the status block, thus we need
+                * to ensure that status block indices have been actually read
+                * (bnx2x_update_fpsb_idx) prior to this check
+                * (bnx2x_has_rx_work) so that we won't write the "newer"
+                * value of the status block to IGU (if there was a DMA right
+                * after bnx2x_has_rx_work and if there is no rmb, the memory
+                * reading (bnx2x_update_fpsb_idx) may be postponed to right
+                * before bnx2x_ack_sb). In this case there will never be
+                * another interrupt until there is another update of the
+                * status block, while there is still unhandled work.
+                */
+                       rmb();
+
+                       if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+                               napi_complete(napi);
+                               /* Re-enable interrupts */
+                               bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
+                                            le16_to_cpu(fp->fp_c_idx),
+                                            IGU_INT_NOP, 1);
+                               bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
+                                            le16_to_cpu(fp->fp_u_idx),
+                                            IGU_INT_ENABLE, 1);
+                               break;
+                       }
+               }
+       }
+
+       return work_done;
+}
+
+
+/* we split the first BD into headers and data BDs
+ * to ease the pain of our fellow microcode engineers
+ * we use one mapping for both BDs
+ * So far this has only been observed to happen
+ * in Other Operating Systems(TM)
+ */
+/*
+ * Split an oversized start BD at @hlen bytes: the start BD keeps the
+ * headers and a new data BD (allocated right after the parsing BD)
+ * takes the remaining payload, reusing the same DMA mapping at an
+ * offset. Returns the updated BD producer index; *tx_bd is advanced
+ * to point at the new data BD.
+ */
+static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
+                                  struct bnx2x_fastpath *fp,
+                                  struct sw_tx_bd *tx_buf,
+                                  struct eth_tx_start_bd **tx_bd, u16 hlen,
+                                  u16 bd_prod, int nbd)
+{
+       struct eth_tx_start_bd *h_tx_bd = *tx_bd;
+       struct eth_tx_bd *d_tx_bd;
+       dma_addr_t mapping;
+       int old_len = le16_to_cpu(h_tx_bd->nbytes);
+
+       /* first fix first BD */
+       h_tx_bd->nbd = cpu_to_le16(nbd);
+       h_tx_bd->nbytes = cpu_to_le16(hlen);
+
+       DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
+          "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
+          h_tx_bd->addr_lo, h_tx_bd->nbd);
+
+       /* now get a new data BD
+        * (after the pbd) and fill it */
+       bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+       d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
+
+       mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
+                          le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
+
+       d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+       d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+       d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
+
+       /* this marks the BD as one that has no individual mapping */
+       tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
+
+       DP(NETIF_MSG_TX_QUEUED,
+          "TSO split data size is %d (%x:%x)\n",
+          d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
+
+       /* update tx_bd */
+       *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
+
+       return bd_prod;
+}
+
+/*
+ * Adjust a partial checksum by @fix bytes around @t_header: a positive
+ * fix subtracts the bytes just before the header, a negative fix adds
+ * the bytes starting at it. The result is folded, complemented and
+ * byte-swapped into the form the parsing BD expects.
+ */
+static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
+{
+       u16 fixed = csum;
+
+       if (fix < 0)
+               fixed = (u16) ~csum_fold(csum_add(csum,
+                               csum_partial(t_header, -fix, 0)));
+       else if (fix > 0)
+               fixed = (u16) ~csum_fold(csum_sub(csum,
+                               csum_partial(t_header - fix, fix, 0)));
+
+       return swab16(fixed);
+}
+
+/*
+ * Classify an outgoing skb into XMIT_* flags: plain, IPv4/IPv6 checksum
+ * offload (with a TCP refinement), plus GSO flags when segmentation is
+ * requested. GSO always implies the matching checksum flags.
+ */
+static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
+{
+       u32 type = XMIT_PLAIN;
+
+       /* Did the stack request checksum offload? */
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               if (skb->protocol == htons(ETH_P_IPV6)) {
+                       type = XMIT_CSUM_V6;
+                       if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+                               type |= XMIT_CSUM_TCP;
+               } else {
+                       type = XMIT_CSUM_V4;
+                       if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+                               type |= XMIT_CSUM_TCP;
+               }
+       }
+
+       /* Segmentation offload implies the corresponding csum flags */
+       if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+               type |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
+       else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+               type |= XMIT_GSO_V6 | XMIT_CSUM_V6 | XMIT_CSUM_TCP;
+
+       return type;
+}
+
+#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
+/* check if packet requires linearization (packet is too fragmented)
+   no need to check fragmentation if page size > 8K (there will be no
+   violation to FW restrictions) */
+/*
+ * Returns 1 when the skb must be linearized before transmission:
+ * either a non-LSO packet simply has more frags than the FW fetch
+ * window allows, or an LSO packet has some window of
+ * (MAX_FETCH_BD - 3) consecutive BDs whose byte sum is below one MSS.
+ */
+static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
+                            u32 xmit_type)
+{
+       int to_copy = 0;
+       int hlen = 0;
+       int first_bd_sz = 0;
+
+       /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
+       if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
+
+               if (xmit_type & XMIT_GSO) {
+                       unsigned short lso_mss = skb_shinfo(skb)->gso_size;
+                       /* Check if LSO packet needs to be copied:
+                          3 = 1 (for headers BD) + 2 (for PBD and last BD) */
+                       int wnd_size = MAX_FETCH_BD - 3;
+                       /* Number of windows to check */
+                       int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
+                       int wnd_idx = 0;
+                       int frag_idx = 0;
+                       u32 wnd_sum = 0;
+
+                       /* Headers length */
+                       hlen = (int)(skb_transport_header(skb) - skb->data) +
+                               tcp_hdrlen(skb);
+
+                       /* Amount of data (w/o headers) on linear part of SKB*/
+                       first_bd_sz = skb_headlen(skb) - hlen;
+
+                       wnd_sum  = first_bd_sz;
+
+                       /* Calculate the first sum - it's special */
+                       for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
+                               wnd_sum +=
+                                       skb_shinfo(skb)->frags[frag_idx].size;
+
+                       /* If there was data on linear skb data - check it */
+                       if (first_bd_sz > 0) {
+                               if (unlikely(wnd_sum < lso_mss)) {
+                                       to_copy = 1;
+                                       goto exit_lbl;
+                               }
+
+                               wnd_sum -= first_bd_sz;
+                       }
+
+                       /* Others are easier: run through the frag list and
+                          check all windows */
+                       for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
+                               wnd_sum +=
+                         skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
+
+                               if (unlikely(wnd_sum < lso_mss)) {
+                                       to_copy = 1;
+                                       break;
+                               }
+                               wnd_sum -=
+                                       skb_shinfo(skb)->frags[wnd_idx].size;
+                       }
+               } else {
+                       /* in non-LSO too fragmented packet should always
+                          be linearized */
+                       to_copy = 1;
+               }
+       }
+
+exit_lbl:
+       if (unlikely(to_copy))
+               DP(NETIF_MSG_TX_QUEUED,
+                  "Linearization IS REQUIRED for %s packet. "
+                  "num_frags %d  hlen %d  first_bd_sz %d\n",
+                  (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
+                  skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
+
+       return to_copy;
+}
+#endif
+
+/* called with netif_tx_lock
+ * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
+ * netif_wake_queue()
+ */
+/*
+ * Main transmit routine: maps the skb (linear part + page frags) onto a
+ * chain of tx BDs, fills the parsing BD for checksum/TSO offload, and
+ * rings the doorbell. Stops the queue when the ring is nearly full.
+ */
+netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+       struct bnx2x_fastpath *fp;
+       struct netdev_queue *txq;
+       struct sw_tx_bd *tx_buf;
+       struct eth_tx_start_bd *tx_start_bd;
+       struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
+       struct eth_tx_parse_bd *pbd = NULL;
+       u16 pkt_prod, bd_prod;
+       int nbd, fp_index;
+       dma_addr_t mapping;
+       u32 xmit_type = bnx2x_xmit_type(bp, skb);
+       int i;
+       u8 hlen = 0;
+       __le16 pkt_size = 0;
+       struct ethhdr *eth;
+       u8 mac_type = UNICAST_ADDRESS;
+
+#ifdef BNX2X_STOP_ON_ERROR
+       if (unlikely(bp->panic))
+               return NETDEV_TX_BUSY;
+#endif
+
+       fp_index = skb_get_queue_mapping(skb);
+       txq = netdev_get_tx_queue(dev, fp_index);
+
+       fp = &bp->fp[fp_index];
+
+       if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
+               fp->eth_q_stats.driver_xoff++;
+               netif_tx_stop_queue(txq);
+               BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
+               return NETDEV_TX_BUSY;
+       }
+
+       DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
+          "  gso type %x  xmit_type %x\n",
+          skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
+          ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
+
+       eth = (struct ethhdr *)skb->data;
+
+       /* set flag according to packet type (UNICAST_ADDRESS is default)*/
+       if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
+               if (is_broadcast_ether_addr(eth->h_dest))
+                       mac_type = BROADCAST_ADDRESS;
+               else
+                       mac_type = MULTICAST_ADDRESS;
+       }
+
+#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
+       /* First, check if we need to linearize the skb (due to FW
+          restrictions). No need to check fragmentation if page size > 8K
+          (there will be no violation to FW restrictions) */
+       if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
+               /* Statistics of linearization */
+               bp->lin_cnt++;
+               if (skb_linearize(skb) != 0) {
+                       DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
+                          "silently dropping this SKB\n");
+                       dev_kfree_skb_any(skb);
+                       return NETDEV_TX_OK;
+               }
+       }
+#endif
+
+       /*
+       Please read carefully. First we use one BD which we mark as start,
+       then we have a parsing info BD (used for TSO or xsum),
+       and only then we have the rest of the TSO BDs.
+       (don't forget to mark the last one as last,
+       and to unmap only AFTER you write to the BD ...)
+       And above all, all pdb sizes are in words - NOT DWORDS!
+       */
+
+       pkt_prod = fp->tx_pkt_prod++;
+       bd_prod = TX_BD(fp->tx_bd_prod);
+
+       /* get a tx_buf and first BD */
+       tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
+       tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
+
+       tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
+       tx_start_bd->general_data =  (mac_type <<
+                                       ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
+       /* header nbd */
+       tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
+
+       /* remember the first BD of the packet */
+       tx_buf->first_bd = fp->tx_bd_prod;
+       tx_buf->skb = skb;
+       tx_buf->flags = 0;
+
+       DP(NETIF_MSG_TX_QUEUED,
+          "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
+          pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
+
+#ifdef BCM_VLAN
+       if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
+           (bp->flags & HW_VLAN_TX_FLAG)) {
+               tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
+               tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
+       } else
+#endif
+               tx_start_bd->vlan = cpu_to_le16(pkt_prod);
+
+       /* turn on parsing and get a BD */
+       bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+       pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
+
+       memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
+
+       if (xmit_type & XMIT_CSUM) {
+               /* parsing BD offsets are in 16-bit words, hence the /2 */
+               hlen = (skb_network_header(skb) - skb->data) / 2;
+
+               /* for now NS flag is not used in Linux */
+               pbd->global_data =
+                       (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
+                                ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
+
+               pbd->ip_hlen = (skb_transport_header(skb) -
+                               skb_network_header(skb)) / 2;
+
+               hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
+
+               pbd->total_hlen = cpu_to_le16(hlen);
+               hlen = hlen*2;
+
+               tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
+
+               if (xmit_type & XMIT_CSUM_V4)
+                       tx_start_bd->bd_flags.as_bitfield |=
+                                               ETH_TX_BD_FLAGS_IP_CSUM;
+               else
+                       tx_start_bd->bd_flags.as_bitfield |=
+                                               ETH_TX_BD_FLAGS_IPV6;
+
+               if (xmit_type & XMIT_CSUM_TCP) {
+                       pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
+
+               } else {
+                       s8 fix = SKB_CS_OFF(skb); /* signed! */
+
+                       pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
+
+                       DP(NETIF_MSG_TX_QUEUED,
+                          "hlen %d  fix %d  csum before fix %x\n",
+                          le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
+
+                       /* HW bug: fixup the CSUM */
+                       pbd->tcp_pseudo_csum =
+                               bnx2x_csum_fix(skb_transport_header(skb),
+                                              SKB_CS(skb), fix);
+
+                       DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
+                          pbd->tcp_pseudo_csum);
+               }
+       }
+
+       mapping = dma_map_single(&bp->pdev->dev, skb->data,
+                                skb_headlen(skb), DMA_TO_DEVICE);
+
+       tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+       tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+       nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
+       tx_start_bd->nbd = cpu_to_le16(nbd);
+       tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
+       pkt_size = tx_start_bd->nbytes;
+
+       DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
+          "  nbytes %d  flags %x  vlan %x\n",
+          tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
+          le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
+          tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
+
+       if (xmit_type & XMIT_GSO) {
+
+               DP(NETIF_MSG_TX_QUEUED,
+                  "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
+                  skb->len, hlen, skb_headlen(skb),
+                  skb_shinfo(skb)->gso_size);
+
+               tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
+
+               if (unlikely(skb_headlen(skb) > hlen))
+                       bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
+                                                hlen, bd_prod, ++nbd);
+
+               pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+               pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
+               pbd->tcp_flags = pbd_tcp_flags(skb);
+
+               if (xmit_type & XMIT_GSO_V4) {
+                       pbd->ip_id = swab16(ip_hdr(skb)->id);
+                       pbd->tcp_pseudo_csum =
+                               swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+                                                         ip_hdr(skb)->daddr,
+                                                         0, IPPROTO_TCP, 0));
+
+               } else
+                       pbd->tcp_pseudo_csum =
+                               swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+                                                       &ipv6_hdr(skb)->daddr,
+                                                       0, IPPROTO_TCP, 0));
+
+               pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
+       }
+       tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+               bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+               tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
+               if (total_pkt_bd == NULL)
+                       total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
+
+               mapping = dma_map_page(&bp->pdev->dev, frag->page,
+                                      frag->page_offset,
+                                      frag->size, DMA_TO_DEVICE);
+
+               tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+               tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+               tx_data_bd->nbytes = cpu_to_le16(frag->size);
+               le16_add_cpu(&pkt_size, frag->size);
+
+               DP(NETIF_MSG_TX_QUEUED,
+                  "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
+                  i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
+                  le16_to_cpu(tx_data_bd->nbytes));
+       }
+
+       DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
+
+       bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+
+       /* now send a tx doorbell, counting the next BD
+        * if the packet contains or ends with it
+        */
+       if (TX_BD_POFF(bd_prod) < nbd)
+               nbd++;
+
+       if (total_pkt_bd != NULL)
+               total_pkt_bd->total_pkt_bytes = pkt_size;
+
+       if (pbd)
+               DP(NETIF_MSG_TX_QUEUED,
+                  "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
+                  "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
+                  pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
+                  pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
+                  pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
+
+       DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
+
+       /*
+        * Make sure that the BD data is updated before updating the producer
+        * since FW might read the BD right after the producer is updated.
+        * This is only applicable for weak-ordered memory model archs such
+        * as IA-64. The following barrier is also mandatory since FW will
+        * assumes packets must have BDs.
+        */
+       wmb();
+
+       fp->tx_db.data.prod += nbd;
+       barrier();
+       DOORBELL(bp, fp->index, fp->tx_db.raw);
+
+       mmiowb();
+
+       fp->tx_bd_prod += nbd;
+
+       if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
+               netif_tx_stop_queue(txq);
+
+               /* paired memory barrier is in bnx2x_tx_int(), we have to keep
+                * ordering of set_bit() in netif_tx_stop_queue() and read of
+                * fp->bd_tx_cons */
+               smp_mb();
+
+               fp->eth_q_stats.driver_xoff++;
+               if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
+                       netif_tx_wake_queue(txq);
+       }
+       fp->tx_pkt++;
+
+       return NETDEV_TX_OK;
+}
+/* called with rtnl_lock */
+/*
+ * Set a new MAC address on the device. Rejects invalid Ethernet
+ * addresses; if the interface is running, the new address is also
+ * programmed into the chip immediately.
+ */
+int bnx2x_change_mac_addr(struct net_device *dev, void *p)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+       struct sockaddr *sa = p;
+
+       if (!is_valid_ether_addr((u8 *)(sa->sa_data)))
+               return -EINVAL;
+
+       memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+
+       if (!netif_running(dev))
+               return 0;
+
+       /* E1 and E1H use different MAC configuration paths */
+       if (CHIP_IS_E1(bp))
+               bnx2x_set_eth_mac_addr_e1(bp, 1);
+       else
+               bnx2x_set_eth_mac_addr_e1h(bp, 1);
+
+       return 0;
+}
+
+/* called with rtnl_lock */
+/*
+ * Change the device MTU. Refused (-EAGAIN) while parity-error recovery
+ * is in progress and (-EINVAL) when the new size is outside the
+ * supported range; if the interface is up, the NIC is reloaded so the
+ * rings are re-sized for the new MTU.
+ */
+int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+       int rc = 0;
+
+       if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+               printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+               return -EAGAIN;
+       }
+
+       if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
+           ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
+               return -EINVAL;
+
+       /* This does not race with packet allocation
+        * because the actual alloc size is
+        * only updated as part of load
+        */
+       dev->mtu = new_mtu;
+
+       if (netif_running(dev)) {
+               bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+               rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+       }
+
+       return rc;
+}
+
+/*
+ * netdev watchdog callback: defers the actual recovery to the reset
+ * task so the netif can be shut down gracefully first.
+ */
+void bnx2x_tx_timeout(struct net_device *dev)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+
+#ifdef BNX2X_STOP_ON_ERROR
+       if (!bp->panic)
+               bnx2x_panic();
+#endif
+       /* This allows the netif to be shutdown gracefully before resetting */
+       schedule_delayed_work(&bp->reset_task, 0);
+}
+
+#ifdef BCM_VLAN
+/* called with rtnl_lock */
+/*
+ * Register a VLAN group and refresh the HW_VLAN_RX/TX capability flags
+ * from dev->features; pushes the new client config to the chip when the
+ * interface is running.
+ */
+void bnx2x_vlan_rx_register(struct net_device *dev,
+                                  struct vlan_group *vlgrp)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+
+       bp->vlgrp = vlgrp;
+
+       /* Set flags according to the required capabilities */
+       bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
+
+       if (dev->features & NETIF_F_HW_VLAN_TX)
+               bp->flags |= HW_VLAN_TX_FLAG;
+
+       if (dev->features & NETIF_F_HW_VLAN_RX)
+               bp->flags |= HW_VLAN_RX_FLAG;
+
+       if (netif_running(dev))
+               bnx2x_set_client_config(bp);
+}
+
+#endif
+/*
+ * PCI suspend handler: save PCI state and, if the interface is up,
+ * detach the netdev, unload the NIC and drop to the requested power
+ * state. All netdev work is done under rtnl_lock.
+ */
+int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct bnx2x *bp;
+
+       if (!dev) {
+               dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
+               return -ENODEV;
+       }
+       bp = netdev_priv(dev);
+
+       rtnl_lock();
+
+       pci_save_state(pdev);
+
+       if (!netif_running(dev)) {
+               rtnl_unlock();
+               return 0;
+       }
+
+       netif_device_detach(dev);
+
+       bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+
+       bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
+
+       rtnl_unlock();
+
+       return 0;
+}
+
+/*
+ * PCI resume handler: restore PCI state and, if the interface was up,
+ * return to D0, re-attach the netdev and reload the NIC. Refused
+ * (-EAGAIN) while parity-error recovery is in progress.
+ */
+int bnx2x_resume(struct pci_dev *pdev)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct bnx2x *bp;
+       int rc;
+
+       if (!dev) {
+               dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
+               return -ENODEV;
+       }
+       bp = netdev_priv(dev);
+
+       if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+               printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+               return -EAGAIN;
+       }
+
+       rtnl_lock();
+
+       pci_restore_state(pdev);
+
+       if (!netif_running(dev)) {
+               rtnl_unlock();
+               return 0;
+       }
+
+       bnx2x_set_power_state(bp, PCI_D0);
+       netif_device_attach(dev);
+
+       rc = bnx2x_nic_load(bp, LOAD_OPEN);
+
+       rtnl_unlock();
+
+       return rc;
+}
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
new file mode 100644 (file)
index 0000000..d1979b1
--- /dev/null
@@ -0,0 +1,652 @@
+/* bnx2x_cmn.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+#ifndef BNX2X_CMN_H
+#define BNX2X_CMN_H
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+
+
+#include "bnx2x.h"
+
+
+/*********************** Interfaces ****************************
+ *  Functions that need to be implemented by each driver version
+ */
+
+/**
+ * Initialize link parameters structure variables.
+ *
+ * @param bp
+ * @param load_mode
+ *
+ * @return u8
+ */
+u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);
+
+/**
+ * Configure hw according to link parameters structure.
+ *
+ * @param bp
+ */
+void bnx2x_link_set(struct bnx2x *bp);
+
+/**
+ * Query link status
+ *
+ * @param bp
+ *
+ * @return 0 - link is UP
+ */
+u8 bnx2x_link_test(struct bnx2x *bp);
+
+/**
+ * Handles link status change
+ *
+ * @param bp
+ */
+void bnx2x__link_status_update(struct bnx2x *bp);
+
+/**
+ * MSI-X slowpath interrupt handler
+ *
+ * @param irq
+ * @param dev_instance
+ *
+ * @return irqreturn_t
+ */
+irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);
+
+/**
+ * non MSI-X interrupt handler
+ *
+ * @param irq
+ * @param dev_instance
+ *
+ * @return irqreturn_t
+ */
+irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
+#ifdef BCM_CNIC
+
+/**
+ * Send command to cnic driver
+ *
+ * @param bp
+ * @param cmd
+ */
+int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
+
+/**
+ * Provides cnic information for proper interrupt handling
+ *
+ * @param bp
+ */
+void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
+#endif
+
+/**
+ * Enable HW interrupts.
+ *
+ * @param bp
+ */
+void bnx2x_int_enable(struct bnx2x *bp);
+
+/**
+ * Disable interrupts. This function ensures that no ISRs or
+ * SP DPCs (sp_task) are running after it returns.
+ *
+ * @param bp
+ * @param disable_hw if true, disable HW interrupts.
+ */
+void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
+
+/**
+ * Init HW blocks according to current initialization stage:
+ * COMMON, PORT or FUNCTION.
+ *
+ * @param bp
+ * @param load_code: COMMON, PORT or FUNCTION
+ *
+ * @return int
+ */
+int bnx2x_init_hw(struct bnx2x *bp, u32 load_code);
+
+/**
+ * Init driver internals:
+ *  - rings
+ *  - status blocks
+ *  - etc.
+ *
+ * @param bp
+ * @param load_code COMMON, PORT or FUNCTION
+ */
+void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
+
+/**
+ * Allocate driver's memory.
+ *
+ * @param bp
+ *
+ * @return int
+ */
+int bnx2x_alloc_mem(struct bnx2x *bp);
+
+/**
+ * Release driver's memory.
+ *
+ * @param bp
+ */
+void bnx2x_free_mem(struct bnx2x *bp);
+
+/**
+ * Bring up a leading (the first) eth Client.
+ *
+ * @param bp
+ *
+ * @return int
+ */
+int bnx2x_setup_leading(struct bnx2x *bp);
+
+/**
+ * Setup non-leading eth Client.
+ *
+ * @param bp
+ * @param fp
+ *
+ * @return int
+ */
+int bnx2x_setup_multi(struct bnx2x *bp, int index);
+
+/**
+ * Set number of queues according to mode and number of available
+ * msi-x vectors
+ *
+ * @param bp
+ *
+ */
+void bnx2x_set_num_queues_msix(struct bnx2x *bp);
+
+/**
+ * Cleanup chip internals:
+ * - Cleanup MAC configuration.
+ * - Close clients.
+ * - etc.
+ *
+ * @param bp
+ * @param unload_mode
+ */
+void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);
+
+/**
+ * Acquire HW lock.
+ *
+ * @param bp
+ * @param resource Resource bit which was locked
+ *
+ * @return int
+ */
+int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);
+
+/**
+ * Release HW lock.
+ *
+ * @param bp driver handle
+ * @param resource Resource bit which was locked
+ *
+ * @return int
+ */
+int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
+
+/**
+ * Configure eth MAC address in the HW according to the value in
+ * netdev->dev_addr for 57711
+ *
+ * @param bp driver handle
+ * @param set
+ */
+void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
+
+/**
+ * Configure eth MAC address in the HW according to the value in
+ * netdev->dev_addr for 57710
+ *
+ * @param bp driver handle
+ * @param set
+ */
+void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set);
+
+#ifdef BCM_CNIC
+/**
+ * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
+ * MAC(s). The function will wait until the ramrod completion
+ * returns.
+ *
+ * @param bp driver handle
+ * @param set set or clear the CAM entry
+ *
+ * @return 0 on success, -ENODEV if the ramrod doesn't complete.
+ */
+int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set);
+#endif
+
+/**
+ * Initialize status block in FW and HW
+ *
+ * @param bp driver handle
+ * @param sb host_status_block
+ * @param dma_addr_t mapping
+ * @param int sb_id
+ */
+void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
+                         dma_addr_t mapping, int sb_id);
+
+/**
+ * Reconfigure FW/HW according to dev->flags rx mode
+ *
+ * @param dev net_device
+ *
+ */
+void bnx2x_set_rx_mode(struct net_device *dev);
+
+/**
+ * Configure MAC filtering rules in a FW.
+ *
+ * @param bp driver handle
+ */
+void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
+
+/* Parity errors related */
+void bnx2x_inc_load_cnt(struct bnx2x *bp);
+u32 bnx2x_dec_load_cnt(struct bnx2x *bp);
+bool bnx2x_chk_parity_attn(struct bnx2x *bp);
+bool bnx2x_reset_is_done(struct bnx2x *bp);
+void bnx2x_disable_close_the_gate(struct bnx2x *bp);
+
+/**
+ * Perform statistics handling according to event
+ *
+ * @param bp driver handle
+ * @param event bnx2x_stats_event
+ */
+void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
+
+/**
+ * Configures FW with client parameters (like HW VLAN removal)
+ * for each active client.
+ *
+ * @param bp
+ */
+void bnx2x_set_client_config(struct bnx2x *bp);
+
+/**
+ * Handle sp events
+ *
+ * @param fp fastpath handle for the event
+ * @param rr_cqe eth_rx_cqe
+ */
+void bnx2x_sp_event(struct bnx2x_fastpath *fp,  union eth_rx_cqe *rr_cqe);
+
+
+/* Refresh the fastpath's cached CSTORM/USTORM status block indices
+ * from the chip-written status block.  The compiler barrier forces a
+ * fresh read of the DMA'd status block rather than a cached value.
+ */
+static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
+{
+       struct host_status_block *fpsb = fp->status_blk;
+
+       barrier(); /* status block is written to by the chip */
+       fp->fp_c_idx = fpsb->c_status_block.status_block_index;
+       fp->fp_u_idx = fpsb->u_status_block.status_block_index;
+}
+
+/* Publish new RX BD, CQE and SGE producer values to the FW (USTORM
+ * internal memory).  Ordering is critical: the wmb() below must keep
+ * the descriptor writes visible before the producer update, and
+ * mmiowb() keeps the producer register writes ordered on the bus.
+ */
+static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
+                                       struct bnx2x_fastpath *fp,
+                                       u16 bd_prod, u16 rx_comp_prod,
+                                       u16 rx_sge_prod)
+{
+       struct ustorm_eth_rx_producers rx_prods = {0};
+       int i;
+
+       /* Update producers */
+       rx_prods.bd_prod = bd_prod;
+       rx_prods.cqe_prod = rx_comp_prod;
+       rx_prods.sge_prod = rx_sge_prod;
+
+       /*
+        * Make sure that the BD and SGE data is updated before updating the
+        * producers since FW might read the BD/SGE right after the producer
+        * is updated.
+        * This is only applicable for weak-ordered memory model archs such
+        * as IA-64. The following barrier is also mandatory since FW will
+        * assumes BDs must have buffers.
+        */
+       wmb();
+
+       /* Write the producer struct to USTORM one u32 at a time */
+       for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
+               REG_WR(bp, BAR_USTRORM_INTMEM +
+                      USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
+                      ((u32 *)&rx_prods)[i]);
+
+       mmiowb(); /* keep prod updates ordered */
+
+       DP(NETIF_MSG_RX_STATUS,
+          "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
+          fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
+}
+
+
+
+/* Acknowledge a status block to the IGU: write the consumed index
+ * (and optionally re-enable interrupts, per @op/@update) for the given
+ * storm/sb_id through the HC command register of this port.
+ */
+static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
+                               u8 storm, u16 index, u8 op, u8 update)
+{
+       u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
+                      COMMAND_REG_INT_ACK);
+       struct igu_ack_register igu_ack;
+
+       igu_ack.status_block_index = index;
+       igu_ack.sb_id_and_flags =
+                       ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
+                        (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
+                        (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
+                        (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
+
+       DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
+          (*(u32 *)&igu_ack), hc_addr);
+       REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
+
+       /* Make sure that ACK is written */
+       mmiowb();
+       barrier();
+}
+/* Read (and thereby acknowledge) the SIMD interrupt mask from the HC
+ * command register of this port; the returned value tells the caller
+ * which status blocks have pending work.
+ */
+static inline u16 bnx2x_ack_int(struct bnx2x *bp)
+{
+       u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
+                      COMMAND_REG_SIMD_MASK);
+       u32 result = REG_RD(bp, hc_addr);
+
+       DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
+          result, hc_addr);
+
+       return result;
+}
+
+/*
+ * fast path service functions
+ */
+
+/* Report whether TX packets are still outstanding on this fastpath
+ * (producer and consumer indices differ); used on the unload path.
+ */
+static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
+{
+       u16 prod, cons;
+
+       barrier();      /* producer and consumer may change under us */
+       prod = fp->tx_pkt_prod;
+       cons = fp->tx_pkt_cons;
+
+       return prod != cons;
+}
+
+/* Return the number of free TX BDs on this fastpath.  The signed-16
+ * subtraction (SUB_S16) handles producer/consumer wraparound, and the
+ * NUM_TX_RINGS "next-page" BDs are counted as used so they are never
+ * handed out as real descriptors.
+ */
+static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
+{
+       s16 used;
+       u16 prod;
+       u16 cons;
+
+       prod = fp->tx_bd_prod;
+       cons = fp->tx_bd_cons;
+
+       /* NUM_TX_RINGS = number of "next-page" entries
+          It will be used as a threshold */
+       used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
+
+#ifdef BNX2X_STOP_ON_ERROR
+       WARN_ON(used < 0);
+       WARN_ON(used > fp->bp->tx_ring_size);
+       WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
+#endif
+
+       return (s16)(fp->bp->tx_ring_size) - used;
+}
+
+/* True when the chip has completed TX packets that the driver has not
+ * yet reclaimed: the status-block consumer differs from our consumer.
+ */
+static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
+{
+       /* The status block is written by the chip; force a fresh read */
+       barrier();
+       return le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons;
+}
+
+/* Free the SGE page at @index: unmap it from the device, release the
+ * page(s) and clear both the shadow ring entry and the HW descriptor
+ * so the slot reads as empty.
+ */
+static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
+                                    struct bnx2x_fastpath *fp, u16 index)
+{
+       struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
+       struct page *page = sw_buf->page;
+       struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
+
+       /* Skip "next page" elements */
+       if (!page)
+               return;
+
+       /* Use the generic DMA direction constant to match
+        * dma_unmap_page(); the map side in bnx2x_alloc_rx_sge()
+        * already uses DMA_FROM_DEVICE.
+        */
+       dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
+                      SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+       __free_pages(page, PAGES_PER_SGE_SHIFT);
+
+       sw_buf->page = NULL;
+       sge->addr_hi = 0;
+       sge->addr_lo = 0;
+}
+
+/* Release the first @last SGE pages of the fastpath's page ring. */
+static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
+                                          struct bnx2x_fastpath *fp, int last)
+{
+       int idx;
+
+       for (idx = 0; idx < last; idx++)
+               bnx2x_free_rx_sge(bp, fp, idx);
+}
+
+/* Allocate and DMA-map a fresh SGE page (SGE_PAGE_SIZE*PAGES_PER_SGE
+ * bytes) and publish its bus address in the HW descriptor at @index.
+ *
+ * Returns 0 on success, -ENOMEM if allocation or mapping fails; on
+ * mapping failure the page is freed before returning.
+ */
+static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
+                                    struct bnx2x_fastpath *fp, u16 index)
+{
+       struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
+       struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
+       struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
+       dma_addr_t mapping;
+
+       if (unlikely(page == NULL))
+               return -ENOMEM;
+
+       mapping = dma_map_page(&bp->pdev->dev, page, 0,
+                              SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+               __free_pages(page, PAGES_PER_SGE_SHIFT);
+               return -ENOMEM;
+       }
+
+       /* Remember page and mapping for the unmap/free path */
+       sw_buf->page = page;
+       dma_unmap_addr_set(sw_buf, mapping, mapping);
+
+       sge->addr_hi = cpu_to_le32(U64_HI(mapping));
+       sge->addr_lo = cpu_to_le32(U64_LO(mapping));
+
+       return 0;
+}
+/* Allocate and DMA-map a fresh RX skb of bp->rx_buf_size bytes and
+ * publish its bus address in the RX BD at @index.
+ *
+ * Returns 0 on success, -ENOMEM if allocation or mapping fails; on
+ * mapping failure the skb is freed before returning.
+ */
+static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
+                                    struct bnx2x_fastpath *fp, u16 index)
+{
+       struct sk_buff *skb;
+       struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
+       struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
+       dma_addr_t mapping;
+
+       skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+       if (unlikely(skb == NULL))
+               return -ENOMEM;
+
+       mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
+                                DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+               dev_kfree_skb(skb);
+               return -ENOMEM;
+       }
+
+       /* Remember skb and mapping for the unmap/free path */
+       rx_buf->skb = skb;
+       dma_unmap_addr_set(rx_buf, mapping, mapping);
+
+       rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+       rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+
+       return 0;
+}
+
+/* Recycle an RX buffer from the consumer slot @cons to the producer
+ * slot @prod: note that we are not allocating a new skb, we are just
+ * moving one from cons to prod.  We are not creating a new mapping,
+ * so there is no need to check for dma_mapping_error().
+ */
+static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
+                              struct sk_buff *skb, u16 cons, u16 prod)
+{
+       struct bnx2x *bp = fp->bp;
+       struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
+       struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
+       struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
+       struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
+
+       /* Give the buffer back to the device (it was synced to the CPU
+        * for inspection of the packet header before reuse) */
+       dma_sync_single_for_device(&bp->pdev->dev,
+                                  dma_unmap_addr(cons_rx_buf, mapping),
+                                  RX_COPY_THRESH, DMA_FROM_DEVICE);
+
+       prod_rx_buf->skb = cons_rx_buf->skb;
+       dma_unmap_addr_set(prod_rx_buf, mapping,
+                          dma_unmap_addr(cons_rx_buf, mapping));
+       *prod_bd = *cons_bd;
+}
+
+/* The last two SGE indices of every ring page are "next page" link
+ * elements and never carry real buffers; clear them from the SGE mask
+ * so producer accounting never waits on them.
+ */
+static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
+{
+       int page;
+
+       for (page = 1; page <= NUM_RX_SGE_PAGES; page++) {
+               int last = RX_SGE_CNT * page - 1;
+
+               SGE_MASK_CLEAR_BIT(fp, last);
+               SGE_MASK_CLEAR_BIT(fp, last - 1);
+       }
+}
+
+/* Initialize the SGE producer mask to "all in use", then knock out the
+ * per-page "next" link elements which never hold real buffers.
+ */
+static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
+{
+       /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
+       memset(fp->sge_mask, 0xff,
+              (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
+
+       /* Clear the last two indices in each page: these correspond to
+          the "next" link element, hence will never be indicated and
+          should be removed from the calculations. */
+       bnx2x_clear_sge_mask_next_elems(fp);
+}
+/* Free the first @last entries of the fastpath's TPA skb pool.  Only
+ * bins in the BNX2X_TPA_START state still hold a live DMA mapping and
+ * need to be unmapped before the skb is freed; empty bins are skipped.
+ */
+static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
+                                      struct bnx2x_fastpath *fp, int last)
+{
+       int i;
+
+       for (i = 0; i < last; i++) {
+               struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
+               struct sk_buff *skb = rx_buf->skb;
+
+               if (skb == NULL) {
+                       DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
+                       continue;
+               }
+
+               /* Only a started aggregation still owns a DMA mapping */
+               if (fp->tpa_state[i] == BNX2X_TPA_START)
+                       dma_unmap_single(&bp->pdev->dev,
+                                        dma_unmap_addr(rx_buf, mapping),
+                                        bp->rx_buf_size, DMA_FROM_DEVICE);
+
+               dev_kfree_skb(skb);
+               rx_buf->skb = NULL;
+       }
+}
+
+
+/* Initialize the TX rings of every queue: chain the "next page" BDs of
+ * each ring page to the following page, set up the doorbell header and
+ * reset all producer/consumer bookkeeping.
+ */
+static inline void bnx2x_init_tx_ring(struct bnx2x *bp)
+{
+       int i, j;
+
+       for_each_queue(bp, j) {
+               struct bnx2x_fastpath *fp = &bp->fp[j];
+
+               /* Point each page's last BD at the next page (the last
+                * page wraps to page 0 via the modulo) */
+               for (i = 1; i <= NUM_TX_RINGS; i++) {
+                       struct eth_tx_next_bd *tx_next_bd =
+                               &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
+
+                       tx_next_bd->addr_hi =
+                               cpu_to_le32(U64_HI(fp->tx_desc_mapping +
+                                           BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
+                       tx_next_bd->addr_lo =
+                               cpu_to_le32(U64_LO(fp->tx_desc_mapping +
+                                           BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
+               }
+
+               fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
+               fp->tx_db.data.zero_fill1 = 0;
+               fp->tx_db.data.prod = 0;
+
+               fp->tx_pkt_prod = 0;
+               fp->tx_pkt_cons = 0;
+               fp->tx_bd_prod = 0;
+               fp->tx_bd_cons = 0;
+               fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
+               fp->tx_pkt = 0;
+       }
+}
+/* True when the chip has posted RX completions the driver has not yet
+ * processed.  When the status-block consumer lands on the last slot of
+ * an RCQ page (a "next page" element) it is bumped past it, since that
+ * slot never carries a real completion.
+ */
+static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
+{
+       u16 rx_cons_sb;
+
+       /* Tell compiler that status block fields can change */
+       barrier();
+       rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
+       if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
+               rx_cons_sb++;
+       return (fp->rx_comp_cons != rx_cons_sb);
+}
+
+/* HW Lock for shared dual port PHYs */
+void bnx2x_acquire_phy_lock(struct bnx2x *bp);
+void bnx2x_release_phy_lock(struct bnx2x *bp);
+
+void bnx2x_link_report(struct bnx2x *bp);
+int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
+int bnx2x_tx_int(struct bnx2x_fastpath *fp);
+void bnx2x_init_rx_rings(struct bnx2x *bp);
+netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
+
+int bnx2x_change_mac_addr(struct net_device *dev, void *p);
+void bnx2x_tx_timeout(struct net_device *dev);
+void bnx2x_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp);
+void bnx2x_netif_start(struct bnx2x *bp);
+void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
+void bnx2x_free_irq(struct bnx2x *bp, bool disable_only);
+int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
+int bnx2x_resume(struct pci_dev *pdev);
+void bnx2x_free_skbs(struct bnx2x *bp);
+int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
+int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
+int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
+int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
+
+#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
new file mode 100644 (file)
index 0000000..8b75b05
--- /dev/null
@@ -0,0 +1,1971 @@
+/* bnx2x_ethtool.c: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/crc32.h>
+
+
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_dump.h"
+
+
+/* ethtool get_settings: report supported/advertised modes, current
+ * speed/duplex, port type and autoneg state from the driver's link
+ * parameters.  Speed/duplex are reported as -1 (unknown) when the
+ * link is down or the function is disabled.
+ */
+static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+
+       cmd->supported = bp->port.supported;
+       cmd->advertising = bp->port.advertising;
+
+       if ((bp->state == BNX2X_STATE_OPEN) &&
+           !(bp->flags & MF_FUNC_DIS) &&
+           (bp->link_vars.link_up)) {
+               cmd->speed = bp->link_vars.line_speed;
+               cmd->duplex = bp->link_vars.duplex;
+               if (IS_E1HMF(bp)) {
+                       /* In multi-function mode the reported speed is
+                        * capped by this function's configured max
+                        * bandwidth (stored in units of 100 Mbps) */
+                       u16 vn_max_rate;
+
+                       vn_max_rate =
+                               ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
+                               FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
+                       if (vn_max_rate < cmd->speed)
+                               cmd->speed = vn_max_rate;
+               }
+       } else {
+               cmd->speed = -1;
+               cmd->duplex = -1;
+       }
+
+       /* Map the external PHY type to an ethtool port type */
+       if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
+               u32 ext_phy_type =
+                       XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
+
+               switch (ext_phy_type) {
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+                       cmd->port = PORT_FIBRE;
+                       break;
+
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
+                       cmd->port = PORT_TP;
+                       break;
+
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
+                       BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
+                                 bp->link_params.ext_phy_config);
+                       break;
+
+               default:
+                       DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
+                          bp->link_params.ext_phy_config);
+                       break;
+               }
+       } else
+               cmd->port = PORT_TP;
+
+       cmd->phy_address = bp->mdio.prtad;
+       cmd->transceiver = XCVR_INTERNAL;
+
+       if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
+               cmd->autoneg = AUTONEG_ENABLE;
+       else
+               cmd->autoneg = AUTONEG_DISABLE;
+
+       cmd->maxtxpkt = 0;
+       cmd->maxrxpkt = 0;
+
+       DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
+          DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
+          DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
+          DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
+          cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
+          cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
+          cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+
+       return 0;
+}
+
+/* ethtool set_settings: validate and apply the requested autoneg or
+ * forced speed/duplex against the port's supported modes, update the
+ * driver's link parameters and, if the interface is running, restart
+ * statistics and reconfigure the link.
+ *
+ * Silently ignored (returns 0) in E1H multi-function mode, where link
+ * settings are not per-function.  Returns -EINVAL for any requested
+ * mode the port does not support.
+ */
+static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+       u32 advertising;
+
+       if (IS_E1HMF(bp))
+               return 0;
+
+       DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
+          DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
+          DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
+          DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
+          cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
+          cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
+          cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+
+       if (cmd->autoneg == AUTONEG_ENABLE) {
+               if (!(bp->port.supported & SUPPORTED_Autoneg)) {
+                       DP(NETIF_MSG_LINK, "Autoneg not supported\n");
+                       return -EINVAL;
+               }
+
+               /* advertise the requested speed and duplex if supported */
+               cmd->advertising &= bp->port.supported;
+
+               bp->link_params.req_line_speed = SPEED_AUTO_NEG;
+               bp->link_params.req_duplex = DUPLEX_FULL;
+               bp->port.advertising |= (ADVERTISED_Autoneg |
+                                        cmd->advertising);
+
+       } else { /* forced speed */
+               /* advertise the requested speed and duplex if supported */
+               switch (cmd->speed) {
+               case SPEED_10:
+                       if (cmd->duplex == DUPLEX_FULL) {
+                               if (!(bp->port.supported &
+                                     SUPPORTED_10baseT_Full)) {
+                                       DP(NETIF_MSG_LINK,
+                                          "10M full not supported\n");
+                                       return -EINVAL;
+                               }
+
+                               advertising = (ADVERTISED_10baseT_Full |
+                                              ADVERTISED_TP);
+                       } else {
+                               if (!(bp->port.supported &
+                                     SUPPORTED_10baseT_Half)) {
+                                       DP(NETIF_MSG_LINK,
+                                          "10M half not supported\n");
+                                       return -EINVAL;
+                               }
+
+                               advertising = (ADVERTISED_10baseT_Half |
+                                              ADVERTISED_TP);
+                       }
+                       break;
+
+               case SPEED_100:
+                       if (cmd->duplex == DUPLEX_FULL) {
+                               if (!(bp->port.supported &
+                                               SUPPORTED_100baseT_Full)) {
+                                       DP(NETIF_MSG_LINK,
+                                          "100M full not supported\n");
+                                       return -EINVAL;
+                               }
+
+                               advertising = (ADVERTISED_100baseT_Full |
+                                              ADVERTISED_TP);
+                       } else {
+                               if (!(bp->port.supported &
+                                               SUPPORTED_100baseT_Half)) {
+                                       DP(NETIF_MSG_LINK,
+                                          "100M half not supported\n");
+                                       return -EINVAL;
+                               }
+
+                               advertising = (ADVERTISED_100baseT_Half |
+                                              ADVERTISED_TP);
+                       }
+                       break;
+
+               /* 1G and above support full duplex only */
+               case SPEED_1000:
+                       if (cmd->duplex != DUPLEX_FULL) {
+                               DP(NETIF_MSG_LINK, "1G half not supported\n");
+                               return -EINVAL;
+                       }
+
+                       if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
+                               DP(NETIF_MSG_LINK, "1G full not supported\n");
+                               return -EINVAL;
+                       }
+
+                       advertising = (ADVERTISED_1000baseT_Full |
+                                      ADVERTISED_TP);
+                       break;
+
+               case SPEED_2500:
+                       if (cmd->duplex != DUPLEX_FULL) {
+                               DP(NETIF_MSG_LINK,
+                                  "2.5G half not supported\n");
+                               return -EINVAL;
+                       }
+
+                       if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
+                               DP(NETIF_MSG_LINK,
+                                  "2.5G full not supported\n");
+                               return -EINVAL;
+                       }
+
+                       advertising = (ADVERTISED_2500baseX_Full |
+                                      ADVERTISED_TP);
+                       break;
+
+               case SPEED_10000:
+                       if (cmd->duplex != DUPLEX_FULL) {
+                               DP(NETIF_MSG_LINK, "10G half not supported\n");
+                               return -EINVAL;
+                       }
+
+                       if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
+                               DP(NETIF_MSG_LINK, "10G full not supported\n");
+                               return -EINVAL;
+                       }
+
+                       advertising = (ADVERTISED_10000baseT_Full |
+                                      ADVERTISED_FIBRE);
+                       break;
+
+               default:
+                       DP(NETIF_MSG_LINK, "Unsupported speed\n");
+                       return -EINVAL;
+               }
+
+               bp->link_params.req_line_speed = cmd->speed;
+               bp->link_params.req_duplex = cmd->duplex;
+               bp->port.advertising = advertising;
+       }
+
+       DP(NETIF_MSG_LINK, "req_line_speed %d\n"
+          DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
+          bp->link_params.req_line_speed, bp->link_params.req_duplex,
+          bp->port.advertising);
+
+       /* Apply the new parameters to a live link */
+       if (netif_running(dev)) {
+               bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+               bnx2x_link_set(bp);
+       }
+
+       return 0;
+}
+
+/* Chip-revision predicates over the register-table info flags */
+#define IS_E1_ONLINE(info)     (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
+#define IS_E1H_ONLINE(info)    (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
+
+/* ethtool get_regs_len: size in bytes of a register dump for this
+ * chip revision — all online plain registers plus, for each wide
+ * register block, the base register and its read_regs_count follow-on
+ * reads, plus the dump header.  Counts are in 32-bit words until the
+ * final *4.
+ */
+static int bnx2x_get_regs_len(struct net_device *dev)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+       int regdump_len = 0;
+       int i;
+
+       if (CHIP_IS_E1(bp)) {
+               for (i = 0; i < REGS_COUNT; i++)
+                       if (IS_E1_ONLINE(reg_addrs[i].info))
+                               regdump_len += reg_addrs[i].size;
+
+               for (i = 0; i < WREGS_COUNT_E1; i++)
+                       if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
+                               regdump_len += wreg_addrs_e1[i].size *
+                                       (1 + wreg_addrs_e1[i].read_regs_count);
+
+       } else { /* E1H */
+               for (i = 0; i < REGS_COUNT; i++)
+                       if (IS_E1H_ONLINE(reg_addrs[i].info))
+                               regdump_len += reg_addrs[i].size;
+
+               for (i = 0; i < WREGS_COUNT_E1H; i++)
+                       if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
+                               regdump_len += wreg_addrs_e1h[i].size *
+                                       (1 + wreg_addrs_e1h[i].read_regs_count);
+       }
+       regdump_len *= 4;
+       regdump_len += sizeof(struct dump_hdr);
+
+       return regdump_len;
+}
+
+/* ethtool get_regs: fill @_p with a dump header followed by the
+ * online plain registers for this chip revision.  The buffer is
+ * zeroed first and left zeroed if the interface is down.
+ *
+ * NOTE(review): bnx2x_get_regs_len() also accounts for the wreg
+ * blocks, but this function only dumps reg_addrs[] — the wreg region
+ * of the buffer stays zero from the memset.  Confirm this is the
+ * intended layout for the dump tool.
+ */
+static void bnx2x_get_regs(struct net_device *dev,
+                          struct ethtool_regs *regs, void *_p)
+{
+       u32 *p = _p, i, j;
+       struct bnx2x *bp = netdev_priv(dev);
+       struct dump_hdr dump_hdr = {0};
+
+       regs->version = 0;
+       memset(p, 0, regs->len);
+
+       if (!netif_running(bp->dev))
+               return;
+
+       dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
+       dump_hdr.dump_sign = dump_sign_all;
+       dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
+       dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
+       dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
+       dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
+       dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
+
+       memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
+       p += dump_hdr.hdr_size + 1;
+
+       /* Dump every online register, one u32 at a time */
+       if (CHIP_IS_E1(bp)) {
+               for (i = 0; i < REGS_COUNT; i++)
+                       if (IS_E1_ONLINE(reg_addrs[i].info))
+                               for (j = 0; j < reg_addrs[i].size; j++)
+                                       *p++ = REG_RD(bp,
+                                                     reg_addrs[i].addr + j*4);
+
+       } else { /* E1H */
+               for (i = 0; i < REGS_COUNT; i++)
+                       if (IS_E1H_ONLINE(reg_addrs[i].info))
+                               for (j = 0; j < reg_addrs[i].size; j++)
+                                       *p++ = REG_RD(bp,
+                                                     reg_addrs[i].addr + j*4);
+       }
+}
+
+/* maximum length of the external PHY firmware version string, incl. NUL */
+#define PHY_FW_VER_LEN                 10
+
+/* ethtool_ops::get_drvinfo - report driver/firmware version strings and the
+ * stats/test/eeprom/regdump lengths.  The bootcode version (and, when this
+ * function is the PMF, the external PHY FW version) is appended after the
+ * chip firmware version.
+ */
+static void bnx2x_get_drvinfo(struct net_device *dev,
+                             struct ethtool_drvinfo *info)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+       u8 phy_fw_ver[PHY_FW_VER_LEN];
+
+       strcpy(info->driver, DRV_MODULE_NAME);
+       strcpy(info->version, DRV_MODULE_VERSION);
+
+       /* only the PMF may touch the PHY to query its FW version */
+       phy_fw_ver[0] = '\0';
+       if (bp->port.pmf) {
+               bnx2x_acquire_phy_lock(bp);
+               bnx2x_get_ext_phy_fw_version(&bp->link_params,
+                                            (bp->state != BNX2X_STATE_CLOSED),
+                                            phy_fw_ver, PHY_FW_VER_LEN);
+               bnx2x_release_phy_lock(bp);
+       }
+
+       /* NOTE(review): strncpy() does not guarantee NUL termination; this
+        * relies on strlen(bp->fw_ver) < 32 - confirm, or prefer strlcpy().
+        */
+       strncpy(info->fw_version, bp->fw_ver, 32);
+       snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
+                "bc %d.%d.%d%s%s",
+                (bp->common.bc_ver & 0xff0000) >> 16,
+                (bp->common.bc_ver & 0xff00) >> 8,
+                (bp->common.bc_ver & 0xff),
+                ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
+       strcpy(info->bus_info, pci_name(bp->pdev));
+       info->n_stats = BNX2X_NUM_STATS;
+       info->testinfo_len = BNX2X_NUM_TESTS;
+       info->eedump_len = bp->common.flash_size;
+       info->regdump_len = bnx2x_get_regs_len(dev);
+}
+
+/* ethtool_ops::get_wol - report Wake-on-LAN capability and current setting.
+ * Only magic-packet wake-up is supported, and only when the board does not
+ * have WoL disabled altogether.
+ */
+static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+
+       wol->supported = (bp->flags & NO_WOL_FLAG) ? 0 : WAKE_MAGIC;
+       wol->wolopts = (wol->supported && bp->wol) ? WAKE_MAGIC : 0;
+       memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+/* ethtool_ops::set_wol - enable/disable magic-packet Wake-on-LAN.
+ * Returns -EINVAL for unsupported wake methods or when WoL is disabled
+ * on this board.
+ */
+static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+
+       /* reject any wake-up method other than magic packet */
+       if (wol->wolopts & ~WAKE_MAGIC)
+               return -EINVAL;
+
+       if (!(wol->wolopts & WAKE_MAGIC)) {
+               bp->wol = 0;
+               return 0;
+       }
+
+       /* magic packet requested - make sure the board allows it */
+       if (bp->flags & NO_WOL_FLAG)
+               return -EINVAL;
+
+       bp->wol = 1;
+       return 0;
+}
+
+/* ethtool_ops::get_msglevel - report the driver's debug message mask */
+static u32 bnx2x_get_msglevel(struct net_device *dev)
+{
+       const struct bnx2x *bp = netdev_priv(dev);
+
+       return bp->msg_enable;
+}
+
+/* ethtool_ops::set_msglevel - set the driver's debug message mask.
+ * Silently ignored for callers without CAP_NET_ADMIN.
+ */
+static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+
+       if (!capable(CAP_NET_ADMIN))
+               return;
+
+       bp->msg_enable = level;
+}
+
+/* ethtool_ops::nway_reset - restart link negotiation.  A no-op unless this
+ * function is the PMF (the only function allowed to drive the link) and the
+ * interface is up.
+ */
+static int bnx2x_nway_reset(struct net_device *dev)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+
+       if (!bp->port.pmf)
+               return 0;
+
+       if (!netif_running(dev))
+               return 0;
+
+       bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+       bnx2x_link_set(bp);
+
+       return 0;
+}
+
+/* ethtool_ops::get_link - report link state; a function that is disabled
+ * in multi-function mode never reports link up.
+ */
+static u32 bnx2x_get_link(struct net_device *dev)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+
+       return (bp->flags & MF_FUNC_DIS) ? 0 : bp->link_vars.link_up;
+}
+
+/* ethtool_ops::get_eeprom_len - size of the NVRAM (flash), in bytes */
+static int bnx2x_get_eeprom_len(struct net_device *dev)
+{
+       const struct bnx2x *bp = netdev_priv(dev);
+
+       return bp->common.flash_size;
+}
+
+/* Acquire the per-port NVRAM software arbitration lock by setting the
+ * request bit and polling until the MCP grants it.  Returns 0 on success,
+ * -EBUSY on timeout.  Pair with bnx2x_release_nvram_lock().
+ */
+static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
+{
+       int port = BP_PORT(bp);
+       int count, i;
+       u32 val = 0;
+
+       /* adjust timeout for emulation/FPGA */
+       count = NVRAM_TIMEOUT_COUNT;
+       if (CHIP_REV_IS_SLOW(bp))
+               count *= 100;
+
+       /* request access to nvram interface */
+       REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
+              (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
+
+       /* poll until the arbiter grants this port's request bit */
+       for (i = 0; i < count*10; i++) {
+               val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
+               if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
+                       break;
+
+               udelay(5);
+       }
+
+       /* 'val' holds the last read, so this re-checks the final state */
+       if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
+               DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+/* Release the per-port NVRAM software arbitration lock by setting the
+ * clear bit and polling until the grant bit drops.  Returns 0 on success,
+ * -EBUSY if the grant bit never clears.
+ */
+static int bnx2x_release_nvram_lock(struct bnx2x *bp)
+{
+       int port = BP_PORT(bp);
+       int count, i;
+       u32 val = 0;
+
+       /* adjust timeout for emulation/FPGA */
+       count = NVRAM_TIMEOUT_COUNT;
+       if (CHIP_REV_IS_SLOW(bp))
+               count *= 100;
+
+       /* relinquish nvram interface */
+       REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
+              (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
+
+       /* poll until our grant bit is gone */
+       for (i = 0; i < count*10; i++) {
+               val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
+               if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
+                       break;
+
+               udelay(5);
+       }
+
+       if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
+               DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+/* Turn on NVRAM access (read and write enables) in the MCP access-enable
+ * register.  Caller must already hold the NVRAM arbitration lock.
+ */
+static void bnx2x_enable_nvram_access(struct bnx2x *bp)
+{
+       u32 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
+
+       /* enable both bits, even on read */
+       val |= MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN;
+       REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE, val);
+}
+
+/* Turn off NVRAM access (both read and write enables) in the MCP
+ * access-enable register.  Mirrors bnx2x_enable_nvram_access().
+ */
+static void bnx2x_disable_nvram_access(struct bnx2x *bp)
+{
+       u32 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
+
+       /* disable both bits, even after read */
+       val &= ~(MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN);
+       REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE, val);
+}
+
+/* Read one dword from NVRAM at 'offset' into *ret_val (stored big-endian,
+ * i.e. flash byte order).  cmd_flags may carry FIRST/LAST sequencing bits.
+ * Caller must hold the NVRAM lock and have access enabled.
+ * Returns 0 on success, -EBUSY if the DONE bit never appears.
+ */
+static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
+                                 u32 cmd_flags)
+{
+       int count, i, rc;
+       u32 val;
+
+       /* build the command word */
+       cmd_flags |= MCPR_NVM_COMMAND_DOIT;
+
+       /* need to clear DONE bit separately */
+       REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
+
+       /* address of the NVRAM to read from */
+       REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
+              (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
+
+       /* issue a read command */
+       REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
+
+       /* adjust timeout for emulation/FPGA */
+       count = NVRAM_TIMEOUT_COUNT;
+       if (CHIP_REV_IS_SLOW(bp))
+               count *= 100;
+
+       /* wait for completion; *ret_val stays 0 on timeout */
+       *ret_val = 0;
+       rc = -EBUSY;
+       for (i = 0; i < count; i++) {
+               udelay(5);
+               val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
+
+               if (val & MCPR_NVM_COMMAND_DONE) {
+                       val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
+                       /* we read nvram data in cpu order
+                        * but ethtool sees it as an array of bytes
+                        * converting to big-endian will do the work */
+                       *ret_val = cpu_to_be32(val);
+                       rc = 0;
+                       break;
+               }
+       }
+
+       return rc;
+}
+
+/* Read buf_size bytes from NVRAM at 'offset' into ret_buf.  Both offset and
+ * buf_size must be dword-aligned and buf_size non-zero, and the range must
+ * lie within the flash.  Handles lock acquire/release, access enable, and
+ * the FIRST/LAST command sequencing.  Returns 0 or a negative errno.
+ */
+static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
+                           int buf_size)
+{
+       int rc;
+       u32 cmd_flags;
+       __be32 val;
+
+       if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
+               DP(BNX2X_MSG_NVM,
+                  "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
+                  offset, buf_size);
+               return -EINVAL;
+       }
+
+       if (offset + buf_size > bp->common.flash_size) {
+               DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
+                                 " buf_size (0x%x) > flash_size (0x%x)\n",
+                  offset, buf_size, bp->common.flash_size);
+               return -EINVAL;
+       }
+
+       /* request access to nvram interface */
+       rc = bnx2x_acquire_nvram_lock(bp);
+       if (rc)
+               return rc;
+
+       /* enable access to nvram interface */
+       bnx2x_enable_nvram_access(bp);
+
+       /* read the first word(s); all but the final dword go through here */
+       cmd_flags = MCPR_NVM_COMMAND_FIRST;
+       while ((buf_size > sizeof(u32)) && (rc == 0)) {
+               rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
+               /* on failure 'val' is 0; the loop exits via (rc == 0) */
+               memcpy(ret_buf, &val, 4);
+
+               /* advance to the next dword */
+               offset += sizeof(u32);
+               ret_buf += sizeof(u32);
+               buf_size -= sizeof(u32);
+               cmd_flags = 0;
+       }
+
+       /* final dword carries the LAST flag (and FIRST too if buf_size == 4) */
+       if (rc == 0) {
+               cmd_flags |= MCPR_NVM_COMMAND_LAST;
+               rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
+               memcpy(ret_buf, &val, 4);
+       }
+
+       /* disable access to nvram interface */
+       bnx2x_disable_nvram_access(bp);
+       bnx2x_release_nvram_lock(bp);
+
+       return rc;
+}
+
+/* ethtool_ops::get_eeprom - read a range of NVRAM into eebuf.  Offset and
+ * length were already validated by the ethtool core; the device must be up
+ * for NVRAM access, otherwise -EAGAIN is returned.
+ */
+static int bnx2x_get_eeprom(struct net_device *dev,
+                           struct ethtool_eeprom *eeprom, u8 *eebuf)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+
+       if (!netif_running(dev))
+               return -EAGAIN;
+
+       DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
+          DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
+          eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
+          eeprom->len, eeprom->len);
+
+       /* parameters already validated in ethtool_get_eeprom */
+
+       return bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
+}
+
+/* Write one dword 'val' to NVRAM at 'offset'.  cmd_flags may carry
+ * FIRST/LAST sequencing bits.  Caller must hold the NVRAM lock and have
+ * access enabled.  Returns 0 on success, -EBUSY on DONE-bit timeout.
+ */
+static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
+                                  u32 cmd_flags)
+{
+       int count, i, rc;
+
+       /* build the command word */
+       cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
+
+       /* need to clear DONE bit separately */
+       REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
+
+       /* write the data */
+       REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
+
+       /* address of the NVRAM to write to */
+       REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
+              (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
+
+       /* issue the write command */
+       REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
+
+       /* adjust timeout for emulation/FPGA */
+       count = NVRAM_TIMEOUT_COUNT;
+       if (CHIP_REV_IS_SLOW(bp))
+               count *= 100;
+
+       /* wait for completion (note: 'val' is reused as a scratch register,
+        * the data to write was already latched into NVM_WRITE above) */
+       rc = -EBUSY;
+       for (i = 0; i < count; i++) {
+               udelay(5);
+               val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
+               if (val & MCPR_NVM_COMMAND_DONE) {
+                       rc = 0;
+                       break;
+               }
+       }
+
+       return rc;
+}
+
+/* bit offset of the byte at 'offset' within its containing dword */
+#define BYTE_OFFSET(offset)            (8 * (offset & 0x03))
+
+/* Write a single byte (*data_buf) to NVRAM at 'offset' via a
+ * read-modify-write of the containing (aligned) dword.  Used by ethtool
+ * which issues single-byte writes.  Returns 0 or a negative errno.
+ */
+static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
+                             int buf_size)
+{
+       int rc;
+       u32 cmd_flags;
+       u32 align_offset;
+       __be32 val;
+
+       if (offset + buf_size > bp->common.flash_size) {
+               DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
+                                 " buf_size (0x%x) > flash_size (0x%x)\n",
+                  offset, buf_size, bp->common.flash_size);
+               return -EINVAL;
+       }
+
+       /* request access to nvram interface */
+       rc = bnx2x_acquire_nvram_lock(bp);
+       if (rc)
+               return rc;
+
+       /* enable access to nvram interface */
+       bnx2x_enable_nvram_access(bp);
+
+       /* the single dword is both FIRST and LAST of its transaction */
+       cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
+       align_offset = (offset & ~0x03);
+       rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
+
+       if (rc == 0) {
+               /* NOTE(review): 'val' is declared __be32 but is masked and
+                * shifted here as if native-order; sparse would warn about
+                * the mixed endianness - presumably the combined
+                * cpu_to_be32/be32_to_cpu round trip makes the byte land at
+                * the right flash offset; confirm before "cleaning up".
+                */
+               val &= ~(0xff << BYTE_OFFSET(offset));
+               val |= (*data_buf << BYTE_OFFSET(offset));
+
+               /* nvram data is returned as an array of bytes
+                * convert it back to cpu order */
+               val = be32_to_cpu(val);
+
+               rc = bnx2x_nvram_write_dword(bp, align_offset, val,
+                                            cmd_flags);
+       }
+
+       /* disable access to nvram interface */
+       bnx2x_disable_nvram_access(bp);
+       bnx2x_release_nvram_lock(bp);
+
+       return rc;
+}
+
+/* Write buf_size bytes from data_buf to NVRAM at 'offset'.  A 1-byte write
+ * (as issued by ethtool) is delegated to bnx2x_nvram_write1(); otherwise
+ * offset and buf_size must be dword-aligned and non-zero.  FIRST/LAST
+ * command flags are raised at NVRAM page boundaries.  Returns 0 or a
+ * negative errno.
+ */
+static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
+                            int buf_size)
+{
+       int rc;
+       u32 cmd_flags;
+       u32 val;
+       u32 written_so_far;
+
+       if (buf_size == 1)      /* ethtool */
+               return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
+
+       if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
+               DP(BNX2X_MSG_NVM,
+                  "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
+                  offset, buf_size);
+               return -EINVAL;
+       }
+
+       if (offset + buf_size > bp->common.flash_size) {
+               DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
+                                 " buf_size (0x%x) > flash_size (0x%x)\n",
+                  offset, buf_size, bp->common.flash_size);
+               return -EINVAL;
+       }
+
+       /* request access to nvram interface */
+       rc = bnx2x_acquire_nvram_lock(bp);
+       if (rc)
+               return rc;
+
+       /* enable access to nvram interface */
+       bnx2x_enable_nvram_access(bp);
+
+       written_so_far = 0;
+       cmd_flags = MCPR_NVM_COMMAND_FIRST;
+       while ((written_so_far < buf_size) && (rc == 0)) {
+               /* LAST on the final dword of the buffer or of a flash page;
+                * FIRST again when starting a new flash page */
+               if (written_so_far == (buf_size - sizeof(u32)))
+                       cmd_flags |= MCPR_NVM_COMMAND_LAST;
+               else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
+                       cmd_flags |= MCPR_NVM_COMMAND_LAST;
+               else if ((offset % NVRAM_PAGE_SIZE) == 0)
+                       cmd_flags |= MCPR_NVM_COMMAND_FIRST;
+
+               memcpy(&val, data_buf, 4);
+
+               rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
+
+               /* advance to the next dword */
+               offset += sizeof(u32);
+               data_buf += sizeof(u32);
+               written_so_far += sizeof(u32);
+               cmd_flags = 0;
+       }
+
+       /* disable access to nvram interface */
+       bnx2x_disable_nvram_access(bp);
+       bnx2x_release_nvram_lock(bp);
+
+       return rc;
+}
+
+/* ethtool_ops::set_eeprom - write NVRAM, or perform a PHY firmware-upgrade
+ * step selected by a magic value:
+ *   'PHYP' - prepare the PHY for FW upgrade (link reset, raise GPIO0)
+ *   'PHYR' - re-initialize the link after the upgrade
+ *   'PHYC' - upgrade completed: reset the SFX7101 PHY
+ * Any other magic performs a plain NVRAM write.  Returns 0 or -errno.
+ */
+static int bnx2x_set_eeprom(struct net_device *dev,
+                           struct ethtool_eeprom *eeprom, u8 *eebuf)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+       int port = BP_PORT(bp);
+       int rc = 0;
+
+       if (!netif_running(dev))
+               return -EAGAIN;
+
+       DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
+          DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
+          eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
+          eeprom->len, eeprom->len);
+
+       /* parameters already validated in ethtool_set_eeprom */
+
+       /* PHY eeprom can be accessed only by the PMF */
+       if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
+           !bp->port.pmf)
+               return -EINVAL;
+
+       if (eeprom->magic == 0x50485950) {
+               /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
+               bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+               bnx2x_acquire_phy_lock(bp);
+               rc |= bnx2x_link_reset(&bp->link_params,
+                                      &bp->link_vars, 0);
+               if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
+                                       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
+                       bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
+                                      MISC_REGISTERS_GPIO_HIGH, port);
+               bnx2x_release_phy_lock(bp);
+               bnx2x_link_report(bp);
+
+       } else if (eeprom->magic == 0x50485952) {
+               /* 'PHYR' (0x50485952): re-init link after FW upgrade */
+               if (bp->state == BNX2X_STATE_OPEN) {
+                       bnx2x_acquire_phy_lock(bp);
+                       rc |= bnx2x_link_reset(&bp->link_params,
+                                              &bp->link_vars, 1);
+
+                       rc |= bnx2x_phy_init(&bp->link_params,
+                                            &bp->link_vars);
+                       bnx2x_release_phy_lock(bp);
+                       bnx2x_calc_fc_adv(bp);
+               }
+       } else if (eeprom->magic == 0x53985943) {
+               /* 'PHYC' (0x53985943): PHY FW upgrade completed */
+               /* NOTE(review): 0x53985943 lies outside the PMF-only range
+                * checked above (0x50485900-0x504859FF), so this step is not
+                * restricted to the PMF - confirm this is intentional.
+                */
+               if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
+                                      PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
+                       u8 ext_phy_addr =
+                            XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
+
+                       /* DSP Remove Download Mode */
+                       bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
+                                      MISC_REGISTERS_GPIO_LOW, port);
+
+                       bnx2x_acquire_phy_lock(bp);
+
+                       bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
+
+                       /* wait 0.5 sec to allow it to run */
+                       msleep(500);
+                       bnx2x_ext_phy_hw_reset(bp, port);
+                       msleep(500);
+                       bnx2x_release_phy_lock(bp);
+               }
+       } else
+               rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
+
+       return rc;
+}
+/* ethtool_ops::get_coalesce - report the Rx/Tx interrupt coalescing
+ * timeouts (in usecs).  All unsupported fields are reported as zero.
+ */
+static int bnx2x_get_coalesce(struct net_device *dev,
+                             struct ethtool_coalesce *coal)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+
+       memset(coal, 0, sizeof(*coal));
+       coal->rx_coalesce_usecs = bp->rx_ticks;
+       coal->tx_coalesce_usecs = bp->tx_ticks;
+
+       return 0;
+}
+
+/* ethtool_ops::set_coalesce - set the Rx/Tx interrupt coalescing timeouts.
+ * Values are truncated to 16 bits and clamped to the hardware maximum; the
+ * hardware is reprogrammed immediately if the interface is up.
+ */
+static int bnx2x_set_coalesce(struct net_device *dev,
+                             struct ethtool_coalesce *coal)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+
+       bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
+       bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
+
+       if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
+               bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
+       if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
+               bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
+
+       if (netif_running(dev))
+               bnx2x_update_coalesce(bp);
+
+       return 0;
+}
+
+/* ethtool_ops::get_ringparam - report ring sizes; mini/jumbo rings are not
+ * supported by this hardware and always read as zero.
+ */
+static void bnx2x_get_ringparam(struct net_device *dev,
+                               struct ethtool_ringparam *ering)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+
+       ering->rx_mini_max_pending = 0;
+       ering->rx_mini_pending = 0;
+       ering->rx_jumbo_max_pending = 0;
+       ering->rx_jumbo_pending = 0;
+
+       ering->rx_max_pending = MAX_RX_AVAIL;
+       ering->rx_pending = bp->rx_ring_size;
+
+       ering->tx_max_pending = MAX_TX_AVAIL;
+       ering->tx_pending = bp->tx_ring_size;
+}
+
+/* ethtool_ops::set_ringparam - resize the Rx/Tx rings.  Tx must leave room
+ * for a maximally-fragmented skb (MAX_SKB_FRAGS + 4 BDs).  If the interface
+ * is up it is fully unloaded and reloaded so the rings take effect.
+ */
+static int bnx2x_set_ringparam(struct net_device *dev,
+                              struct ethtool_ringparam *ering)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+       int rc = 0;
+
+       /* refuse reconfiguration while parity-error recovery is in flight */
+       if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+               printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+               return -EAGAIN;
+       }
+
+       if ((ering->rx_pending > MAX_RX_AVAIL) ||
+           (ering->tx_pending > MAX_TX_AVAIL) ||
+           (ering->tx_pending <= MAX_SKB_FRAGS + 4))
+               return -EINVAL;
+
+       bp->rx_ring_size = ering->rx_pending;
+       bp->tx_ring_size = ering->tx_pending;
+
+       if (netif_running(dev)) {
+               bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+               rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+       }
+
+       return rc;
+}
+
+/* ethtool_ops::get_pauseparam - report flow-control state.  "autoneg" is
+ * true only when both flow control AND line speed are set to autonegotiate;
+ * rx/tx reflect the currently resolved flow-control bits.
+ */
+static void bnx2x_get_pauseparam(struct net_device *dev,
+                                struct ethtool_pauseparam *epause)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+
+       epause->autoneg = (bp->link_params.req_flow_ctrl ==
+                          BNX2X_FLOW_CTRL_AUTO) &&
+                         (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
+
+       epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
+                           BNX2X_FLOW_CTRL_RX);
+       epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
+                           BNX2X_FLOW_CTRL_TX);
+
+       DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
+          DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
+          epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
+}
+
+/* ethtool_ops::set_pauseparam - configure flow control.  Resolution order:
+ * start from AUTO, OR in the requested RX/TX bits, collapse "nothing
+ * requested" (still AUTO) to NONE, then let a supported autoneg request
+ * override back to AUTO.  No-op in multi-function (E1H MF) mode where the
+ * PMF owns the link.
+ */
+static int bnx2x_set_pauseparam(struct net_device *dev,
+                               struct ethtool_pauseparam *epause)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+
+       if (IS_E1HMF(bp))
+               return 0;
+
+       DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
+          DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
+          epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
+
+       bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
+
+       if (epause->rx_pause)
+               bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
+
+       if (epause->tx_pause)
+               bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
+
+       /* neither rx nor tx requested: disable flow control entirely */
+       if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
+               bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+
+       if (epause->autoneg) {
+               if (!(bp->port.supported & SUPPORTED_Autoneg)) {
+                       DP(NETIF_MSG_LINK, "autoneg not supported\n");
+                       return -EINVAL;
+               }
+
+               /* autoneg of flow control needs speed autoneg as well */
+               if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
+                       bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
+       }
+
+       DP(NETIF_MSG_LINK,
+          "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
+
+       /* re-negotiate the link with the new flow-control setting */
+       if (netif_running(dev)) {
+               bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+               bnx2x_link_set(bp);
+       }
+
+       return 0;
+}
+
+/* ethtool_ops::set_flags - toggle LRO (mapped to the HW TPA aggregation)
+ * and RXHASH.  Enabling LRO requires Rx checksum offload and TPA not being
+ * administratively disabled.  An LRO change requires a full NIC
+ * unload/reload to take effect.
+ */
+static int bnx2x_set_flags(struct net_device *dev, u32 data)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+       int changed = 0;
+       int rc = 0;
+
+       if (data & ~(ETH_FLAG_LRO | ETH_FLAG_RXHASH))
+               return -EINVAL;
+
+       /* refuse reconfiguration while parity-error recovery is in flight */
+       if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+               printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+               return -EAGAIN;
+       }
+
+       /* TPA requires Rx CSUM offloading */
+       if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
+               if (!bp->disable_tpa) {
+                       if (!(dev->features & NETIF_F_LRO)) {
+                               dev->features |= NETIF_F_LRO;
+                               bp->flags |= TPA_ENABLE_FLAG;
+                               changed = 1;
+                       }
+               } else
+                       rc = -EINVAL;
+       } else if (dev->features & NETIF_F_LRO) {
+               /* LRO not requested (or rx_csum off): turn TPA off */
+               dev->features &= ~NETIF_F_LRO;
+               bp->flags &= ~TPA_ENABLE_FLAG;
+               changed = 1;
+       }
+
+       /* RXHASH is a pure feature-flag toggle, no reload needed */
+       if (data & ETH_FLAG_RXHASH)
+               dev->features |= NETIF_F_RXHASH;
+       else
+               dev->features &= ~NETIF_F_RXHASH;
+
+       if (changed && netif_running(dev)) {
+               bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+               rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+       }
+
+       return rc;
+}
+
+/* ethtool_ops::get_rx_csum - report whether Rx checksum offload is on */
+static u32 bnx2x_get_rx_csum(struct net_device *dev)
+{
+       const struct bnx2x *bp = netdev_priv(dev);
+
+       return bp->rx_csum;
+}
+
+/* ethtool_ops::set_rx_csum - enable/disable Rx checksum offload.  When
+ * disabling, LRO/TPA is forcibly turned off too (via bnx2x_set_flags),
+ * since TPA'ed packets would otherwise carry a wrong TCP checksum.
+ */
+static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+       int rc = 0;
+
+       /* refuse reconfiguration while parity-error recovery is in flight */
+       if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+               printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+               return -EAGAIN;
+       }
+
+       bp->rx_csum = data;
+
+       /* Disable TPA, when Rx CSUM is disabled. Otherwise all
+          TPA'ed packets will be discarded due to wrong TCP CSUM */
+       if (!data) {
+               u32 flags = ethtool_op_get_flags(dev);
+
+               rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
+       }
+
+       return rc;
+}
+
+/* ethtool_ops::set_tso - toggle TCP segmentation offload.  TSO for IPv4
+ * (with ECN) and for IPv6 are always switched together.
+ */
+static int bnx2x_set_tso(struct net_device *dev, u32 data)
+{
+       if (data)
+               dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6;
+       else
+               dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6);
+
+       return 0;
+}
+
+/* Names of the ethtool self tests, reported via get_strings(ETH_SS_TEST).
+ * The order here must match the result indices filled in by the self-test
+ * routine (offline tests first, then online tests).
+ */
+static const struct {
+       char string[ETH_GSTRING_LEN];
+} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
+       { "register_test (offline)" },
+       { "memory_test (offline)" },
+       { "loopback_test (offline)" },
+       { "nvram_test (online)" },
+       { "interrupt_test (online)" },
+       { "link_test (online)" },
+       { "idle check (online)" }
+};
+
+/* Offline self test: write/read-back a table of per-port registers, first
+ * with 0x00000000 and then with 0xffffffff, restoring the original value
+ * after each probe.  offset1 is the per-port stride; mask selects the
+ * writable bits that must read back.  Returns 0 on success, -ENODEV on the
+ * first mismatch or when the device is down.
+ */
+static int bnx2x_test_registers(struct bnx2x *bp)
+{
+       int idx, i, rc = -ENODEV;
+       u32 wr_val = 0;
+       int port = BP_PORT(bp);
+       static const struct {
+               u32 offset0;
+               u32 offset1;
+               u32 mask;
+       } reg_tbl[] = {
+/* 0 */                { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
+               { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
+               { HC_REG_AGG_INT_0,                    4, 0x000003ff },
+               { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
+               { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
+               { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
+               { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
+               { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
+               { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
+               { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
+/* 10 */       { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
+               { QM_REG_CONNNUM_0,                    4, 0x000fffff },
+               { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
+               { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
+               { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
+               { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
+               { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
+               { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
+               { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
+               { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
+/* 20 */       { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
+               { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
+               { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
+               { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
+               { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
+               { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
+               { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
+               { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
+               { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
+               { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
+/* 30 */       { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
+               { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
+               { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
+               { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
+               { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
+               { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
+               { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
+
+               /* sentinel terminating the table */
+               { 0xffffffff, 0, 0x00000000 }
+       };
+
+       if (!netif_running(bp->dev))
+               return rc;
+
+       /* Repeat the test twice:
+          First by writing 0x00000000, second by writing 0xffffffff */
+       for (idx = 0; idx < 2; idx++) {
+
+               switch (idx) {
+               case 0:
+                       wr_val = 0;
+                       break;
+               case 1:
+                       wr_val = 0xffffffff;
+                       break;
+               }
+
+               for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
+                       u32 offset, mask, save_val, val;
+
+                       offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
+                       mask = reg_tbl[i].mask;
+
+                       save_val = REG_RD(bp, offset);
+
+                       REG_WR(bp, offset, (wr_val & mask));
+                       val = REG_RD(bp, offset);
+
+                       /* Restore the original register's value */
+                       REG_WR(bp, offset, save_val);
+
+                       /* verify value is as expected */
+                       if ((val & mask) != (wr_val & mask)) {
+                               DP(NETIF_MSG_PROBE,
+                                  "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
+                                  offset, val, wr_val, mask);
+                               goto test_reg_exit;
+                       }
+               }
+       }
+
+       rc = 0;
+
+test_reg_exit:
+       return rc;
+}
+
+/* Offline self test: read every word of a set of internal memories (the
+ * reads themselves trigger parity checking), then verify that no parity
+ * status bits are set outside the per-chip expected masks.  Returns 0 on
+ * success, -ENODEV on a parity indication or when the device is down.
+ */
+static int bnx2x_test_memory(struct bnx2x *bp)
+{
+       int i, j, rc = -ENODEV;
+       u32 val;
+       static const struct {
+               u32 offset;
+               int size;
+       } mem_tbl[] = {
+               { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
+               { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
+               { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
+               { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
+               { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
+               { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
+               { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
+
+               { 0xffffffff, 0 }
+       };
+       /* parity status registers with the bits that may legitimately be
+        * set on each chip variant (E1 vs E1H) */
+       static const struct {
+               char *name;
+               u32 offset;
+               u32 e1_mask;
+               u32 e1h_mask;
+       } prty_tbl[] = {
+               { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
+               { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
+               { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
+               { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
+               { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
+               { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
+
+               { NULL, 0xffffffff, 0, 0 }
+       };
+
+       if (!netif_running(bp->dev))
+               return rc;
+
+       /* Go through all the memories */
+       for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
+               for (j = 0; j < mem_tbl[i].size; j++)
+                       REG_RD(bp, mem_tbl[i].offset + j*4);
+
+       /* Check the parity status */
+       for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
+               val = REG_RD(bp, prty_tbl[i].offset);
+               if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
+                   (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
+                       DP(NETIF_MSG_HW,
+                          "%s is 0x%x\n", prty_tbl[i].name, val);
+                       goto test_mem_exit;
+               }
+       }
+
+       rc = 0;
+
+test_mem_exit:
+       return rc;
+}
+
+/* Poll bnx2x_link_test() for up to 10 seconds (1000 x 10ms) so the
+ * self-tests run against a settled link after a load/unload cycle.
+ * If the link was down to begin with there is nothing to wait for.
+ */
+static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
+{
+       int cnt = 1000;
+
+       if (link_up)
+               while (bnx2x_link_test(bp) && cnt--)
+                       msleep(10);
+}
+
+/*
+ * Send one self-addressed frame through the requested loopback path
+ * (PHY or MAC) on queue 0 and verify it is received back intact.
+ *
+ * Returns 0 on success, -EINVAL for an unsupported loopback mode,
+ * -ENOMEM if the skb cannot be allocated and -ENODEV when the frame
+ * is not transmitted, not received, or received corrupted.
+ */
+static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
+{
+       unsigned int pkt_size, num_pkts, i;
+       struct sk_buff *skb;
+       unsigned char *packet;
+       struct bnx2x_fastpath *fp_rx = &bp->fp[0];
+       struct bnx2x_fastpath *fp_tx = &bp->fp[0];
+       u16 tx_start_idx, tx_idx;
+       u16 rx_start_idx, rx_idx;
+       u16 pkt_prod, bd_prod;
+       struct sw_tx_bd *tx_buf;
+       struct eth_tx_start_bd *tx_start_bd;
+       struct eth_tx_parse_bd *pbd = NULL;
+       dma_addr_t mapping;
+       union eth_rx_cqe *cqe;
+       u8 cqe_fp_flags;
+       struct sw_rx_bd *rx_buf;
+       u16 len;
+       int rc = -ENODEV;
+
+       /* check the loopback mode */
+       switch (loopback_mode) {
+       case BNX2X_PHY_LOOPBACK:
+               /* PHY loopback relies on the link already being in
+                * XGXS_10 loopback mode */
+               if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
+                       return -EINVAL;
+               break;
+       case BNX2X_MAC_LOOPBACK:
+               bp->link_params.loopback_mode = LOOPBACK_BMAC;
+               bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* prepare the loopback packet: own MAC as destination, a fixed
+        * 0x77 header filler and a 0..0xff ramp payload for later
+        * byte-by-byte verification */
+       pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
+                    bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
+       skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+       if (!skb) {
+               rc = -ENOMEM;
+               goto test_loopback_exit;
+       }
+       packet = skb_put(skb, pkt_size);
+       memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
+       memset(packet + ETH_ALEN, 0, ETH_ALEN);
+       memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
+       for (i = ETH_HLEN; i < pkt_size; i++)
+               packet[i] = (unsigned char) (i & 0xff);
+
+       /* send the loopback packet */
+       num_pkts = 0;
+       tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
+       rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
+
+       pkt_prod = fp_tx->tx_pkt_prod++;
+       tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
+       tx_buf->first_bd = fp_tx->tx_bd_prod;
+       tx_buf->skb = skb;
+       tx_buf->flags = 0;
+
+       bd_prod = TX_BD(fp_tx->tx_bd_prod);
+       tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
+       mapping = dma_map_single(&bp->pdev->dev, skb->data,
+                                skb_headlen(skb), DMA_TO_DEVICE);
+       tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+       tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+       tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
+       tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
+       tx_start_bd->vlan = cpu_to_le16(pkt_prod); /* producer tag, not a VLAN */
+       tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
+       tx_start_bd->general_data = ((UNICAST_ADDRESS <<
+                               ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
+
+       /* turn on parsing and get a BD */
+       bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+       pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
+
+       memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
+
+       /* make sure the BDs are written before ringing the doorbell */
+       wmb();
+
+       fp_tx->tx_db.data.prod += 2;
+       barrier();
+       DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
+
+       mmiowb();
+
+       num_pkts++;
+       fp_tx->tx_bd_prod += 2; /* start + pbd */
+
+       /* no interrupt is used here - give the chip time to loop the
+        * frame back before polling the status block indices */
+       udelay(100);
+
+       tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
+       if (tx_idx != tx_start_idx + num_pkts)
+               goto test_loopback_exit;
+
+       rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
+       if (rx_idx != rx_start_idx + num_pkts)
+               goto test_loopback_exit;
+
+       /* verify the completion: fast-path type, no error flags */
+       cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
+       cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
+       if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
+               goto test_loopback_rx_exit;
+
+       len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
+       if (len != pkt_size)
+               goto test_loopback_rx_exit;
+
+       /* compare the received payload against the transmitted ramp */
+       rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
+       skb = rx_buf->skb;
+       skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
+       for (i = ETH_HLEN; i < pkt_size; i++)
+               if (*(skb->data + i) != (unsigned char) (i & 0xff))
+                       goto test_loopback_rx_exit;
+
+       rc = 0;
+
+test_loopback_rx_exit:
+
+       /* consume the RX ring entry so the rings stay consistent */
+       fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
+       fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
+       fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
+       fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
+
+       /* Update producers */
+       bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
+                            fp_rx->rx_sge_prod);
+
+test_loopback_exit:
+       bp->link_params.loopback_mode = LOOPBACK_NONE;
+
+       return rc;
+}
+
+/* Run both the PHY and MAC loopback sub-tests with the device
+ * quiesced (interrupts stopped, PHY lock held).  Returns 0 on
+ * success or a bitmask of BNX2X_*_LOOPBACK_FAILED flags.
+ */
+static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
+{
+       int rc = 0, res;
+
+       /* skipped when running without the management (MCP) firmware */
+       if (BP_NOMCP(bp))
+               return rc;
+
+       if (!netif_running(bp->dev))
+               return BNX2X_LOOPBACK_FAILED;
+
+       bnx2x_netif_stop(bp, 1);
+       bnx2x_acquire_phy_lock(bp);
+
+       res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
+       if (res) {
+               DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
+               rc |= BNX2X_PHY_LOOPBACK_FAILED;
+       }
+
+       res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
+       if (res) {
+               DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
+               rc |= BNX2X_MAC_LOOPBACK_FAILED;
+       }
+
+       bnx2x_release_phy_lock(bp);
+       bnx2x_netif_start(bp);
+
+       return rc;
+}
+
+/* CRC-32 residual: the CRC of any block that ends with its own
+ * complemented CRC-32.  Each NVRAM region checked below is therefore
+ * expected to carry a trailing CRC, making the whole-region CRC equal
+ * this constant.
+ */
+#define CRC32_RESIDUAL                 0xdebb20e3
+
+/* NVRAM self-test: verify the bootstrap magic value and the CRC of
+ * each fixed directory region.  Returns 0 on success (or when no MCP
+ * is present), -ENODEV on a bad magic/CRC, or the nvram read error.
+ */
+static int bnx2x_test_nvram(struct bnx2x *bp)
+{
+       /* offset/size pairs of the NVRAM regions to CRC-check;
+        * a zero size terminates the list */
+       static const struct {
+               int offset;
+               int size;
+       } nvram_tbl[] = {
+               {     0,  0x14 }, /* bootstrap */
+               {  0x14,  0xec }, /* dir */
+               { 0x100, 0x350 }, /* manuf_info */
+               { 0x450,  0xf0 }, /* feature_info */
+               { 0x640,  0x64 }, /* upgrade_key_info */
+               { 0x6a4,  0x64 },
+               { 0x708,  0x70 }, /* manuf_key_info */
+               { 0x778,  0x70 },
+               {     0,     0 }
+       };
+       __be32 buf[0x350 / 4]; /* sized for the largest region above */
+       u8 *data = (u8 *)buf;
+       int i, rc;
+       u32 magic, crc;
+
+       if (BP_NOMCP(bp))
+               return 0;
+
+       rc = bnx2x_nvram_read(bp, 0, data, 4);
+       if (rc) {
+               DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
+               goto test_nvram_exit;
+       }
+
+       magic = be32_to_cpu(buf[0]);
+       if (magic != 0x669955aa) {
+               DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
+               rc = -ENODEV;
+               goto test_nvram_exit;
+       }
+
+       for (i = 0; nvram_tbl[i].size; i++) {
+
+               rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
+                                     nvram_tbl[i].size);
+               if (rc) {
+                       DP(NETIF_MSG_PROBE,
+                          "nvram_tbl[%d] read data (rc %d)\n", i, rc);
+                       goto test_nvram_exit;
+               }
+
+               crc = ether_crc_le(nvram_tbl[i].size, data);
+               if (crc != CRC32_RESIDUAL) {
+                       DP(NETIF_MSG_PROBE,
+                          "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
+                       rc = -ENODEV;
+                       goto test_nvram_exit;
+               }
+       }
+
+test_nvram_exit:
+       return rc;
+}
+
+/* Verify slowpath interrupt delivery by posting a benign (zero-length)
+ * SET_MAC ramrod and polling up to ~100ms for the completion path to
+ * clear set_mac_pending.  Returns 0 on success, -ENODEV on timeout or
+ * if the interface is down, or the bnx2x_sp_post() error code.
+ */
+static int bnx2x_test_intr(struct bnx2x *bp)
+{
+       struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
+       int i, rc;
+
+       if (!netif_running(bp->dev))
+               return -ENODEV;
+
+       /* length 0: nothing is actually (re)configured */
+       config->hdr.length = 0;
+       if (CHIP_IS_E1(bp))
+               /* use last unicast entries */
+               config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
+       else
+               config->hdr.offset = BP_FUNC(bp);
+       config->hdr.client_id = bp->fp->cl_id;
+       config->hdr.reserved1 = 0;
+
+       bp->set_mac_pending++;
+       smp_wmb();
+       rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
+                          U64_HI(bnx2x_sp_mapping(bp, mac_config)),
+                          U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
+       if (rc == 0) {
+               for (i = 0; i < 10; i++) {
+                       if (!bp->set_mac_pending)
+                               break;
+                       /* NOTE(review): smp_rmb() is issued after the
+                        * read of set_mac_pending it should order -
+                        * confirm the intended pairing with the
+                        * completion path's write barrier */
+                       smp_rmb();
+                       msleep_interruptible(10);
+               }
+               if (i == 10)
+                       rc = -ENODEV;
+       }
+
+       return rc;
+}
+
+/* ethtool .self_test handler.  Offline tests (buf[0] registers,
+ * buf[1] memory, buf[2] loopback) require an unload/reload of the NIC
+ * in diagnostic mode and are disabled in E1H multi-function mode.
+ * Online tests (buf[3] NVRAM, buf[4] interrupt, buf[5] link) always
+ * run.  A non-zero buf[i] marks test i as failed.
+ */
+static void bnx2x_self_test(struct net_device *dev,
+                           struct ethtool_test *etest, u64 *buf)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+
+       /* self-test is unsafe while parity-error recovery is in flight */
+       if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+               printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+               etest->flags |= ETH_TEST_FL_FAILED;
+               return;
+       }
+
+       memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
+
+       if (!netif_running(dev))
+               return;
+
+       /* offline tests are not supported in MF mode */
+       if (IS_E1HMF(bp))
+               etest->flags &= ~ETH_TEST_FL_OFFLINE;
+
+       if (etest->flags & ETH_TEST_FL_OFFLINE) {
+               int port = BP_PORT(bp);
+               u32 val;
+               u8 link_up;
+
+               /* save current value of input enable for TX port IF */
+               val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
+               /* disable input for TX port IF */
+               REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
+
+               link_up = (bnx2x_link_test(bp) == 0);
+               bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+               bnx2x_nic_load(bp, LOAD_DIAG);
+               /* wait until link state is restored */
+               bnx2x_wait_for_link(bp, link_up);
+
+               if (bnx2x_test_registers(bp) != 0) {
+                       buf[0] = 1;
+                       etest->flags |= ETH_TEST_FL_FAILED;
+               }
+               if (bnx2x_test_memory(bp) != 0) {
+                       buf[1] = 1;
+                       etest->flags |= ETH_TEST_FL_FAILED;
+               }
+               buf[2] = bnx2x_test_loopback(bp, link_up);
+               if (buf[2] != 0)
+                       etest->flags |= ETH_TEST_FL_FAILED;
+
+               bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+
+               /* restore input for TX port IF */
+               REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
+
+               bnx2x_nic_load(bp, LOAD_NORMAL);
+               /* wait until link state is restored */
+               bnx2x_wait_for_link(bp, link_up);
+       }
+       if (bnx2x_test_nvram(bp) != 0) {
+               buf[3] = 1;
+               etest->flags |= ETH_TEST_FL_FAILED;
+       }
+       if (bnx2x_test_intr(bp) != 0) {
+               buf[4] = 1;
+               etest->flags |= ETH_TEST_FL_FAILED;
+       }
+       /* link test runs only when this function is the PMF */
+       if (bp->port.pmf)
+               if (bnx2x_link_test(bp) != 0) {
+                       buf[5] = 1;
+                       etest->flags |= ETH_TEST_FL_FAILED;
+               }
+
+#ifdef BNX2X_EXTRA_DEBUG
+       bnx2x_panic_dump(bp);
+#endif
+}
+
+/* Per-queue ethtool stat descriptors.  'offset' is a 32-bit-word
+ * offset into the per-queue statistics block, 'size' is the counter
+ * width in bytes (4 or 8, 0 = skip) and 'string' is a printf format
+ * with %d standing for the queue number.
+ */
+static const struct {
+       long offset;
+       int size;
+       u8 string[ETH_GSTRING_LEN];
+} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
+/* 1 */        { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
+       { Q_STATS_OFFSET32(error_bytes_received_hi),
+                                               8, "[%d]: rx_error_bytes" },
+       { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
+                                               8, "[%d]: rx_ucast_packets" },
+       { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
+                                               8, "[%d]: rx_mcast_packets" },
+       { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
+                                               8, "[%d]: rx_bcast_packets" },
+       { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
+       { Q_STATS_OFFSET32(rx_err_discard_pkt),
+                                        4, "[%d]: rx_phy_ip_err_discards"},
+       { Q_STATS_OFFSET32(rx_skb_alloc_failed),
+                                        4, "[%d]: rx_skb_alloc_discard" },
+       { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
+
+/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),        8, "[%d]: tx_bytes" },
+       { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
+                                               8, "[%d]: tx_ucast_packets" },
+       { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
+                                               8, "[%d]: tx_mcast_packets" },
+       { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
+                                               8, "[%d]: tx_bcast_packets" }
+};
+
+/* Device-wide ethtool stat descriptors.  'offset' is a 32-bit-word
+ * offset into bp->eth_stats, 'size' is the counter width in bytes
+ * (4 or 8, 0 = skip) and 'flags' classifies the stat as per-port,
+ * per-function or both - used to hide port stats in E1H
+ * multi-function mode.
+ */
+static const struct {
+       long offset;
+       int size;
+       u32 flags;
+#define STATS_FLAGS_PORT               1
+#define STATS_FLAGS_FUNC               2
+#define STATS_FLAGS_BOTH               (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
+       u8 string[ETH_GSTRING_LEN];
+} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
+/* 1 */        { STATS_OFFSET32(total_bytes_received_hi),
+                               8, STATS_FLAGS_BOTH, "rx_bytes" },
+       { STATS_OFFSET32(error_bytes_received_hi),
+                               8, STATS_FLAGS_BOTH, "rx_error_bytes" },
+       { STATS_OFFSET32(total_unicast_packets_received_hi),
+                               8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
+       { STATS_OFFSET32(total_multicast_packets_received_hi),
+                               8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
+       { STATS_OFFSET32(total_broadcast_packets_received_hi),
+                               8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
+       { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
+                               8, STATS_FLAGS_PORT, "rx_crc_errors" },
+       { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
+                               8, STATS_FLAGS_PORT, "rx_align_errors" },
+       { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
+                               8, STATS_FLAGS_PORT, "rx_undersize_packets" },
+       { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
+                               8, STATS_FLAGS_PORT, "rx_oversize_packets" },
+/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
+                               8, STATS_FLAGS_PORT, "rx_fragments" },
+       { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
+                               8, STATS_FLAGS_PORT, "rx_jabbers" },
+       { STATS_OFFSET32(no_buff_discard_hi),
+                               8, STATS_FLAGS_BOTH, "rx_discards" },
+       { STATS_OFFSET32(mac_filter_discard),
+                               4, STATS_FLAGS_PORT, "rx_filtered_packets" },
+       { STATS_OFFSET32(xxoverflow_discard),
+                               4, STATS_FLAGS_PORT, "rx_fw_discards" },
+       { STATS_OFFSET32(brb_drop_hi),
+                               8, STATS_FLAGS_PORT, "rx_brb_discard" },
+       { STATS_OFFSET32(brb_truncate_hi),
+                               8, STATS_FLAGS_PORT, "rx_brb_truncate" },
+       { STATS_OFFSET32(pause_frames_received_hi),
+                               8, STATS_FLAGS_PORT, "rx_pause_frames" },
+       { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
+                               8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
+       { STATS_OFFSET32(nig_timer_max),
+                       4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
+/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
+                               4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
+       { STATS_OFFSET32(rx_skb_alloc_failed),
+                               4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
+       { STATS_OFFSET32(hw_csum_err),
+                               4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
+
+       { STATS_OFFSET32(total_bytes_transmitted_hi),
+                               8, STATS_FLAGS_BOTH, "tx_bytes" },
+       { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
+                               8, STATS_FLAGS_PORT, "tx_error_bytes" },
+       { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
+                               8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
+       { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
+                               8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
+       { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
+                               8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
+       { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
+                               8, STATS_FLAGS_PORT, "tx_mac_errors" },
+       { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
+                               8, STATS_FLAGS_PORT, "tx_carrier_errors" },
+/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
+                               8, STATS_FLAGS_PORT, "tx_single_collisions" },
+       { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
+                               8, STATS_FLAGS_PORT, "tx_multi_collisions" },
+       { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
+                               8, STATS_FLAGS_PORT, "tx_deferred" },
+       { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
+                               8, STATS_FLAGS_PORT, "tx_excess_collisions" },
+       { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
+                               8, STATS_FLAGS_PORT, "tx_late_collisions" },
+       { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
+                               8, STATS_FLAGS_PORT, "tx_total_collisions" },
+       { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
+                               8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
+       { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
+                       8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
+       { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
+                       8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
+       { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
+                       8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
+/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
+                       8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
+       { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
+                       8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
+       { STATS_OFFSET32(etherstatspktsover1522octets_hi),
+                       8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
+       { STATS_OFFSET32(pause_frames_sent_hi),
+                               8, STATS_FLAGS_PORT, "tx_pause_frames" }
+};
+
+/* Stat visibility helpers: in E1H multi-function mode, PORT-only
+ * stats are hidden unless the BNX2X_MSG_STATS debug level overrides
+ * the filtering.
+ */
+#define IS_PORT_STAT(i) \
+       ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
+#define IS_FUNC_STAT(i)                (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
+#define IS_E1HMF_MODE_STAT(bp) \
+                       (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
+
+/* ethtool .get_sset_count handler: number of entries in a string set.
+ * For ETH_SS_STATS the count must match exactly what get_strings()
+ * and get_ethtool_stats() emit: per-queue stats when multiple queues
+ * are active, with function-only stats in E1H multi-function mode.
+ */
+static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+       int i, num_stats;
+
+       switch (stringset) {
+       case ETH_SS_STATS:
+               if (is_multi(bp)) {
+                       num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
+                       if (!IS_E1HMF_MODE_STAT(bp))
+                               num_stats += BNX2X_NUM_STATS;
+               } else {
+                       if (IS_E1HMF_MODE_STAT(bp)) {
+                               num_stats = 0;
+                               /* only function stats are visible */
+                               for (i = 0; i < BNX2X_NUM_STATS; i++)
+                                       if (IS_FUNC_STAT(i))
+                                               num_stats++;
+                       } else
+                               num_stats = BNX2X_NUM_STATS;
+               }
+               return num_stats;
+
+       case ETH_SS_TEST:
+               return BNX2X_NUM_TESTS;
+
+       default:
+               return -EINVAL;
+       }
+}
+
+/* ethtool .get_strings handler: write the stat names (ETH_SS_STATS)
+ * or self-test names (ETH_SS_TEST) into 'buf', ETH_GSTRING_LEN bytes
+ * apart, using the same ordering and filtering as
+ * bnx2x_get_ethtool_stats().
+ */
+static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+       int i, j, k;
+
+       switch (stringset) {
+       case ETH_SS_STATS:
+               if (is_multi(bp)) {
+                       k = 0;
+                       /* per-queue stats first; %d in the format gets
+                        * the queue number */
+                       for_each_queue(bp, i) {
+                               for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
+                                       sprintf(buf + (k + j)*ETH_GSTRING_LEN,
+                                               bnx2x_q_stats_arr[j].string, i);
+                               k += BNX2X_NUM_Q_STATS;
+                       }
+                       if (IS_E1HMF_MODE_STAT(bp))
+                               break;
+                       for (j = 0; j < BNX2X_NUM_STATS; j++)
+                               strcpy(buf + (k + j)*ETH_GSTRING_LEN,
+                                      bnx2x_stats_arr[j].string);
+               } else {
+                       for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
+                               /* port stats are hidden in E1H MF mode */
+                               if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
+                                       continue;
+                               strcpy(buf + j*ETH_GSTRING_LEN,
+                                      bnx2x_stats_arr[i].string);
+                               j++;
+                       }
+               }
+               break;
+
+       case ETH_SS_TEST:
+               memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
+               break;
+       }
+}
+
+/* ethtool .get_ethtool_stats handler: copy counter values into 'buf'
+ * in the same order and with the same filtering as
+ * bnx2x_get_strings().  Descriptor size 0 means "skip" (reported as
+ * 0), 4 is a 32-bit counter and 8 is a hi/lo pair combined into a
+ * 64-bit value.
+ */
+static void bnx2x_get_ethtool_stats(struct net_device *dev,
+                                   struct ethtool_stats *stats, u64 *buf)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+       u32 *hw_stats, *offset;
+       int i, j, k;
+
+       if (is_multi(bp)) {
+               k = 0;
+               /* per-queue stats come first */
+               for_each_queue(bp, i) {
+                       hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
+                       for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
+                               if (bnx2x_q_stats_arr[j].size == 0) {
+                                       /* skip this counter */
+                                       buf[k + j] = 0;
+                                       continue;
+                               }
+                               offset = (hw_stats +
+                                         bnx2x_q_stats_arr[j].offset);
+                               if (bnx2x_q_stats_arr[j].size == 4) {
+                                       /* 4-byte counter */
+                                       buf[k + j] = (u64) *offset;
+                                       continue;
+                               }
+                               /* 8-byte counter */
+                               buf[k + j] = HILO_U64(*offset, *(offset + 1));
+                       }
+                       k += BNX2X_NUM_Q_STATS;
+               }
+               if (IS_E1HMF_MODE_STAT(bp))
+                       return;
+               hw_stats = (u32 *)&bp->eth_stats;
+               for (j = 0; j < BNX2X_NUM_STATS; j++) {
+                       if (bnx2x_stats_arr[j].size == 0) {
+                               /* skip this counter */
+                               buf[k + j] = 0;
+                               continue;
+                       }
+                       offset = (hw_stats + bnx2x_stats_arr[j].offset);
+                       if (bnx2x_stats_arr[j].size == 4) {
+                               /* 4-byte counter */
+                               buf[k + j] = (u64) *offset;
+                               continue;
+                       }
+                       /* 8-byte counter */
+                       buf[k + j] = HILO_U64(*offset, *(offset + 1));
+               }
+       } else {
+               hw_stats = (u32 *)&bp->eth_stats;
+               for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
+                       /* port stats are hidden in E1H MF mode */
+                       if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
+                               continue;
+                       if (bnx2x_stats_arr[i].size == 0) {
+                               /* skip this counter */
+                               buf[j] = 0;
+                               j++;
+                               continue;
+                       }
+                       offset = (hw_stats + bnx2x_stats_arr[i].offset);
+                       if (bnx2x_stats_arr[i].size == 4) {
+                               /* 4-byte counter */
+                               buf[j] = (u64) *offset;
+                               j++;
+                               continue;
+                       }
+                       /* 8-byte counter */
+                       buf[j] = HILO_U64(*offset, *(offset + 1));
+                       j++;
+               }
+       }
+}
+
+/* ethtool .phys_id handler: identify the adapter by blinking the port
+ * LED for 'data' seconds (default 2; 500ms on / 500ms off), then
+ * restore the operational LED state if the link is up.  Interruptible
+ * by a pending signal.
+ */
+static int bnx2x_phys_id(struct net_device *dev, u32 data)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+       int i;
+
+       if (!netif_running(dev))
+               return 0;
+
+       /* only the PMF controls the LEDs */
+       if (!bp->port.pmf)
+               return 0;
+
+       if (data == 0)
+               data = 2;
+
+       /* data*2 half-second phases: even = LED on, odd = LED off */
+       for (i = 0; i < (data * 2); i++) {
+               if ((i % 2) == 0)
+                       bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
+                                     SPEED_1000);
+               else
+                       bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
+
+               msleep_interruptible(500);
+               if (signal_pending(current))
+                       break;
+       }
+
+       if (bp->link_vars.link_up)
+               bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
+                             bp->link_vars.line_speed);
+
+       return 0;
+}
+
+/* ethtool entry points exported by the bnx2x driver */
+static const struct ethtool_ops bnx2x_ethtool_ops = {
+       .get_settings           = bnx2x_get_settings,
+       .set_settings           = bnx2x_set_settings,
+       .get_drvinfo            = bnx2x_get_drvinfo,
+       .get_regs_len           = bnx2x_get_regs_len,
+       .get_regs               = bnx2x_get_regs,
+       .get_wol                = bnx2x_get_wol,
+       .set_wol                = bnx2x_set_wol,
+       .get_msglevel           = bnx2x_get_msglevel,
+       .set_msglevel           = bnx2x_set_msglevel,
+       .nway_reset             = bnx2x_nway_reset,
+       .get_link               = bnx2x_get_link,
+       .get_eeprom_len         = bnx2x_get_eeprom_len,
+       .get_eeprom             = bnx2x_get_eeprom,
+       .set_eeprom             = bnx2x_set_eeprom,
+       .get_coalesce           = bnx2x_get_coalesce,
+       .set_coalesce           = bnx2x_set_coalesce,
+       .get_ringparam          = bnx2x_get_ringparam,
+       .set_ringparam          = bnx2x_set_ringparam,
+       .get_pauseparam         = bnx2x_get_pauseparam,
+       .set_pauseparam         = bnx2x_set_pauseparam,
+       .get_rx_csum            = bnx2x_get_rx_csum,
+       .set_rx_csum            = bnx2x_set_rx_csum,
+       .get_tx_csum            = ethtool_op_get_tx_csum,
+       .set_tx_csum            = ethtool_op_set_tx_hw_csum,
+       .set_flags              = bnx2x_set_flags,
+       .get_flags              = ethtool_op_get_flags,
+       .get_sg                 = ethtool_op_get_sg,
+       .set_sg                 = ethtool_op_set_sg,
+       .get_tso                = ethtool_op_get_tso,
+       .set_tso                = bnx2x_set_tso,
+       .self_test              = bnx2x_self_test,
+       .get_sset_count         = bnx2x_get_sset_count,
+       .get_strings            = bnx2x_get_strings,
+       .phys_id                = bnx2x_phys_id,
+       .get_ethtool_stats      = bnx2x_get_ethtool_stats,
+};
+
+/* Attach the bnx2x ethtool operations to a net_device. */
+void bnx2x_set_ethtool_ops(struct net_device *netdev)
+{
+       SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
+}
similarity index 56%
rename from drivers/net/bnx2x_main.c
rename to drivers/net/bnx2x/bnx2x_main.c
index 51b788339c903fda0c434f418db564cbb272b341..b4ec2b02a465cf7d822f23700c119d6e93a2121b 100644 (file)
 #include <linux/io.h>
 #include <linux/stringify.h>
 
-
+#define BNX2X_MAIN
 #include "bnx2x.h"
 #include "bnx2x_init.h"
 #include "bnx2x_init_ops.h"
-#include "bnx2x_dump.h"
+#include "bnx2x_cmn.h"
 
-#define DRV_MODULE_VERSION     "1.52.53-1"
-#define DRV_MODULE_RELDATE     "2010/18/04"
-#define BNX2X_BC_VER           0x040200
 
 #include <linux/firmware.h>
 #include "bnx2x_fw_file_hdr.h"
@@ -121,8 +118,6 @@ static int debug;
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, " Default debug msglevel");
 
-static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
-
 static struct workqueue_struct *bnx2x_wq;
 
 enum bnx2x_board_type {
@@ -177,7 +172,7 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
        return val;
 }
 
-static const u32 dmae_reg_go_c[] = {
+const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
@@ -185,8 +180,7 @@ static const u32 dmae_reg_go_c[] = {
 };
 
 /* copy command into DMAE command memory and set DMAE command go */
-static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
-                           int idx)
+void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
 {
        u32 cmd_offset;
        int i;
@@ -541,7 +535,7 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
        pr_err("end of fw dump\n");
 }
 
-static void bnx2x_panic_dump(struct bnx2x *bp)
+void bnx2x_panic_dump(struct bnx2x *bp)
 {
        int i;
        u16 j, start, end;
@@ -654,7 +648,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
        BNX2X_ERR("end crash dump -----------------\n");
 }
 
-static void bnx2x_int_enable(struct bnx2x *bp)
+void bnx2x_int_enable(struct bnx2x *bp)
 {
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
@@ -736,7 +730,7 @@ static void bnx2x_int_disable(struct bnx2x *bp)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
 }
 
-static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
+void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
 {
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;
@@ -806,235 +800,12 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
        return false;
 }
 
-static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
-                               u8 storm, u16 index, u8 op, u8 update)
-{
-       u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
-                      COMMAND_REG_INT_ACK);
-       struct igu_ack_register igu_ack;
-
-       igu_ack.status_block_index = index;
-       igu_ack.sb_id_and_flags =
-                       ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
-                        (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
-                        (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
-                        (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
-
-       DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
-          (*(u32 *)&igu_ack), hc_addr);
-       REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
-
-       /* Make sure that ACK is written */
-       mmiowb();
-       barrier();
-}
-
-static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
-{
-       struct host_status_block *fpsb = fp->status_blk;
-
-       barrier(); /* status block is written to by the chip */
-       fp->fp_c_idx = fpsb->c_status_block.status_block_index;
-       fp->fp_u_idx = fpsb->u_status_block.status_block_index;
-}
-
-static u16 bnx2x_ack_int(struct bnx2x *bp)
-{
-       u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
-                      COMMAND_REG_SIMD_MASK);
-       u32 result = REG_RD(bp, hc_addr);
-
-       DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
-          result, hc_addr);
-
-       return result;
-}
-
-
-/*
- * fast path service functions
- */
-
-static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
-{
-       /* Tell compiler that consumer and producer can change */
-       barrier();
-       return (fp->tx_pkt_prod != fp->tx_pkt_cons);
-}
-
-/* free skb in the packet ring at pos idx
- * return idx of last bd freed
- */
-static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
-                            u16 idx)
-{
-       struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
-       struct eth_tx_start_bd *tx_start_bd;
-       struct eth_tx_bd *tx_data_bd;
-       struct sk_buff *skb = tx_buf->skb;
-       u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
-       int nbd;
-
-       /* prefetch skb end pointer to speedup dev_kfree_skb() */
-       prefetch(&skb->end);
-
-       DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
-          idx, tx_buf, skb);
-
-       /* unmap first bd */
-       DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
-       tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
-       dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
-                        BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
-
-       nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
-#ifdef BNX2X_STOP_ON_ERROR
-       if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
-               BNX2X_ERR("BAD nbd!\n");
-               bnx2x_panic();
-       }
-#endif
-       new_cons = nbd + tx_buf->first_bd;
-
-       /* Get the next bd */
-       bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
-
-       /* Skip a parse bd... */
-       --nbd;
-       bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
-
-       /* ...and the TSO split header bd since they have no mapping */
-       if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
-               --nbd;
-               bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
-       }
-
-       /* now free frags */
-       while (nbd > 0) {
-
-               DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
-               tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
-               dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
-                              BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
-               if (--nbd)
-                       bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
-       }
-
-       /* release skb */
-       WARN_ON(!skb);
-       dev_kfree_skb(skb);
-       tx_buf->first_bd = 0;
-       tx_buf->skb = NULL;
-
-       return new_cons;
-}
-
-static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
-{
-       s16 used;
-       u16 prod;
-       u16 cons;
-
-       prod = fp->tx_bd_prod;
-       cons = fp->tx_bd_cons;
-
-       /* NUM_TX_RINGS = number of "next-page" entries
-          It will be used as a threshold */
-       used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
-
-#ifdef BNX2X_STOP_ON_ERROR
-       WARN_ON(used < 0);
-       WARN_ON(used > fp->bp->tx_ring_size);
-       WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
-#endif
-
-       return (s16)(fp->bp->tx_ring_size) - used;
-}
-
-static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
-{
-       u16 hw_cons;
-
-       /* Tell compiler that status block fields can change */
-       barrier();
-       hw_cons = le16_to_cpu(*fp->tx_cons_sb);
-       return hw_cons != fp->tx_pkt_cons;
-}
-
-static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
-{
-       struct bnx2x *bp = fp->bp;
-       struct netdev_queue *txq;
-       u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
-
-#ifdef BNX2X_STOP_ON_ERROR
-       if (unlikely(bp->panic))
-               return -1;
-#endif
-
-       txq = netdev_get_tx_queue(bp->dev, fp->index);
-       hw_cons = le16_to_cpu(*fp->tx_cons_sb);
-       sw_cons = fp->tx_pkt_cons;
-
-       while (sw_cons != hw_cons) {
-               u16 pkt_cons;
-
-               pkt_cons = TX_BD(sw_cons);
-
-               /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
-
-               DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
-                  hw_cons, sw_cons, pkt_cons);
-
-/*             if (NEXT_TX_IDX(sw_cons) != hw_cons) {
-                       rmb();
-                       prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
-               }
-*/
-               bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
-               sw_cons++;
-       }
-
-       fp->tx_pkt_cons = sw_cons;
-       fp->tx_bd_cons = bd_cons;
-
-       /* Need to make the tx_bd_cons update visible to start_xmit()
-        * before checking for netif_tx_queue_stopped().  Without the
-        * memory barrier, there is a small possibility that
-        * start_xmit() will miss it and cause the queue to be stopped
-        * forever.
-        */
-       smp_mb();
-
-       /* TBD need a thresh? */
-       if (unlikely(netif_tx_queue_stopped(txq))) {
-               /* Taking tx_lock() is needed to prevent reenabling the queue
-                * while it's empty. This could have happen if rx_action() gets
-                * suspended in bnx2x_tx_int() after the condition before
-                * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
-                *
-                * stops the queue->sees fresh tx_bd_cons->releases the queue->
-                * sends some packets consuming the whole queue again->
-                * stops the queue
-                */
-
-               __netif_tx_lock(txq, smp_processor_id());
-
-               if ((netif_tx_queue_stopped(txq)) &&
-                   (bp->state == BNX2X_STATE_OPEN) &&
-                   (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
-                       netif_tx_wake_queue(txq);
-
-               __netif_tx_unlock(txq);
-       }
-       return 0;
-}
 
 #ifdef BCM_CNIC
 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
 #endif
 
-static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
+void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
 {
        struct bnx2x *bp = fp->bp;
@@ -1118,11419 +889,5906 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
        mb(); /* force bnx2x_wait_ramrod() to see the change */
 }
 
-static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
-                                    struct bnx2x_fastpath *fp, u16 index)
-{
-       struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
-       struct page *page = sw_buf->page;
-       struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
-
-       /* Skip "next page" elements */
-       if (!page)
-               return;
-
-       dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
-                      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
-       __free_pages(page, PAGES_PER_SGE_SHIFT);
-
-       sw_buf->page = NULL;
-       sge->addr_hi = 0;
-       sge->addr_lo = 0;
-}
-
-static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
-                                          struct bnx2x_fastpath *fp, int last)
+irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
 {
+       struct bnx2x *bp = netdev_priv(dev_instance);
+       u16 status = bnx2x_ack_int(bp);
+       u16 mask;
        int i;
 
-       for (i = 0; i < last; i++)
-               bnx2x_free_rx_sge(bp, fp, i);
-}
-
-static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
-                                    struct bnx2x_fastpath *fp, u16 index)
-{
-       struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
-       struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
-       struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
-       dma_addr_t mapping;
-
-       if (unlikely(page == NULL))
-               return -ENOMEM;
+       /* Return here if interrupt is shared and it's not for us */
+       if (unlikely(status == 0)) {
+               DP(NETIF_MSG_INTR, "not our interrupt!\n");
+               return IRQ_NONE;
+       }
+       DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
 
-       mapping = dma_map_page(&bp->pdev->dev, page, 0,
-                              SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
-       if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
-               __free_pages(page, PAGES_PER_SGE_SHIFT);
-               return -ENOMEM;
+       /* Return here if interrupt is disabled */
+       if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
+               DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
+               return IRQ_HANDLED;
        }
 
-       sw_buf->page = page;
-       dma_unmap_addr_set(sw_buf, mapping, mapping);
+#ifdef BNX2X_STOP_ON_ERROR
+       if (unlikely(bp->panic))
+               return IRQ_HANDLED;
+#endif
 
-       sge->addr_hi = cpu_to_le32(U64_HI(mapping));
-       sge->addr_lo = cpu_to_le32(U64_LO(mapping));
+       for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
+               struct bnx2x_fastpath *fp = &bp->fp[i];
 
-       return 0;
-}
+               mask = 0x2 << fp->sb_id;
+               if (status & mask) {
+                       /* Handle Rx and Tx according to SB id */
+                       prefetch(fp->rx_cons_sb);
+                       prefetch(&fp->status_blk->u_status_block.
+                                               status_block_index);
+                       prefetch(fp->tx_cons_sb);
+                       prefetch(&fp->status_blk->c_status_block.
+                                               status_block_index);
+                       napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+                       status &= ~mask;
+               }
+       }
 
-static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
-                                    struct bnx2x_fastpath *fp, u16 index)
-{
-       struct sk_buff *skb;
-       struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
-       struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
-       dma_addr_t mapping;
+#ifdef BCM_CNIC
+       mask = 0x2 << CNIC_SB_ID(bp);
+       if (status & (mask | 0x1)) {
+               struct cnic_ops *c_ops = NULL;
 
-       skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
-       if (unlikely(skb == NULL))
-               return -ENOMEM;
+               rcu_read_lock();
+               c_ops = rcu_dereference(bp->cnic_ops);
+               if (c_ops)
+                       c_ops->cnic_handler(bp->cnic_data, NULL);
+               rcu_read_unlock();
 
-       mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
-                                DMA_FROM_DEVICE);
-       if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
-               dev_kfree_skb(skb);
-               return -ENOMEM;
+               status &= ~mask;
        }
+#endif
 
-       rx_buf->skb = skb;
-       dma_unmap_addr_set(rx_buf, mapping, mapping);
+       if (unlikely(status & 0x1)) {
+               queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
 
-       rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
-       rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+               status &= ~0x1;
+               if (!status)
+                       return IRQ_HANDLED;
+       }
 
-       return 0;
-}
+       if (unlikely(status))
+               DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
+                  status);
 
-/* note that we are not allocating a new skb,
- * we are just moving one from cons to prod
- * we are not creating a new mapping,
- * so there is no need to check for dma_mapping_error().
- */
-static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
-                              struct sk_buff *skb, u16 cons, u16 prod)
-{
-       struct bnx2x *bp = fp->bp;
-       struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
-       struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
-       struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
-       struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
+       return IRQ_HANDLED;
+}
 
-       dma_sync_single_for_device(&bp->pdev->dev,
-                                  dma_unmap_addr(cons_rx_buf, mapping),
-                                  RX_COPY_THRESH, DMA_FROM_DEVICE);
+/* end of fast path */
 
-       prod_rx_buf->skb = cons_rx_buf->skb;
-       dma_unmap_addr_set(prod_rx_buf, mapping,
-                          dma_unmap_addr(cons_rx_buf, mapping));
-       *prod_bd = *cons_bd;
-}
 
-static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
-                                            u16 idx)
-{
-       u16 last_max = fp->last_max_sge;
+/* Link */
 
-       if (SUB_S16(idx, last_max) > 0)
-               fp->last_max_sge = idx;
-}
+/*
+ * General service functions
+ */
 
-static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
+int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
 {
-       int i, j;
-
-       for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
-               int idx = RX_SGE_CNT * i - 1;
+       u32 lock_status;
+       u32 resource_bit = (1 << resource);
+       int func = BP_FUNC(bp);
+       u32 hw_lock_control_reg;
+       int cnt;
 
-               for (j = 0; j < 2; j++) {
-                       SGE_MASK_CLEAR_BIT(fp, idx);
-                       idx--;
-               }
+       /* Validating that the resource is within range */
+       if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+               DP(NETIF_MSG_HW,
+                  "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+                  resource, HW_LOCK_MAX_RESOURCE_VALUE);
+               return -EINVAL;
        }
-}
-
-static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
-                                 struct eth_fast_path_rx_cqe *fp_cqe)
-{
-       struct bnx2x *bp = fp->bp;
-       u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
-                                    le16_to_cpu(fp_cqe->len_on_bd)) >>
-                     SGE_PAGE_SHIFT;
-       u16 last_max, last_elem, first_elem;
-       u16 delta = 0;
-       u16 i;
 
-       if (!sge_len)
-               return;
+       if (func <= 5) {
+               hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+       } else {
+               hw_lock_control_reg =
+                               (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+       }
 
-       /* First mark all used pages */
-       for (i = 0; i < sge_len; i++)
-               SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
+       /* Validating that the resource is not already taken */
+       lock_status = REG_RD(bp, hw_lock_control_reg);
+       if (lock_status & resource_bit) {
+               DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
+                  lock_status, resource_bit);
+               return -EEXIST;
+       }
 
-       DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
-          sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
+       /* Try for 5 second every 5ms */
+       for (cnt = 0; cnt < 1000; cnt++) {
+               /* Try to acquire the lock */
+               REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
+               lock_status = REG_RD(bp, hw_lock_control_reg);
+               if (lock_status & resource_bit)
+                       return 0;
 
-       /* Here we assume that the last SGE index is the biggest */
-       prefetch((void *)(fp->sge_mask));
-       bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
+               msleep(5);
+       }
+       DP(NETIF_MSG_HW, "Timeout\n");
+       return -EAGAIN;
+}
 
-       last_max = RX_SGE(fp->last_max_sge);
-       last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
-       first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
+int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
+{
+       u32 lock_status;
+       u32 resource_bit = (1 << resource);
+       int func = BP_FUNC(bp);
+       u32 hw_lock_control_reg;
 
-       /* If ring is not full */
-       if (last_elem + 1 != first_elem)
-               last_elem++;
+       DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
 
-       /* Now update the prod */
-       for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
-               if (likely(fp->sge_mask[i]))
-                       break;
+       /* Validating that the resource is within range */
+       if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+               DP(NETIF_MSG_HW,
+                  "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+                  resource, HW_LOCK_MAX_RESOURCE_VALUE);
+               return -EINVAL;
+       }
 
-               fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
-               delta += RX_SGE_MASK_ELEM_SZ;
+       if (func <= 5) {
+               hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+       } else {
+               hw_lock_control_reg =
+                               (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
        }
 
-       if (delta > 0) {
-               fp->rx_sge_prod += delta;
-               /* clear page-end entries */
-               bnx2x_clear_sge_mask_next_elems(fp);
+       /* Validating that the resource is currently taken */
+       lock_status = REG_RD(bp, hw_lock_control_reg);
+       if (!(lock_status & resource_bit)) {
+               DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
+                  lock_status, resource_bit);
+               return -EFAULT;
        }
 
-       DP(NETIF_MSG_RX_STATUS,
-          "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
-          fp->last_max_sge, fp->rx_sge_prod);
+       REG_WR(bp, hw_lock_control_reg, resource_bit);
+       return 0;
 }
 
-static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
-{
-       /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
-       memset(fp->sge_mask, 0xff,
-              (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
-
-       /* Clear the two last indices in the page to 1:
-          these are the indices that correspond to the "next" element,
-          hence will never be indicated and should be removed from
-          the calculations. */
-       bnx2x_clear_sge_mask_next_elems(fp);
-}
 
-static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
-                           struct sk_buff *skb, u16 cons, u16 prod)
+int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
 {
-       struct bnx2x *bp = fp->bp;
-       struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
-       struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
-       struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
-       dma_addr_t mapping;
-
-       /* move empty skb from pool to prod and map it */
-       prod_rx_buf->skb = fp->tpa_pool[queue].skb;
-       mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
-                                bp->rx_buf_size, DMA_FROM_DEVICE);
-       dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
-
-       /* move partial skb from cons to pool (don't unmap yet) */
-       fp->tpa_pool[queue] = *cons_rx_buf;
-
-       /* mark bin state as start - print error if current state != stop */
-       if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
-               BNX2X_ERR("start of bin not in stop [%d]\n", queue);
-
-       fp->tpa_state[queue] = BNX2X_TPA_START;
-
-       /* point prod_bd to new skb */
-       prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
-       prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
-
-#ifdef BNX2X_STOP_ON_ERROR
-       fp->tpa_queue_used |= (1 << queue);
-#ifdef _ASM_GENERIC_INT_L64_H
-       DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
-#else
-       DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
-#endif
-          fp->tpa_queue_used);
-#endif
-}
-
-static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
-                              struct sk_buff *skb,
-                              struct eth_fast_path_rx_cqe *fp_cqe,
-                              u16 cqe_idx)
-{
-       struct sw_rx_page *rx_pg, old_rx_pg;
-       u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
-       u32 i, frag_len, frag_size, pages;
-       int err;
-       int j;
-
-       frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
-       pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
-
-       /* This is needed in order to enable forwarding support */
-       if (frag_size)
-               skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
-                                              max(frag_size, (u32)len_on_bd));
+       /* The GPIO should be swapped if swap register is set and active */
+       int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
+                        REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
+       int gpio_shift = gpio_num +
+                       (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+       u32 gpio_mask = (1 << gpio_shift);
+       u32 gpio_reg;
+       int value;
 
-#ifdef BNX2X_STOP_ON_ERROR
-       if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
-               BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
-                         pages, cqe_idx);
-               BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
-                         fp_cqe->pkt_len, len_on_bd);
-               bnx2x_panic();
+       if (gpio_num > MISC_REGISTERS_GPIO_3) {
+               BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
                return -EINVAL;
        }
-#endif
-
-       /* Run through the SGL and compose the fragmented skb */
-       for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
-               u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
-
-               /* FW gives the indices of the SGE as if the ring is an array
-                  (meaning that "next" element will consume 2 indices) */
-               frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
-               rx_pg = &fp->rx_page_ring[sge_idx];
-               old_rx_pg = *rx_pg;
-
-               /* If we fail to allocate a substitute page, we simply stop
-                  where we are and drop the whole packet */
-               err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
-               if (unlikely(err)) {
-                       fp->eth_q_stats.rx_skb_alloc_failed++;
-                       return err;
-               }
 
-               /* Unmap the page as we r going to pass it to the stack */
-               dma_unmap_page(&bp->pdev->dev,
-                              dma_unmap_addr(&old_rx_pg, mapping),
-                              SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
-
-               /* Add one frag and update the appropriate fields in the skb */
-               skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
+       /* read GPIO value */
+       gpio_reg = REG_RD(bp, MISC_REG_GPIO);
 
-               skb->data_len += frag_len;
-               skb->truesize += frag_len;
-               skb->len += frag_len;
+       /* get the requested pin value */
+       if ((gpio_reg & gpio_mask) == gpio_mask)
+               value = 1;
+       else
+               value = 0;
 
-               frag_size -= frag_len;
-       }
+       DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
 
-       return 0;
+       return value;
 }
 
-static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
-                          u16 queue, int pad, int len, union eth_rx_cqe *cqe,
-                          u16 cqe_idx)
+int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
 {
-       struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
-       struct sk_buff *skb = rx_buf->skb;
-       /* alloc new skb */
-       struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
-
-       /* Unmap skb in the pool anyway, as we are going to change
-          pool entry status to BNX2X_TPA_STOP even if new skb allocation
-          fails. */
-       dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
-                        bp->rx_buf_size, DMA_FROM_DEVICE);
-
-       if (likely(new_skb)) {
-               /* fix ip xsum and give it to the stack */
-               /* (no need to map the new skb) */
-#ifdef BCM_VLAN
-               int is_vlan_cqe =
-                       (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
-                        PARSING_FLAGS_VLAN);
-               int is_not_hwaccel_vlan_cqe =
-                       (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
-#endif
-
-               prefetch(skb);
-               prefetch(((char *)(skb)) + 128);
-
-#ifdef BNX2X_STOP_ON_ERROR
-               if (pad + len > bp->rx_buf_size) {
-                       BNX2X_ERR("skb_put is about to fail...  "
-                                 "pad %d  len %d  rx_buf_size %d\n",
-                                 pad, len, bp->rx_buf_size);
-                       bnx2x_panic();
-                       return;
-               }
-#endif
-
-               skb_reserve(skb, pad);
-               skb_put(skb, len);
-
-               skb->protocol = eth_type_trans(skb, bp->dev);
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
+       /* The GPIO should be swapped if swap register is set and active */
+       int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
+                        REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
+       int gpio_shift = gpio_num +
+                       (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+       u32 gpio_mask = (1 << gpio_shift);
+       u32 gpio_reg;
 
-               {
-                       struct iphdr *iph;
+       if (gpio_num > MISC_REGISTERS_GPIO_3) {
+               BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
+               return -EINVAL;
+       }
 
-                       iph = (struct iphdr *)skb->data;
-#ifdef BCM_VLAN
-                       /* If there is no Rx VLAN offloading -
-                          take VLAN tag into an account */
-                       if (unlikely(is_not_hwaccel_vlan_cqe))
-                               iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
-#endif
-                       iph->check = 0;
-                       iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
-               }
+       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+       /* read GPIO and mask except the float bits */
+       gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
 
-               if (!bnx2x_fill_frag_skb(bp, fp, skb,
-                                        &cqe->fast_path_cqe, cqe_idx)) {
-#ifdef BCM_VLAN
-                       if ((bp->vlgrp != NULL) && is_vlan_cqe &&
-                           (!is_not_hwaccel_vlan_cqe))
-                               vlan_gro_receive(&fp->napi, bp->vlgrp,
-                                                le16_to_cpu(cqe->fast_path_cqe.
-                                                            vlan_tag), skb);
-                       else
-#endif
-                               napi_gro_receive(&fp->napi, skb);
-               } else {
-                       DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
-                          " - dropping packet!\n");
-                       dev_kfree_skb(skb);
-               }
+       switch (mode) {
+       case MISC_REGISTERS_GPIO_OUTPUT_LOW:
+               DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
+                  gpio_num, gpio_shift);
+               /* clear FLOAT and set CLR */
+               gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+               gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
+               break;
 
+       case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
+               DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
+                  gpio_num, gpio_shift);
+               /* clear FLOAT and set SET */
+               gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+               gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
+               break;
 
-               /* put new skb in bin */
-               fp->tpa_pool[queue].skb = new_skb;
+       case MISC_REGISTERS_GPIO_INPUT_HI_Z:
+               DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
+                  gpio_num, gpio_shift);
+               /* set FLOAT */
+               gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+               break;
 
-       } else {
-               /* else drop the packet and keep the buffer in the bin */
-               DP(NETIF_MSG_RX_STATUS,
-                  "Failed to allocate new skb - dropping packet!\n");
-               fp->eth_q_stats.rx_skb_alloc_failed++;
+       default:
+               break;
        }
 
-       fp->tpa_state[queue] = BNX2X_TPA_STOP;
+       REG_WR(bp, MISC_REG_GPIO, gpio_reg);
+       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+
+       return 0;
 }
 
-static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
-                                       struct bnx2x_fastpath *fp,
-                                       u16 bd_prod, u16 rx_comp_prod,
-                                       u16 rx_sge_prod)
+int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
 {
-       struct ustorm_eth_rx_producers rx_prods = {0};
-       int i;
+       /* The GPIO should be swapped if swap register is set and active */
+       int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
+                        REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
+       int gpio_shift = gpio_num +
+                       (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+       u32 gpio_mask = (1 << gpio_shift);
+       u32 gpio_reg;
 
-       /* Update producers */
-       rx_prods.bd_prod = bd_prod;
-       rx_prods.cqe_prod = rx_comp_prod;
-       rx_prods.sge_prod = rx_sge_prod;
+       if (gpio_num > MISC_REGISTERS_GPIO_3) {
+               BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
+               return -EINVAL;
+       }
 
-       /*
-        * Make sure that the BD and SGE data is updated before updating the
-        * producers since FW might read the BD/SGE right after the producer
-        * is updated.
-        * This is only applicable for weak-ordered memory model archs such
-        * as IA-64. The following barrier is also mandatory since FW will
-        * assumes BDs must have buffers.
-        */
-       wmb();
+       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+       /* read GPIO int */
+       gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
 
-       for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
-               REG_WR(bp, BAR_USTRORM_INTMEM +
-                      USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
-                      ((u32 *)&rx_prods)[i]);
+       switch (mode) {
+       case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
+               DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
+                                  "output low\n", gpio_num, gpio_shift);
+               /* clear SET and set CLR */
+               gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
+               gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
+               break;
 
-       mmiowb(); /* keep prod updates ordered */
+       case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
+               DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
+                                  "output high\n", gpio_num, gpio_shift);
+               /* clear CLR and set SET */
+               gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
+               gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
+               break;
 
-       DP(NETIF_MSG_RX_STATUS,
-          "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
-          fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
-}
+       default:
+               break;
+       }
 
-/* Set Toeplitz hash value in the skb using the value from the
- * CQE (calculated by HW).
- */
-static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
-                                       struct sk_buff *skb)
-{
-       /* Set Toeplitz hash from CQE */
-       if ((bp->dev->features & NETIF_F_RXHASH) &&
-           (cqe->fast_path_cqe.status_flags &
-            ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
-               skb->rxhash =
-               le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
+       REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
+       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+
+       return 0;
 }
 
-static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
+static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
 {
-       struct bnx2x *bp = fp->bp;
-       u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
-       u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
-       int rx_pkt = 0;
-
-#ifdef BNX2X_STOP_ON_ERROR
-       if (unlikely(bp->panic))
-               return 0;
-#endif
-
-       /* CQ "next element" is of the size of the regular element,
-          that's why it's ok here */
-       hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
-       if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
-               hw_comp_cons++;
-
-       bd_cons = fp->rx_bd_cons;
-       bd_prod = fp->rx_bd_prod;
-       bd_prod_fw = bd_prod;
-       sw_comp_cons = fp->rx_comp_cons;
-       sw_comp_prod = fp->rx_comp_prod;
-
-       /* Memory barrier necessary as speculative reads of the rx
-        * buffer can be ahead of the index in the status block
-        */
-       rmb();
+       u32 spio_mask = (1 << spio_num);
+       u32 spio_reg;
 
-       DP(NETIF_MSG_RX_STATUS,
-          "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
-          fp->index, hw_comp_cons, sw_comp_cons);
-
-       while (sw_comp_cons != hw_comp_cons) {
-               struct sw_rx_bd *rx_buf = NULL;
-               struct sk_buff *skb;
-               union eth_rx_cqe *cqe;
-               u8 cqe_fp_flags;
-               u16 len, pad;
-
-               comp_ring_cons = RCQ_BD(sw_comp_cons);
-               bd_prod = RX_BD(bd_prod);
-               bd_cons = RX_BD(bd_cons);
-
-               /* Prefetch the page containing the BD descriptor
-                  at producer's index. It will be needed when new skb is
-                  allocated */
-               prefetch((void *)(PAGE_ALIGN((unsigned long)
-                                            (&fp->rx_desc_ring[bd_prod])) -
-                                 PAGE_SIZE + 1));
-
-               cqe = &fp->rx_comp_ring[comp_ring_cons];
-               cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
-
-               DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
-                  "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
-                  cqe_fp_flags, cqe->fast_path_cqe.status_flags,
-                  le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
-                  le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
-                  le16_to_cpu(cqe->fast_path_cqe.pkt_len));
-
-               /* is this a slowpath msg? */
-               if (unlikely(CQE_TYPE(cqe_fp_flags))) {
-                       bnx2x_sp_event(fp, cqe);
-                       goto next_cqe;
-
-               /* this is an rx packet */
-               } else {
-                       rx_buf = &fp->rx_buf_ring[bd_cons];
-                       skb = rx_buf->skb;
-                       prefetch(skb);
-                       len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
-                       pad = cqe->fast_path_cqe.placement_offset;
-
-                       /* If CQE is marked both TPA_START and TPA_END
-                          it is a non-TPA CQE */
-                       if ((!fp->disable_tpa) &&
-                           (TPA_TYPE(cqe_fp_flags) !=
-                                       (TPA_TYPE_START | TPA_TYPE_END))) {
-                               u16 queue = cqe->fast_path_cqe.queue_index;
-
-                               if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
-                                       DP(NETIF_MSG_RX_STATUS,
-                                          "calling tpa_start on queue %d\n",
-                                          queue);
-
-                                       bnx2x_tpa_start(fp, queue, skb,
-                                                       bd_cons, bd_prod);
-
-                                       /* Set Toeplitz hash for an LRO skb */
-                                       bnx2x_set_skb_rxhash(bp, cqe, skb);
-
-                                       goto next_rx;
-                               }
+       if ((spio_num < MISC_REGISTERS_SPIO_4) ||
+           (spio_num > MISC_REGISTERS_SPIO_7)) {
+               BNX2X_ERR("Invalid SPIO %d\n", spio_num);
+               return -EINVAL;
+       }
 
-                               if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
-                                       DP(NETIF_MSG_RX_STATUS,
-                                          "calling tpa_stop on queue %d\n",
-                                          queue);
-
-                                       if (!BNX2X_RX_SUM_FIX(cqe))
-                                               BNX2X_ERR("STOP on none TCP "
-                                                         "data\n");
-
-                                       /* This is a size of the linear data
-                                          on this skb */
-                                       len = le16_to_cpu(cqe->fast_path_cqe.
-                                                               len_on_bd);
-                                       bnx2x_tpa_stop(bp, fp, queue, pad,
-                                                   len, cqe, comp_ring_cons);
-#ifdef BNX2X_STOP_ON_ERROR
-                                       if (bp->panic)
-                                               return 0;
-#endif
+       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
+       /* read SPIO and mask except the float bits */
+       spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
 
-                                       bnx2x_update_sge_prod(fp,
-                                                       &cqe->fast_path_cqe);
-                                       goto next_cqe;
-                               }
-                       }
+       switch (mode) {
+       case MISC_REGISTERS_SPIO_OUTPUT_LOW:
+               DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
+               /* clear FLOAT and set CLR */
+               spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
+               spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
+               break;
 
-                       dma_sync_single_for_device(&bp->pdev->dev,
-                                       dma_unmap_addr(rx_buf, mapping),
-                                                  pad + RX_COPY_THRESH,
-                                                  DMA_FROM_DEVICE);
-                       prefetch(((char *)(skb)) + 128);
-
-                       /* is this an error packet? */
-                       if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
-                               DP(NETIF_MSG_RX_ERR,
-                                  "ERROR  flags %x  rx packet %u\n",
-                                  cqe_fp_flags, sw_comp_cons);
-                               fp->eth_q_stats.rx_err_discard_pkt++;
-                               goto reuse_rx;
-                       }
+       case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
+               DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
+               /* clear FLOAT and set SET */
+               spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
+               spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
+               break;
 
-                       /* Since we don't have a jumbo ring
-                        * copy small packets if mtu > 1500
-                        */
-                       if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
-                           (len <= RX_COPY_THRESH)) {
-                               struct sk_buff *new_skb;
-
-                               new_skb = netdev_alloc_skb(bp->dev,
-                                                          len + pad);
-                               if (new_skb == NULL) {
-                                       DP(NETIF_MSG_RX_ERR,
-                                          "ERROR  packet dropped "
-                                          "because of alloc failure\n");
-                                       fp->eth_q_stats.rx_skb_alloc_failed++;
-                                       goto reuse_rx;
-                               }
+       case MISC_REGISTERS_SPIO_INPUT_HI_Z:
+               DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
+               /* set FLOAT */
+               spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
+               break;
 
-                               /* aligned copy */
-                               skb_copy_from_linear_data_offset(skb, pad,
-                                                   new_skb->data + pad, len);
-                               skb_reserve(new_skb, pad);
-                               skb_put(new_skb, len);
+       default:
+               break;
+       }
 
-                               bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
+       REG_WR(bp, MISC_REG_SPIO, spio_reg);
+       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 
-                               skb = new_skb;
+       return 0;
+}
 
-                       } else
-                       if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
-                               dma_unmap_single(&bp->pdev->dev,
-                                       dma_unmap_addr(rx_buf, mapping),
-                                                bp->rx_buf_size,
-                                                DMA_FROM_DEVICE);
-                               skb_reserve(skb, pad);
-                               skb_put(skb, len);
+void bnx2x_calc_fc_adv(struct bnx2x *bp)
+{
+       switch (bp->link_vars.ieee_fc &
+               MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
+       case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
+               bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
+                                         ADVERTISED_Pause);
+               break;
 
-                       } else {
-                               DP(NETIF_MSG_RX_ERR,
-                                  "ERROR  packet dropped because "
-                                  "of alloc failure\n");
-                               fp->eth_q_stats.rx_skb_alloc_failed++;
-reuse_rx:
-                               bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
-                               goto next_rx;
-                       }
+       case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
+               bp->port.advertising |= (ADVERTISED_Asym_Pause |
+                                        ADVERTISED_Pause);
+               break;
 
-                       skb->protocol = eth_type_trans(skb, bp->dev);
+       case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
+               bp->port.advertising |= ADVERTISED_Asym_Pause;
+               break;
 
-                       /* Set Toeplitz hash for a none-LRO skb */
-                       bnx2x_set_skb_rxhash(bp, cqe, skb);
+       default:
+               bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
+                                         ADVERTISED_Pause);
+               break;
+       }
+}
 
-                       skb->ip_summed = CHECKSUM_NONE;
-                       if (bp->rx_csum) {
-                               if (likely(BNX2X_RX_CSUM_OK(cqe)))
-                                       skb->ip_summed = CHECKSUM_UNNECESSARY;
-                               else
-                                       fp->eth_q_stats.hw_csum_err++;
-                       }
-               }
 
-               skb_record_rx_queue(skb, fp->index);
+u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
+{
+       if (!BP_NOMCP(bp)) {
+               u8 rc;
 
-#ifdef BCM_VLAN
-               if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
-                   (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
-                    PARSING_FLAGS_VLAN))
-                       vlan_gro_receive(&fp->napi, bp->vlgrp,
-                               le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
+               /* Initialize link parameters structure variables */
+               /* It is recommended to turn off RX FC for jumbo frames
+                  for better performance */
+               if (bp->dev->mtu > 5000)
+                       bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
                else
-#endif
-                       napi_gro_receive(&fp->napi, skb);
-
+                       bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
 
-next_rx:
-               rx_buf->skb = NULL;
+               bnx2x_acquire_phy_lock(bp);
 
-               bd_cons = NEXT_RX_IDX(bd_cons);
-               bd_prod = NEXT_RX_IDX(bd_prod);
-               bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
-               rx_pkt++;
-next_cqe:
-               sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
-               sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
+               if (load_mode == LOAD_DIAG)
+                       bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
 
-               if (rx_pkt == budget)
-                       break;
-       } /* while */
+               rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
 
-       fp->rx_bd_cons = bd_cons;
-       fp->rx_bd_prod = bd_prod_fw;
-       fp->rx_comp_cons = sw_comp_cons;
-       fp->rx_comp_prod = sw_comp_prod;
+               bnx2x_release_phy_lock(bp);
 
-       /* Update producers */
-       bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
-                            fp->rx_sge_prod);
+               bnx2x_calc_fc_adv(bp);
 
-       fp->rx_pkt += rx_pkt;
-       fp->rx_calls++;
+               if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
+                       bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+                       bnx2x_link_report(bp);
+               }
 
-       return rx_pkt;
+               return rc;
+       }
+       BNX2X_ERR("Bootcode is missing - can not initialize link\n");
+       return -EINVAL;
 }
 
-static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
+void bnx2x_link_set(struct bnx2x *bp)
 {
-       struct bnx2x_fastpath *fp = fp_cookie;
-       struct bnx2x *bp = fp->bp;
-
-       /* Return here if interrupt is disabled */
-       if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
-               DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
-               return IRQ_HANDLED;
-       }
-
-       DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
-          fp->index, fp->sb_id);
-       bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
-
-#ifdef BNX2X_STOP_ON_ERROR
-       if (unlikely(bp->panic))
-               return IRQ_HANDLED;
-#endif
-
-       /* Handle Rx and Tx according to MSI-X vector */
-       prefetch(fp->rx_cons_sb);
-       prefetch(fp->tx_cons_sb);
-       prefetch(&fp->status_blk->u_status_block.status_block_index);
-       prefetch(&fp->status_blk->c_status_block.status_block_index);
-       napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+       if (!BP_NOMCP(bp)) {
+               bnx2x_acquire_phy_lock(bp);
+               bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+               bnx2x_release_phy_lock(bp);
 
-       return IRQ_HANDLED;
+               bnx2x_calc_fc_adv(bp);
+       } else
+               BNX2X_ERR("Bootcode is missing - can not set link\n");
 }
 
-static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
+static void bnx2x__link_reset(struct bnx2x *bp)
 {
-       struct bnx2x *bp = netdev_priv(dev_instance);
-       u16 status = bnx2x_ack_int(bp);
-       u16 mask;
-       int i;
-
-       /* Return here if interrupt is shared and it's not for us */
-       if (unlikely(status == 0)) {
-               DP(NETIF_MSG_INTR, "not our interrupt!\n");
-               return IRQ_NONE;
-       }
-       DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
-
-       /* Return here if interrupt is disabled */
-       if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
-               DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
-               return IRQ_HANDLED;
-       }
+       if (!BP_NOMCP(bp)) {
+               bnx2x_acquire_phy_lock(bp);
+               bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
+               bnx2x_release_phy_lock(bp);
+       } else
+               BNX2X_ERR("Bootcode is missing - can not reset link\n");
+}
 
-#ifdef BNX2X_STOP_ON_ERROR
-       if (unlikely(bp->panic))
-               return IRQ_HANDLED;
-#endif
+u8 bnx2x_link_test(struct bnx2x *bp)
+{
+       u8 rc = 0;
 
-       for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
-               struct bnx2x_fastpath *fp = &bp->fp[i];
+       if (!BP_NOMCP(bp)) {
+               bnx2x_acquire_phy_lock(bp);
+               rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
+               bnx2x_release_phy_lock(bp);
+       } else
+               BNX2X_ERR("Bootcode is missing - can not test link\n");
 
-               mask = 0x2 << fp->sb_id;
-               if (status & mask) {
-                       /* Handle Rx and Tx according to SB id */
-                       prefetch(fp->rx_cons_sb);
-                       prefetch(&fp->status_blk->u_status_block.
-                                               status_block_index);
-                       prefetch(fp->tx_cons_sb);
-                       prefetch(&fp->status_blk->c_status_block.
-                                               status_block_index);
-                       napi_schedule(&bnx2x_fp(bp, fp->index, napi));
-                       status &= ~mask;
-               }
-       }
+       return rc;
+}
 
-#ifdef BCM_CNIC
-       mask = 0x2 << CNIC_SB_ID(bp);
-       if (status & (mask | 0x1)) {
-               struct cnic_ops *c_ops = NULL;
+static void bnx2x_init_port_minmax(struct bnx2x *bp)
+{
+       u32 r_param = bp->link_vars.line_speed / 8;
+       u32 fair_periodic_timeout_usec;
+       u32 t_fair;
 
-               rcu_read_lock();
-               c_ops = rcu_dereference(bp->cnic_ops);
-               if (c_ops)
-                       c_ops->cnic_handler(bp->cnic_data, NULL);
-               rcu_read_unlock();
+       memset(&(bp->cmng.rs_vars), 0,
+              sizeof(struct rate_shaping_vars_per_port));
+       memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
 
-               status &= ~mask;
-       }
-#endif
+       /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
+       bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
 
-       if (unlikely(status & 0x1)) {
-               queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+       /* this is the threshold below which no timer arming will occur
+          1.25 coefficient is for the threshold to be a little bigger
+          than the real time, to compensate for timer inaccuracy */
+       bp->cmng.rs_vars.rs_threshold =
+                               (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
 
-               status &= ~0x1;
-               if (!status)
-                       return IRQ_HANDLED;
-       }
+       /* resolution of fairness timer */
+       fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
+       /* for 10G it is 1000usec. for 1G it is 10000usec. */
+       t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
 
-       if (unlikely(status))
-               DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
-                  status);
+       /* this is the threshold below which we won't arm the timer anymore */
+       bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
 
-       return IRQ_HANDLED;
+       /* we multiply by 1e3/8 to get bytes/msec.
+          We don't want the credits to pass a credit
+          of the t_fair*FAIR_MEM (algorithm resolution) */
+       bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
+       /* since each tick is 4 usec */
+       bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
 }
 
-/* end of fast path */
-
-static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
-
-/* Link */
-
-/*
- * General service functions
+/* Calculates the sum of vn_min_rates.
+   It's needed for further normalizing of the min_rates.
+   Returns:
+     sum of vn_min_rates.
+       or
+     0 - if all the min_rates are 0.
+     In the latter case the fairness algorithm should be deactivated.
+     If not all min_rates are zero then those that are zeroes will be set to 1.
  */
-
-static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
+static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
 {
-       u32 lock_status;
-       u32 resource_bit = (1 << resource);
-       int func = BP_FUNC(bp);
-       u32 hw_lock_control_reg;
-       int cnt;
-
-       /* Validating that the resource is within range */
-       if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
-               DP(NETIF_MSG_HW,
-                  "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
-                  resource, HW_LOCK_MAX_RESOURCE_VALUE);
-               return -EINVAL;
-       }
+       int all_zero = 1;
+       int port = BP_PORT(bp);
+       int vn;
 
-       if (func <= 5) {
-               hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
-       } else {
-               hw_lock_control_reg =
-                               (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
-       }
+       bp->vn_weight_sum = 0;
+       for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+               int func = 2*vn + port;
+               u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
+               u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
+                                  FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
 
-       /* Validating that the resource is not already taken */
-       lock_status = REG_RD(bp, hw_lock_control_reg);
-       if (lock_status & resource_bit) {
-               DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
-                  lock_status, resource_bit);
-               return -EEXIST;
-       }
+               /* Skip hidden vns */
+               if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
+                       continue;
 
-       /* Try for 5 second every 5ms */
-       for (cnt = 0; cnt < 1000; cnt++) {
-               /* Try to acquire the lock */
-               REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
-               lock_status = REG_RD(bp, hw_lock_control_reg);
-               if (lock_status & resource_bit)
-                       return 0;
+               /* If min rate is zero - set it to 1 */
+               if (!vn_min_rate)
+                       vn_min_rate = DEF_MIN_RATE;
+               else
+                       all_zero = 0;
 
-               msleep(5);
+               bp->vn_weight_sum += vn_min_rate;
        }
-       DP(NETIF_MSG_HW, "Timeout\n");
-       return -EAGAIN;
+
+       /* ... only if all min rates are zeros - disable fairness */
+       if (all_zero) {
+               bp->cmng.flags.cmng_enables &=
+                                       ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
+               DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
+                  "  fairness will be disabled\n");
+       } else
+               bp->cmng.flags.cmng_enables |=
+                                       CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 }
 
-static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
+static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
 {
-       u32 lock_status;
-       u32 resource_bit = (1 << resource);
-       int func = BP_FUNC(bp);
-       u32 hw_lock_control_reg;
-
-       DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
+       struct rate_shaping_vars_per_vn m_rs_vn;
+       struct fairness_vars_per_vn m_fair_vn;
+       u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
+       u16 vn_min_rate, vn_max_rate;
+       int i;
 
-       /* Validating that the resource is within range */
-       if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
-               DP(NETIF_MSG_HW,
-                  "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
-                  resource, HW_LOCK_MAX_RESOURCE_VALUE);
-               return -EINVAL;
-       }
+       /* If function is hidden - set min and max to zeroes */
+       if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
+               vn_min_rate = 0;
+               vn_max_rate = 0;
 
-       if (func <= 5) {
-               hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        } else {
-               hw_lock_control_reg =
-                               (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
-       }
-
-       /* Validating that the resource is currently taken */
-       lock_status = REG_RD(bp, hw_lock_control_reg);
-       if (!(lock_status & resource_bit)) {
-               DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
-                  lock_status, resource_bit);
-               return -EFAULT;
+               vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
+                               FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
+               /* If min rate is zero - set it to 1 */
+               if (!vn_min_rate)
+                       vn_min_rate = DEF_MIN_RATE;
+               vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
+                               FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
        }
+       DP(NETIF_MSG_IFUP,
+          "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
+          func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
 
-       REG_WR(bp, hw_lock_control_reg, resource_bit);
-       return 0;
-}
+       memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
+       memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
 
-/* HW Lock for shared dual port PHYs */
-static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
-{
-       mutex_lock(&bp->port.phy_mutex);
+       /* global vn counter - maximal Mbps for this vn */
+       m_rs_vn.vn_counter.rate = vn_max_rate;
 
-       if (bp->port.need_hw_lock)
-               bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
-}
+       /* quota - number of bytes transmitted in this period */
+       m_rs_vn.vn_counter.quota =
+                               (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
 
-static void bnx2x_release_phy_lock(struct bnx2x *bp)
-{
-       if (bp->port.need_hw_lock)
-               bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
+       if (bp->vn_weight_sum) {
+               /* credit for each period of the fairness algorithm:
+                  number of bytes in T_FAIR (the vn share the port rate).
+                  vn_weight_sum should not be larger than 10000, thus
+                  T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
+                  than zero */
+               m_fair_vn.vn_credit_delta =
+                       max_t(u32, (vn_min_rate * (T_FAIR_COEF /
+                                                  (8 * bp->vn_weight_sum))),
+                             (bp->cmng.fair_vars.fair_threshold * 2));
+               DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
+                  m_fair_vn.vn_credit_delta);
+       }
+
+       /* Store it to internal memory */
+       for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
+               REG_WR(bp, BAR_XSTRORM_INTMEM +
+                      XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
+                      ((u32 *)(&m_rs_vn))[i]);
 
-       mutex_unlock(&bp->port.phy_mutex);
+       for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
+               REG_WR(bp, BAR_XSTRORM_INTMEM +
+                      XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
+                      ((u32 *)(&m_fair_vn))[i]);
 }
 
-int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
+
+/* This function is called upon link interrupt */
+static void bnx2x_link_attn(struct bnx2x *bp)
 {
-       /* The GPIO should be swapped if swap register is set and active */
-       int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
-                        REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
-       int gpio_shift = gpio_num +
-                       (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
-       u32 gpio_mask = (1 << gpio_shift);
-       u32 gpio_reg;
-       int value;
+       u32 prev_link_status = bp->link_vars.link_status;
+       /* Make sure that we are synced with the current statistics */
+       bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 
-       if (gpio_num > MISC_REGISTERS_GPIO_3) {
-               BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
-               return -EINVAL;
-       }
+       bnx2x_link_update(&bp->link_params, &bp->link_vars);
 
-       /* read GPIO value */
-       gpio_reg = REG_RD(bp, MISC_REG_GPIO);
+       if (bp->link_vars.link_up) {
 
-       /* get the requested pin value */
-       if ((gpio_reg & gpio_mask) == gpio_mask)
-               value = 1;
-       else
-               value = 0;
+               /* dropless flow control */
+               if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
+                       int port = BP_PORT(bp);
+                       u32 pause_enabled = 0;
 
-       DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
+                       if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
+                               pause_enabled = 1;
 
-       return value;
-}
+                       REG_WR(bp, BAR_USTRORM_INTMEM +
+                              USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
+                              pause_enabled);
+               }
 
-int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
-{
-       /* The GPIO should be swapped if swap register is set and active */
-       int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
-                        REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
-       int gpio_shift = gpio_num +
-                       (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
-       u32 gpio_mask = (1 << gpio_shift);
-       u32 gpio_reg;
+               if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
+                       struct host_port_stats *pstats;
 
-       if (gpio_num > MISC_REGISTERS_GPIO_3) {
-               BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
-               return -EINVAL;
+                       pstats = bnx2x_sp(bp, port_stats);
+                       /* reset old bmac stats */
+                       memset(&(pstats->mac_stx[0]), 0,
+                              sizeof(struct mac_stx));
+               }
+               if (bp->state == BNX2X_STATE_OPEN)
+                       bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
        }
 
-       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
-       /* read GPIO and mask except the float bits */
-       gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
-
-       switch (mode) {
-       case MISC_REGISTERS_GPIO_OUTPUT_LOW:
-               DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
-                  gpio_num, gpio_shift);
-               /* clear FLOAT and set CLR */
-               gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
-               gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
-               break;
+       /* indicate link status only if link status actually changed */
+       if (prev_link_status != bp->link_vars.link_status)
+               bnx2x_link_report(bp);
 
-       case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
-               DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
-                  gpio_num, gpio_shift);
-               /* clear FLOAT and set SET */
-               gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
-               gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
-               break;
+       if (IS_E1HMF(bp)) {
+               int port = BP_PORT(bp);
+               int func;
+               int vn;
 
-       case MISC_REGISTERS_GPIO_INPUT_HI_Z:
-               DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
-                  gpio_num, gpio_shift);
-               /* set FLOAT */
-               gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
-               break;
+               /* Set the attention towards other drivers on the same port */
+               for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+                       if (vn == BP_E1HVN(bp))
+                               continue;
 
-       default:
-               break;
-       }
+                       func = ((vn << 1) | port);
+                       REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
+                              (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
+               }
 
-       REG_WR(bp, MISC_REG_GPIO, gpio_reg);
-       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+               if (bp->link_vars.link_up) {
+                       int i;
 
-       return 0;
-}
+                       /* Init rate shaping and fairness contexts */
+                       bnx2x_init_port_minmax(bp);
 
-int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
-{
-       /* The GPIO should be swapped if swap register is set and active */
-       int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
-                        REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
-       int gpio_shift = gpio_num +
-                       (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
-       u32 gpio_mask = (1 << gpio_shift);
-       u32 gpio_reg;
+                       for (vn = VN_0; vn < E1HVN_MAX; vn++)
+                               bnx2x_init_vn_minmax(bp, 2*vn + port);
 
-       if (gpio_num > MISC_REGISTERS_GPIO_3) {
-               BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
-               return -EINVAL;
+                       /* Store it to internal memory */
+                       for (i = 0;
+                            i < sizeof(struct cmng_struct_per_port) / 4; i++)
+                               REG_WR(bp, BAR_XSTRORM_INTMEM +
+                                 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
+                                      ((u32 *)(&bp->cmng))[i]);
+               }
        }
+}
 
-       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
-       /* read GPIO int */
-       gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
-
-       switch (mode) {
-       case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
-               DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
-                                  "output low\n", gpio_num, gpio_shift);
-               /* clear SET and set CLR */
-               gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
-               gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
-               break;
+void bnx2x__link_status_update(struct bnx2x *bp)
+{
+       if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
+               return;
 
-       case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
-               DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
-                                  "output high\n", gpio_num, gpio_shift);
-               /* clear CLR and set SET */
-               gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
-               gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
-               break;
+       bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
 
-       default:
-               break;
-       }
+       if (bp->link_vars.link_up)
+               bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+       else
+               bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 
-       REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
-       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+       bnx2x_calc_vn_weight_sum(bp);
 
-       return 0;
+       /* indicate link status */
+       bnx2x_link_report(bp);
 }
 
-static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
+static void bnx2x_pmf_update(struct bnx2x *bp)
 {
-       u32 spio_mask = (1 << spio_num);
-       u32 spio_reg;
-
-       if ((spio_num < MISC_REGISTERS_SPIO_4) ||
-           (spio_num > MISC_REGISTERS_SPIO_7)) {
-               BNX2X_ERR("Invalid SPIO %d\n", spio_num);
-               return -EINVAL;
-       }
-
-       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
-       /* read SPIO and mask except the float bits */
-       spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
+       int port = BP_PORT(bp);
+       u32 val;
 
-       switch (mode) {
-       case MISC_REGISTERS_SPIO_OUTPUT_LOW:
-               DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
-               /* clear FLOAT and set CLR */
-               spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
-               spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
-               break;
+       bp->port.pmf = 1;
+       DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
 
-       case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
-               DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
-               /* clear FLOAT and set SET */
-               spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
-               spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
-               break;
+       /* enable nig attention */
+       val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
+       REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
+       REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
 
-       case MISC_REGISTERS_SPIO_INPUT_HI_Z:
-               DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
-               /* set FLOAT */
-               spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
-               break;
+       bnx2x_stats_handle(bp, STATS_EVENT_PMF);
+}
 
-       default:
-               break;
-       }
+/* end of Link */
 
-       REG_WR(bp, MISC_REG_SPIO, spio_reg);
-       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
+/* slow path */
 
-       return 0;
-}
+/*
+ * General service functions
+ */
 
-static void bnx2x_calc_fc_adv(struct bnx2x *bp)
+/* send the MCP a request, block until there is a reply */
+u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
 {
-       switch (bp->link_vars.ieee_fc &
-               MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
-       case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
-               bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
-                                         ADVERTISED_Pause);
-               break;
+       int func = BP_FUNC(bp);
+       u32 seq = ++bp->fw_seq;
+       u32 rc = 0;
+       u32 cnt = 1;
+       u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
 
-       case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
-               bp->port.advertising |= (ADVERTISED_Asym_Pause |
-                                        ADVERTISED_Pause);
-               break;
+       mutex_lock(&bp->fw_mb_mutex);
+       SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
+       DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
 
-       case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
-               bp->port.advertising |= ADVERTISED_Asym_Pause;
-               break;
+       do {
+               /* let the FW do its magic ... */
+               msleep(delay);
 
-       default:
-               bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
-                                         ADVERTISED_Pause);
-               break;
+               rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
+
+               /* Give the FW up to 5 seconds (500*10ms) */
+       } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
+
+       DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
+          cnt*delay, rc, seq);
+
+       /* is this a reply to our command? */
+       if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
+               rc &= FW_MSG_CODE_MASK;
+       else {
+               /* FW BUG! */
+               BNX2X_ERR("FW failed to respond!\n");
+               bnx2x_fw_dump(bp);
+               rc = 0;
        }
+       mutex_unlock(&bp->fw_mb_mutex);
+
+       return rc;
 }
 
-static void bnx2x_link_report(struct bnx2x *bp)
+static void bnx2x_e1h_disable(struct bnx2x *bp)
 {
-       if (bp->flags & MF_FUNC_DIS) {
-               netif_carrier_off(bp->dev);
-               netdev_err(bp->dev, "NIC Link is Down\n");
-               return;
-       }
+       int port = BP_PORT(bp);
 
-       if (bp->link_vars.link_up) {
-               u16 line_speed;
+       netif_tx_disable(bp->dev);
 
-               if (bp->state == BNX2X_STATE_OPEN)
-                       netif_carrier_on(bp->dev);
-               netdev_info(bp->dev, "NIC Link is Up, ");
+       REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
 
-               line_speed = bp->link_vars.line_speed;
-               if (IS_E1HMF(bp)) {
-                       u16 vn_max_rate;
+       netif_carrier_off(bp->dev);
+}
 
-                       vn_max_rate =
-                               ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
-                                FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
-                       if (vn_max_rate < line_speed)
-                               line_speed = vn_max_rate;
-               }
-               pr_cont("%d Mbps ", line_speed);
+static void bnx2x_e1h_enable(struct bnx2x *bp)
+{
+       int port = BP_PORT(bp);
 
-               if (bp->link_vars.duplex == DUPLEX_FULL)
-                       pr_cont("full duplex");
-               else
-                       pr_cont("half duplex");
-
-               if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
-                       if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
-                               pr_cont(", receive ");
-                               if (bp->link_vars.flow_ctrl &
-                                   BNX2X_FLOW_CTRL_TX)
-                                       pr_cont("& transmit ");
-                       } else {
-                               pr_cont(", transmit ");
-                       }
-                       pr_cont("flow control ON");
-               }
-               pr_cont("\n");
+       REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
 
-       } else { /* link_down */
-               netif_carrier_off(bp->dev);
-               netdev_err(bp->dev, "NIC Link is Down\n");
-       }
+       /* Only the Tx queues should be re-enabled here */
+       netif_tx_wake_all_queues(bp->dev);
+
+       /*
+        * Should not call netif_carrier_on since it will be called if the link
+        * is up when checking for link state
+        */
 }
 
-static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
+static void bnx2x_update_min_max(struct bnx2x *bp)
 {
-       if (!BP_NOMCP(bp)) {
-               u8 rc;
-
-               /* Initialize link parameters structure variables */
-               /* It is recommended to turn off RX FC for jumbo frames
-                  for better performance */
-               if (bp->dev->mtu > 5000)
-                       bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
-               else
-                       bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
+       int port = BP_PORT(bp);
+       int vn, i;
 
-               bnx2x_acquire_phy_lock(bp);
+       /* Init rate shaping and fairness contexts */
+       bnx2x_init_port_minmax(bp);
 
-               if (load_mode == LOAD_DIAG)
-                       bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
+       bnx2x_calc_vn_weight_sum(bp);
 
-               rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+       for (vn = VN_0; vn < E1HVN_MAX; vn++)
+               bnx2x_init_vn_minmax(bp, 2*vn + port);
 
-               bnx2x_release_phy_lock(bp);
+       if (bp->port.pmf) {
+               int func;
 
-               bnx2x_calc_fc_adv(bp);
+               /* Set the attention towards other drivers on the same port */
+               for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+                       if (vn == BP_E1HVN(bp))
+                               continue;
 
-               if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
-                       bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
-                       bnx2x_link_report(bp);
+                       func = ((vn << 1) | port);
+                       REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
+                              (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
                }
 
-               return rc;
+               /* Store it to internal memory */
+               for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
+                       REG_WR(bp, BAR_XSTRORM_INTMEM +
+                              XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
+                              ((u32 *)(&bp->cmng))[i]);
        }
-       BNX2X_ERR("Bootcode is missing - can not initialize link\n");
-       return -EINVAL;
 }
 
-static void bnx2x_link_set(struct bnx2x *bp)
+static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
 {
-       if (!BP_NOMCP(bp)) {
-               bnx2x_acquire_phy_lock(bp);
-               bnx2x_phy_init(&bp->link_params, &bp->link_vars);
-               bnx2x_release_phy_lock(bp);
+       DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
 
-               bnx2x_calc_fc_adv(bp);
-       } else
-               BNX2X_ERR("Bootcode is missing - can not set link\n");
+       if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
+
+               /*
+                * This is the only place besides the function initialization
+                * where the bp->flags can change so it is done without any
+                * locks
+                */
+               if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
+                       DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
+                       bp->flags |= MF_FUNC_DIS;
+
+                       bnx2x_e1h_disable(bp);
+               } else {
+                       DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
+                       bp->flags &= ~MF_FUNC_DIS;
+
+                       bnx2x_e1h_enable(bp);
+               }
+               dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
+       }
+       if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
+
+               bnx2x_update_min_max(bp);
+               dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
+       }
+
+       /* Report results to MCP */
+       if (dcc_event)
+               bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
+       else
+               bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
 }
 
-static void bnx2x__link_reset(struct bnx2x *bp)
+/* must be called under the spq lock */
+static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
 {
-       if (!BP_NOMCP(bp)) {
-               bnx2x_acquire_phy_lock(bp);
-               bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
-               bnx2x_release_phy_lock(bp);
-       } else
-               BNX2X_ERR("Bootcode is missing - can not reset link\n");
+       struct eth_spe *next_spe = bp->spq_prod_bd;
+
+       if (bp->spq_prod_bd == bp->spq_last_bd) {
+               bp->spq_prod_bd = bp->spq;
+               bp->spq_prod_idx = 0;
+               DP(NETIF_MSG_TIMER, "end of spq\n");
+       } else {
+               bp->spq_prod_bd++;
+               bp->spq_prod_idx++;
+       }
+       return next_spe;
 }
 
-static u8 bnx2x_link_test(struct bnx2x *bp)
+/* must be called under the spq lock */
+static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
 {
-       u8 rc = 0;
+       int func = BP_FUNC(bp);
 
-       if (!BP_NOMCP(bp)) {
-               bnx2x_acquire_phy_lock(bp);
-               rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
-               bnx2x_release_phy_lock(bp);
-       } else
-               BNX2X_ERR("Bootcode is missing - can not test link\n");
+       /* Make sure that BD data is updated before writing the producer */
+       wmb();
 
-       return rc;
+       REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
+              bp->spq_prod_idx);
+       mmiowb();
 }
 
-static void bnx2x_init_port_minmax(struct bnx2x *bp)
+/* the slow path queue is odd since completions arrive on the fastpath ring */
+int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
+                        u32 data_hi, u32 data_lo, int common)
 {
-       u32 r_param = bp->link_vars.line_speed / 8;
-       u32 fair_periodic_timeout_usec;
-       u32 t_fair;
+       struct eth_spe *spe;
 
-       memset(&(bp->cmng.rs_vars), 0,
-              sizeof(struct rate_shaping_vars_per_port));
-       memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
+#ifdef BNX2X_STOP_ON_ERROR
+       if (unlikely(bp->panic))
+               return -EIO;
+#endif
 
-       /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
-       bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
+       spin_lock_bh(&bp->spq_lock);
 
-       /* this is the threshold below which no timer arming will occur
-          1.25 coefficient is for the threshold to be a little bigger
-          than the real time, to compensate for timer in-accuracy */
-       bp->cmng.rs_vars.rs_threshold =
-                               (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
+       if (!bp->spq_left) {
+               BNX2X_ERR("BUG! SPQ ring full!\n");
+               spin_unlock_bh(&bp->spq_lock);
+               bnx2x_panic();
+               return -EBUSY;
+       }
 
-       /* resolution of fairness timer */
-       fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
-       /* for 10G it is 1000usec. for 1G it is 10000usec. */
-       t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
+       spe = bnx2x_sp_get_next(bp);
 
-       /* this is the threshold below which we won't arm the timer anymore */
-       bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
+       /* CID needs port number to be encoded in it */
+       spe->hdr.conn_and_cmd_data =
+                       cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
+                                   HW_CID(bp, cid));
+       spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
+       if (common)
+               spe->hdr.type |=
+                       cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
 
-       /* we multiply by 1e3/8 to get bytes/msec.
-          We don't want the credits to pass a credit
-          of the t_fair*FAIR_MEM (algorithm resolution) */
-       bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
-       /* since each tick is 4 usec */
-       bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
-}
+       spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
+       spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
 
-/* Calculates the sum of vn_min_rates.
-   It's needed for further normalizing of the min_rates.
-   Returns:
-     sum of vn_min_rates.
-       or
-     0 - if all the min_rates are 0.
-     In the later case fainess algorithm should be deactivated.
-     If not all min_rates are zero then those that are zeroes will be set to 1.
- */
-static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
-{
-       int all_zero = 1;
-       int port = BP_PORT(bp);
-       int vn;
-
-       bp->vn_weight_sum = 0;
-       for (vn = VN_0; vn < E1HVN_MAX; vn++) {
-               int func = 2*vn + port;
-               u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
-               u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
-                                  FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
-
-               /* Skip hidden vns */
-               if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
-                       continue;
-
-               /* If min rate is zero - set it to 1 */
-               if (!vn_min_rate)
-                       vn_min_rate = DEF_MIN_RATE;
-               else
-                       all_zero = 0;
+       bp->spq_left--;
 
-               bp->vn_weight_sum += vn_min_rate;
-       }
+       DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
+          "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
+          bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
+          (u32)(U64_LO(bp->spq_mapping) +
+          (void *)bp->spq_prod_bd - (void *)bp->spq), command,
+          HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
 
-       /* ... only if all min rates are zeros - disable fairness */
-       if (all_zero) {
-               bp->cmng.flags.cmng_enables &=
-                                       ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
-               DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
-                  "  fairness will be disabled\n");
-       } else
-               bp->cmng.flags.cmng_enables |=
-                                       CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
+       bnx2x_sp_prod_update(bp);
+       spin_unlock_bh(&bp->spq_lock);
+       return 0;
 }
 
-static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
+/* acquire split MCP access lock register */
+static int bnx2x_acquire_alr(struct bnx2x *bp)
 {
-       struct rate_shaping_vars_per_vn m_rs_vn;
-       struct fairness_vars_per_vn m_fair_vn;
-       u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
-       u16 vn_min_rate, vn_max_rate;
-       int i;
+       u32 j, val;
+       int rc = 0;
 
-       /* If function is hidden - set min and max to zeroes */
-       if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
-               vn_min_rate = 0;
-               vn_max_rate = 0;
+       might_sleep();
+       for (j = 0; j < 1000; j++) {
+               val = (1UL << 31);
+               REG_WR(bp, GRCBASE_MCP + 0x9c, val);
+               val = REG_RD(bp, GRCBASE_MCP + 0x9c);
+               if (val & (1L << 31))
+                       break;
 
-       } else {
-               vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
-                               FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
-               /* If min rate is zero - set it to 1 */
-               if (!vn_min_rate)
-                       vn_min_rate = DEF_MIN_RATE;
-               vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
-                               FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
+               msleep(5);
        }
-       DP(NETIF_MSG_IFUP,
-          "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
-          func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
-
-       memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
-       memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
-
-       /* global vn counter - maximal Mbps for this vn */
-       m_rs_vn.vn_counter.rate = vn_max_rate;
-
-       /* quota - number of bytes transmitted in this period */
-       m_rs_vn.vn_counter.quota =
-                               (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
-
-       if (bp->vn_weight_sum) {
-               /* credit for each period of the fairness algorithm:
-                  number of bytes in T_FAIR (the vn share the port rate).
-                  vn_weight_sum should not be larger than 10000, thus
-                  T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
-                  than zero */
-               m_fair_vn.vn_credit_delta =
-                       max_t(u32, (vn_min_rate * (T_FAIR_COEF /
-                                                  (8 * bp->vn_weight_sum))),
-                             (bp->cmng.fair_vars.fair_threshold * 2));
-               DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
-                  m_fair_vn.vn_credit_delta);
+       if (!(val & (1L << 31))) {
+               BNX2X_ERR("Cannot acquire MCP access lock register\n");
+               rc = -EBUSY;
        }
 
-       /* Store it to internal memory */
-       for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
-               REG_WR(bp, BAR_XSTRORM_INTMEM +
-                      XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
-                      ((u32 *)(&m_rs_vn))[i]);
-
-       for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
-               REG_WR(bp, BAR_XSTRORM_INTMEM +
-                      XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
-                      ((u32 *)(&m_fair_vn))[i]);
+       return rc;
 }
 
-
-/* This function is called upon link interrupt */
-static void bnx2x_link_attn(struct bnx2x *bp)
+/* release split MCP access lock register */
+static void bnx2x_release_alr(struct bnx2x *bp)
 {
-       u32 prev_link_status = bp->link_vars.link_status;
-       /* Make sure that we are synced with the current statistics */
-       bnx2x_stats_handle(bp, STATS_EVENT_STOP);
-
-       bnx2x_link_update(&bp->link_params, &bp->link_vars);
+       REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
+}
 
-       if (bp->link_vars.link_up) {
+static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
+{
+       struct host_def_status_block *def_sb = bp->def_status_blk;
+       u16 rc = 0;
 
-               /* dropless flow control */
-               if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
-                       int port = BP_PORT(bp);
-                       u32 pause_enabled = 0;
+       barrier(); /* status block is written to by the chip */
+       if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
+               bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
+               rc |= 1;
+       }
+       if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
+               bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
+               rc |= 2;
+       }
+       if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
+               bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
+               rc |= 4;
+       }
+       if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
+               bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
+               rc |= 8;
+       }
+       if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
+               bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
+               rc |= 16;
+       }
+       return rc;
+}
 
-                       if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
-                               pause_enabled = 1;
+/*
+ * slow path service functions
+ */
 
-                       REG_WR(bp, BAR_USTRORM_INTMEM +
-                              USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
-                              pause_enabled);
-               }
+static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
+{
+       int port = BP_PORT(bp);
+       u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
+                      COMMAND_REG_ATTN_BITS_SET);
+       u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+                             MISC_REG_AEU_MASK_ATTN_FUNC_0;
+       u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
+                                      NIG_REG_MASK_INTERRUPT_PORT0;
+       u32 aeu_mask;
+       u32 nig_mask = 0;
 
-               if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
-                       struct host_port_stats *pstats;
+       if (bp->attn_state & asserted)
+               BNX2X_ERR("IGU ERROR\n");
 
-                       pstats = bnx2x_sp(bp, port_stats);
-                       /* reset old bmac stats */
-                       memset(&(pstats->mac_stx[0]), 0,
-                              sizeof(struct mac_stx));
-               }
-               if (bp->state == BNX2X_STATE_OPEN)
-                       bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
-       }
+       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+       aeu_mask = REG_RD(bp, aeu_addr);
 
-       /* indicate link status only if link status actually changed */
-       if (prev_link_status != bp->link_vars.link_status)
-               bnx2x_link_report(bp);
+       DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
+          aeu_mask, asserted);
+       aeu_mask &= ~(asserted & 0x3ff);
+       DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
 
-       if (IS_E1HMF(bp)) {
-               int port = BP_PORT(bp);
-               int func;
-               int vn;
+       REG_WR(bp, aeu_addr, aeu_mask);
+       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
 
-               /* Set the attention towards other drivers on the same port */
-               for (vn = VN_0; vn < E1HVN_MAX; vn++) {
-                       if (vn == BP_E1HVN(bp))
-                               continue;
+       DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
+       bp->attn_state |= asserted;
+       DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
 
-                       func = ((vn << 1) | port);
-                       REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
-                              (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
-               }
+       if (asserted & ATTN_HARD_WIRED_MASK) {
+               if (asserted & ATTN_NIG_FOR_FUNC) {
 
-               if (bp->link_vars.link_up) {
-                       int i;
+                       bnx2x_acquire_phy_lock(bp);
 
-                       /* Init rate shaping and fairness contexts */
-                       bnx2x_init_port_minmax(bp);
+                       /* save nig interrupt mask */
+                       nig_mask = REG_RD(bp, nig_int_mask_addr);
+                       REG_WR(bp, nig_int_mask_addr, 0);
 
-                       for (vn = VN_0; vn < E1HVN_MAX; vn++)
-                               bnx2x_init_vn_minmax(bp, 2*vn + port);
+                       bnx2x_link_attn(bp);
 
-                       /* Store it to internal memory */
-                       for (i = 0;
-                            i < sizeof(struct cmng_struct_per_port) / 4; i++)
-                               REG_WR(bp, BAR_XSTRORM_INTMEM +
-                                 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
-                                      ((u32 *)(&bp->cmng))[i]);
+                       /* handle unicore attn? */
                }
-       }
-}
-
-static void bnx2x__link_status_update(struct bnx2x *bp)
-{
-       if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
-               return;
+               if (asserted & ATTN_SW_TIMER_4_FUNC)
+                       DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
 
-       bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
+               if (asserted & GPIO_2_FUNC)
+                       DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
 
-       if (bp->link_vars.link_up)
-               bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
-       else
-               bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+               if (asserted & GPIO_3_FUNC)
+                       DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
 
-       bnx2x_calc_vn_weight_sum(bp);
+               if (asserted & GPIO_4_FUNC)
+                       DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
 
-       /* indicate link status */
-       bnx2x_link_report(bp);
+               if (port == 0) {
+                       if (asserted & ATTN_GENERAL_ATTN_1) {
+                               DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
+                               REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
+                       }
+                       if (asserted & ATTN_GENERAL_ATTN_2) {
+                               DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
+                               REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
+                       }
+                       if (asserted & ATTN_GENERAL_ATTN_3) {
+                               DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
+                               REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
+                       }
+               } else {
+                       if (asserted & ATTN_GENERAL_ATTN_4) {
+                               DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
+                               REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
+                       }
+                       if (asserted & ATTN_GENERAL_ATTN_5) {
+                               DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
+                               REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
+                       }
+                       if (asserted & ATTN_GENERAL_ATTN_6) {
+                               DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
+                               REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
+                       }
+               }
+
+       } /* if hardwired */
+
+       DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
+          asserted, hc_addr);
+       REG_WR(bp, hc_addr, asserted);
+
+       /* now set back the mask */
+       if (asserted & ATTN_NIG_FOR_FUNC) {
+               REG_WR(bp, nig_int_mask_addr, nig_mask);
+               bnx2x_release_phy_lock(bp);
+       }
 }
 
-static void bnx2x_pmf_update(struct bnx2x *bp)
+static inline void bnx2x_fan_failure(struct bnx2x *bp)
 {
        int port = BP_PORT(bp);
-       u32 val;
-
-       bp->port.pmf = 1;
-       DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
 
-       /* enable nig attention */
-       val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
-       REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
-       REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
+       /* mark the failure */
+       bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
+       bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
+       SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
+                bp->link_params.ext_phy_config);
 
-       bnx2x_stats_handle(bp, STATS_EVENT_PMF);
+       /* log the failure */
+       netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
+              " the driver to shutdown the card to prevent permanent"
+              " damage.  Please contact OEM Support for assistance\n");
 }
 
-/* end of Link */
-
-/* slow path */
+static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
+{
+       int port = BP_PORT(bp);
+       int reg_offset;
+       u32 val, swap_val, swap_override;
 
-/*
- * General service functions
- */
+       reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
+                            MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
 
-/* send the MCP a request, block until there is a reply */
-u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
-{
-       int func = BP_FUNC(bp);
-       u32 seq = ++bp->fw_seq;
-       u32 rc = 0;
-       u32 cnt = 1;
-       u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
+       if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
 
-       mutex_lock(&bp->fw_mb_mutex);
-       SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
-       DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
+               val = REG_RD(bp, reg_offset);
+               val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
+               REG_WR(bp, reg_offset, val);
 
-       do {
-               /* let the FW do it's magic ... */
-               msleep(delay);
+               BNX2X_ERR("SPIO5 hw attention\n");
 
-               rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
+               /* Fan failure attention */
+               switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+                       /* Low power mode is controlled by GPIO 2 */
+                       bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+                                      MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+                       /* The PHY reset is controlled by GPIO 1 */
+                       bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+                                      MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+                       break;
 
-               /* Give the FW up to 5 second (500*10ms) */
-       } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+                       /* The PHY reset is controlled by GPIO 1 */
+                       /* fake the port number to cancel the swap done in
+                          set_gpio() */
+                       swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+                       swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+                       port = (swap_val && swap_override) ^ 1;
+                       bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+                                      MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+                       break;
 
-       DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
-          cnt*delay, rc, seq);
+               default:
+                       break;
+               }
+               bnx2x_fan_failure(bp);
+       }
 
-       /* is this a reply to our command? */
-       if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
-               rc &= FW_MSG_CODE_MASK;
-       else {
-               /* FW BUG! */
-               BNX2X_ERR("FW failed to respond!\n");
-               bnx2x_fw_dump(bp);
-               rc = 0;
+       if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
+                   AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
+               bnx2x_acquire_phy_lock(bp);
+               bnx2x_handle_module_detect_int(&bp->link_params);
+               bnx2x_release_phy_lock(bp);
        }
-       mutex_unlock(&bp->fw_mb_mutex);
 
-       return rc;
-}
+       if (attn & HW_INTERRUT_ASSERT_SET_0) {
 
-static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
-static void bnx2x_set_rx_mode(struct net_device *dev);
+               val = REG_RD(bp, reg_offset);
+               val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
+               REG_WR(bp, reg_offset, val);
 
-static void bnx2x_e1h_disable(struct bnx2x *bp)
+               BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
+                         (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
+               bnx2x_panic();
+       }
+}
+
+static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
 {
-       int port = BP_PORT(bp);
+       u32 val;
 
-       netif_tx_disable(bp->dev);
+       if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
 
-       REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+               val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
+               BNX2X_ERR("DB hw attention 0x%x\n", val);
+               /* DORQ discard attention */
+               if (val & 0x2)
+                       BNX2X_ERR("FATAL error from DORQ\n");
+       }
 
-       netif_carrier_off(bp->dev);
-}
+       if (attn & HW_INTERRUT_ASSERT_SET_1) {
 
-static void bnx2x_e1h_enable(struct bnx2x *bp)
-{
-       int port = BP_PORT(bp);
+               int port = BP_PORT(bp);
+               int reg_offset;
 
-       REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
+               reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
+                                    MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
 
-       /* Tx queue should be only reenabled */
-       netif_tx_wake_all_queues(bp->dev);
+               val = REG_RD(bp, reg_offset);
+               val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
+               REG_WR(bp, reg_offset, val);
 
-       /*
-        * Should not call netif_carrier_on since it will be called if the link
-        * is up when checking for link state
-        */
+               BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
+                         (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
+               bnx2x_panic();
+       }
 }
 
-static void bnx2x_update_min_max(struct bnx2x *bp)
+static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
 {
-       int port = BP_PORT(bp);
-       int vn, i;
+       u32 val;
 
-       /* Init rate shaping and fairness contexts */
-       bnx2x_init_port_minmax(bp);
+       if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
 
-       bnx2x_calc_vn_weight_sum(bp);
+               val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
+               BNX2X_ERR("CFC hw attention 0x%x\n", val);
+               /* CFC error attention */
+               if (val & 0x2)
+                       BNX2X_ERR("FATAL error from CFC\n");
+       }
 
-       for (vn = VN_0; vn < E1HVN_MAX; vn++)
-               bnx2x_init_vn_minmax(bp, 2*vn + port);
+       if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
 
-       if (bp->port.pmf) {
-               int func;
+               val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
+               BNX2X_ERR("PXP hw attention 0x%x\n", val);
+               /* RQ_USDMDP_FIFO_OVERFLOW */
+               if (val & 0x18000)
+                       BNX2X_ERR("FATAL error from PXP\n");
+       }
 
-               /* Set the attention towards other drivers on the same port */
-               for (vn = VN_0; vn < E1HVN_MAX; vn++) {
-                       if (vn == BP_E1HVN(bp))
-                               continue;
+       if (attn & HW_INTERRUT_ASSERT_SET_2) {
 
-                       func = ((vn << 1) | port);
-                       REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
-                              (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
-               }
+               int port = BP_PORT(bp);
+               int reg_offset;
 
-               /* Store it to internal memory */
-               for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
-                       REG_WR(bp, BAR_XSTRORM_INTMEM +
-                              XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
-                              ((u32 *)(&bp->cmng))[i]);
+               reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
+                                    MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
+
+               val = REG_RD(bp, reg_offset);
+               val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
+               REG_WR(bp, reg_offset, val);
+
+               BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
+                         (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
+               bnx2x_panic();
        }
 }
 
-static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
+static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
 {
-       DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
+       u32 val;
 
-       if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
+       if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
 
-               /*
-                * This is the only place besides the function initialization
-                * where the bp->flags can change so it is done without any
-                * locks
-                */
-               if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
-                       DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
-                       bp->flags |= MF_FUNC_DIS;
+               if (attn & BNX2X_PMF_LINK_ASSERT) {
+                       int func = BP_FUNC(bp);
 
-                       bnx2x_e1h_disable(bp);
-               } else {
-                       DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
-                       bp->flags &= ~MF_FUNC_DIS;
+                       REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
+                       bp->mf_config = SHMEM_RD(bp,
+                                          mf_cfg.func_mf_config[func].config);
+                       val = SHMEM_RD(bp, func_mb[func].drv_status);
+                       if (val & DRV_STATUS_DCC_EVENT_MASK)
+                               bnx2x_dcc_event(bp,
+                                           (val & DRV_STATUS_DCC_EVENT_MASK));
+                       bnx2x__link_status_update(bp);
+                       if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
+                               bnx2x_pmf_update(bp);
 
-                       bnx2x_e1h_enable(bp);
-               }
-               dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
-       }
-       if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
+               } else if (attn & BNX2X_MC_ASSERT_BITS) {
 
-               bnx2x_update_min_max(bp);
-               dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
-       }
+                       BNX2X_ERR("MC assert!\n");
+                       REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
+                       REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
+                       REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
+                       REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
+                       bnx2x_panic();
 
-       /* Report results to MCP */
-       if (dcc_event)
-               bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
-       else
-               bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
-}
+               } else if (attn & BNX2X_MCP_ASSERT) {
 
-/* must be called under the spq lock */
-static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
-{
-       struct eth_spe *next_spe = bp->spq_prod_bd;
+                       BNX2X_ERR("MCP assert!\n");
+                       REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
+                       bnx2x_fw_dump(bp);
 
-       if (bp->spq_prod_bd == bp->spq_last_bd) {
-               bp->spq_prod_bd = bp->spq;
-               bp->spq_prod_idx = 0;
-               DP(NETIF_MSG_TIMER, "end of spq\n");
-       } else {
-               bp->spq_prod_bd++;
-               bp->spq_prod_idx++;
+               } else
+                       BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
+       }
+
+       if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
+               BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
+               if (attn & BNX2X_GRC_TIMEOUT) {
+                       val = CHIP_IS_E1H(bp) ?
+                               REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
+                       BNX2X_ERR("GRC time-out 0x%08x\n", val);
+               }
+               if (attn & BNX2X_GRC_RSV) {
+                       val = CHIP_IS_E1H(bp) ?
+                               REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
+                       BNX2X_ERR("GRC reserved 0x%08x\n", val);
+               }
+               REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
        }
-       return next_spe;
 }
 
-/* must be called under the spq lock */
-static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
+#define BNX2X_MISC_GEN_REG      MISC_REG_GENERIC_POR_1
+#define LOAD_COUNTER_BITS      16 /* Number of bits for load counter */
+#define LOAD_COUNTER_MASK      (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
+#define RESET_DONE_FLAG_MASK   (~LOAD_COUNTER_MASK)
+#define RESET_DONE_FLAG_SHIFT  LOAD_COUNTER_BITS
+#define CHIP_PARITY_SUPPORTED(bp)   (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
+/*
+ * should be run under rtnl lock
+ */
+static inline void bnx2x_set_reset_done(struct bnx2x *bp)
 {
-       int func = BP_FUNC(bp);
-
-       /* Make sure that BD data is updated before writing the producer */
-       wmb();
-
-       REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
-              bp->spq_prod_idx);
+       u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+       val &= ~(1 << RESET_DONE_FLAG_SHIFT);
+       REG_WR(bp, BNX2X_MISC_GEN_REG, val);
+       barrier();
        mmiowb();
 }
 
-/* the slow path queue is odd since completions arrive on the fastpath ring */
-static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
-                        u32 data_hi, u32 data_lo, int common)
+/*
+ * should be run under rtnl lock
+ */
+static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
 {
-       struct eth_spe *spe;
-
-#ifdef BNX2X_STOP_ON_ERROR
-       if (unlikely(bp->panic))
-               return -EIO;
-#endif
+       u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+       val |= (1 << 16);
+       REG_WR(bp, BNX2X_MISC_GEN_REG, val);
+       barrier();
+       mmiowb();
+}
 
-       spin_lock_bh(&bp->spq_lock);
+/*
+ * should be run under rtnl lock
+ */
+bool bnx2x_reset_is_done(struct bnx2x *bp)
+{
+       u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+       DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
+       return (val & RESET_DONE_FLAG_MASK) ? false : true;
+}
 
-       if (!bp->spq_left) {
-               BNX2X_ERR("BUG! SPQ ring full!\n");
-               spin_unlock_bh(&bp->spq_lock);
-               bnx2x_panic();
-               return -EBUSY;
-       }
+/*
+ * should be run under rtnl lock
+ */
+inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
+{
+       u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
 
-       spe = bnx2x_sp_get_next(bp);
+       DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
 
-       /* CID needs port number to be encoded int it */
-       spe->hdr.conn_and_cmd_data =
-                       cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
-                                   HW_CID(bp, cid));
-       spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
-       if (common)
-               spe->hdr.type |=
-                       cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
+       val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
+       REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
+       barrier();
+       mmiowb();
+}
 
-       spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
-       spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
+/*
+ * should be run under rtnl lock
+ */
+u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
+{
+       u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
 
-       bp->spq_left--;
+       DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
 
-       DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
-          "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
-          bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
-          (u32)(U64_LO(bp->spq_mapping) +
-          (void *)bp->spq_prod_bd - (void *)bp->spq), command,
-          HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
+       val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
+       REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
+       barrier();
+       mmiowb();
 
-       bnx2x_sp_prod_update(bp);
-       spin_unlock_bh(&bp->spq_lock);
-       return 0;
+       return val1;
 }
 
-/* acquire split MCP access lock register */
-static int bnx2x_acquire_alr(struct bnx2x *bp)
+/*
+ * should be run under rtnl lock
+ */
+static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
 {
-       u32 j, val;
-       int rc = 0;
-
-       might_sleep();
-       for (j = 0; j < 1000; j++) {
-               val = (1UL << 31);
-               REG_WR(bp, GRCBASE_MCP + 0x9c, val);
-               val = REG_RD(bp, GRCBASE_MCP + 0x9c);
-               if (val & (1L << 31))
-                       break;
-
-               msleep(5);
-       }
-       if (!(val & (1L << 31))) {
-               BNX2X_ERR("Cannot acquire MCP access lock register\n");
-               rc = -EBUSY;
-       }
+       return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
+}
 
-       return rc;
+static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
+{
+       u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+       REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
 }
 
-/* release split MCP access lock register */
-static void bnx2x_release_alr(struct bnx2x *bp)
+static inline void _print_next_block(int idx, const char *blk)
 {
-       REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
+       if (idx)
+               pr_cont(", ");
+       pr_cont("%s", blk);
 }
 
-static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
+static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
 {
-       struct host_def_status_block *def_sb = bp->def_status_blk;
-       u16 rc = 0;
+       int i = 0;
+       u32 cur_bit = 0;
+       for (i = 0; sig; i++) {
+               cur_bit = ((u32)0x1 << i);
+               if (sig & cur_bit) {
+                       switch (cur_bit) {
+                       case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
+                               _print_next_block(par_num++, "BRB");
+                               break;
+                       case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
+                               _print_next_block(par_num++, "PARSER");
+                               break;
+                       case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
+                               _print_next_block(par_num++, "TSDM");
+                               break;
+                       case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
+                               _print_next_block(par_num++, "SEARCHER");
+                               break;
+                       case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
+                               _print_next_block(par_num++, "TSEMI");
+                               break;
+                       }
 
-       barrier(); /* status block is written to by the chip */
-       if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
-               bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
-               rc |= 1;
-       }
-       if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
-               bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
-               rc |= 2;
-       }
-       if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
-               bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
-               rc |= 4;
-       }
-       if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
-               bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
-               rc |= 8;
-       }
-       if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
-               bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
-               rc |= 16;
+                       /* Clear the bit */
+                       sig &= ~cur_bit;
+               }
        }
-       return rc;
-}
 
-/*
- * slow path service functions
- */
+       return par_num;
+}
 
-static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
+static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
 {
-       int port = BP_PORT(bp);
-       u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
-                      COMMAND_REG_ATTN_BITS_SET);
-       u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
-                             MISC_REG_AEU_MASK_ATTN_FUNC_0;
-       u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
-                                      NIG_REG_MASK_INTERRUPT_PORT0;
-       u32 aeu_mask;
-       u32 nig_mask = 0;
-
-       if (bp->attn_state & asserted)
-               BNX2X_ERR("IGU ERROR\n");
-
-       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
-       aeu_mask = REG_RD(bp, aeu_addr);
-
-       DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
-          aeu_mask, asserted);
-       aeu_mask &= ~(asserted & 0x3ff);
-       DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
-
-       REG_WR(bp, aeu_addr, aeu_mask);
-       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
-
-       DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
-       bp->attn_state |= asserted;
-       DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
-
-       if (asserted & ATTN_HARD_WIRED_MASK) {
-               if (asserted & ATTN_NIG_FOR_FUNC) {
+       int i = 0;
+       u32 cur_bit = 0;
+       for (i = 0; sig; i++) {
+               cur_bit = ((u32)0x1 << i);
+               if (sig & cur_bit) {
+                       switch (cur_bit) {
+                       case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
+                               _print_next_block(par_num++, "PBCLIENT");
+                               break;
+                       case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
+                               _print_next_block(par_num++, "QM");
+                               break;
+                       case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
+                               _print_next_block(par_num++, "XSDM");
+                               break;
+                       case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
+                               _print_next_block(par_num++, "XSEMI");
+                               break;
+                       case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
+                               _print_next_block(par_num++, "DOORBELLQ");
+                               break;
+                       case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
+                               _print_next_block(par_num++, "VAUX PCI CORE");
+                               break;
+                       case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
+                               _print_next_block(par_num++, "DEBUG");
+                               break;
+                       case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
+                               _print_next_block(par_num++, "USDM");
+                               break;
+                       case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
+                               _print_next_block(par_num++, "USEMI");
+                               break;
+                       case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
+                               _print_next_block(par_num++, "UPB");
+                               break;
+                       case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
+                               _print_next_block(par_num++, "CSDM");
+                               break;
+                       }
 
-                       bnx2x_acquire_phy_lock(bp);
+                       /* Clear the bit */
+                       sig &= ~cur_bit;
+               }
+       }
 
-                       /* save nig interrupt mask */
-                       nig_mask = REG_RD(bp, nig_int_mask_addr);
-                       REG_WR(bp, nig_int_mask_addr, 0);
+       return par_num;
+}
 
-                       bnx2x_link_attn(bp);
+static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
+{
+       int i = 0;
+       u32 cur_bit = 0;
+       for (i = 0; sig; i++) {
+               cur_bit = ((u32)0x1 << i);
+               if (sig & cur_bit) {
+                       switch (cur_bit) {
+                       case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
+                               _print_next_block(par_num++, "CSEMI");
+                               break;
+                       case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
+                               _print_next_block(par_num++, "PXP");
+                               break;
+                       case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
+                               _print_next_block(par_num++,
+                                       "PXPPCICLOCKCLIENT");
+                               break;
+                       case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
+                               _print_next_block(par_num++, "CFC");
+                               break;
+                       case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
+                               _print_next_block(par_num++, "CDU");
+                               break;
+                       case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
+                               _print_next_block(par_num++, "IGU");
+                               break;
+                       case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
+                               _print_next_block(par_num++, "MISC");
+                               break;
+                       }
 
-                       /* handle unicore attn? */
+                       /* Clear the bit */
+                       sig &= ~cur_bit;
                }
-               if (asserted & ATTN_SW_TIMER_4_FUNC)
-                       DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
-
-               if (asserted & GPIO_2_FUNC)
-                       DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
-
-               if (asserted & GPIO_3_FUNC)
-                       DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
+       }
 
-               if (asserted & GPIO_4_FUNC)
-                       DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
+       return par_num;
+}
 
-               if (port == 0) {
-                       if (asserted & ATTN_GENERAL_ATTN_1) {
-                               DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
-                               REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
-                       }
-                       if (asserted & ATTN_GENERAL_ATTN_2) {
-                               DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
-                               REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
-                       }
-                       if (asserted & ATTN_GENERAL_ATTN_3) {
-                               DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
-                               REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
-                       }
-               } else {
-                       if (asserted & ATTN_GENERAL_ATTN_4) {
-                               DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
-                               REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
-                       }
-                       if (asserted & ATTN_GENERAL_ATTN_5) {
-                               DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
-                               REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
-                       }
-                       if (asserted & ATTN_GENERAL_ATTN_6) {
-                               DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
-                               REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
+static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
+{
+       int i = 0;
+       u32 cur_bit = 0;
+       for (i = 0; sig; i++) {
+               cur_bit = ((u32)0x1 << i);
+               if (sig & cur_bit) {
+                       switch (cur_bit) {
+                       case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
+                               _print_next_block(par_num++, "MCP ROM");
+                               break;
+                       case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
+                               _print_next_block(par_num++, "MCP UMP RX");
+                               break;
+                       case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
+                               _print_next_block(par_num++, "MCP UMP TX");
+                               break;
+                       case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
+                               _print_next_block(par_num++, "MCP SCPAD");
+                               break;
                        }
-               }
 
-       } /* if hardwired */
+                       /* Clear the bit */
+                       sig &= ~cur_bit;
+               }
+       }
 
-       DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
-          asserted, hc_addr);
-       REG_WR(bp, hc_addr, asserted);
+       return par_num;
+}
 
-       /* now set back the mask */
-       if (asserted & ATTN_NIG_FOR_FUNC) {
-               REG_WR(bp, nig_int_mask_addr, nig_mask);
-               bnx2x_release_phy_lock(bp);
-       }
+static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
+                                    u32 sig2, u32 sig3)
+{
+       if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
+           (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
+               int par_num = 0;
+               DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
+                       "[0]:0x%08x [1]:0x%08x "
+                       "[2]:0x%08x [3]:0x%08x\n",
+                         sig0 & HW_PRTY_ASSERT_SET_0,
+                         sig1 & HW_PRTY_ASSERT_SET_1,
+                         sig2 & HW_PRTY_ASSERT_SET_2,
+                         sig3 & HW_PRTY_ASSERT_SET_3);
+               printk(KERN_ERR"%s: Parity errors detected in blocks: ",
+                      bp->dev->name);
+               par_num = bnx2x_print_blocks_with_parity0(
+                       sig0 & HW_PRTY_ASSERT_SET_0, par_num);
+               par_num = bnx2x_print_blocks_with_parity1(
+                       sig1 & HW_PRTY_ASSERT_SET_1, par_num);
+               par_num = bnx2x_print_blocks_with_parity2(
+                       sig2 & HW_PRTY_ASSERT_SET_2, par_num);
+               par_num = bnx2x_print_blocks_with_parity3(
+                       sig3 & HW_PRTY_ASSERT_SET_3, par_num);
+               printk("\n");
+               return true;
+       } else
+               return false;
 }
 
-static inline void bnx2x_fan_failure(struct bnx2x *bp)
+bool bnx2x_chk_parity_attn(struct bnx2x *bp)
 {
+       struct attn_route attn;
        int port = BP_PORT(bp);
 
-       /* mark the failure */
-       bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
-       bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
-       SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
-                bp->link_params.ext_phy_config);
+       attn.sig[0] = REG_RD(bp,
+               MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
+                            port*4);
+       attn.sig[1] = REG_RD(bp,
+               MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
+                            port*4);
+       attn.sig[2] = REG_RD(bp,
+               MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
+                            port*4);
+       attn.sig[3] = REG_RD(bp,
+               MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
+                            port*4);
 
-       /* log the failure */
-       netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
-              " the driver to shutdown the card to prevent permanent"
-              " damage.  Please contact OEM Support for assistance\n");
+       return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
+                                       attn.sig[3]);
 }
 
-static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
+static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
 {
+       struct attn_route attn, *group_mask;
        int port = BP_PORT(bp);
-       int reg_offset;
-       u32 val, swap_val, swap_override;
-
-       reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
-                            MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+       int index;
+       u32 reg_addr;
+       u32 val;
+       u32 aeu_mask;
 
-       if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
+       /* need to take HW lock because MCP or other port might also
+          try to handle this event */
+       bnx2x_acquire_alr(bp);
 
-               val = REG_RD(bp, reg_offset);
-               val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
-               REG_WR(bp, reg_offset, val);
+       if (bnx2x_chk_parity_attn(bp)) {
+               bp->recovery_state = BNX2X_RECOVERY_INIT;
+               bnx2x_set_reset_in_progress(bp);
+               schedule_delayed_work(&bp->reset_task, 0);
+               /* Disable HW interrupts */
+               bnx2x_int_disable(bp);
+               bnx2x_release_alr(bp);
+               /* In case of parity errors don't handle attentions so that
+                * other function would "see" parity errors.
+                */
+               return;
+       }
 
-               BNX2X_ERR("SPIO5 hw attention\n");
+       attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
+       attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
+       attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
+       attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
+       DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
+          attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
 
-               /* Fan failure attention */
-               switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
-               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
-                       /* Low power mode is controlled by GPIO 2 */
-                       bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-                                      MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
-                       /* The PHY reset is controlled by GPIO 1 */
-                       bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-                                      MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
-                       break;
+       for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
+               if (deasserted & (1 << index)) {
+                       group_mask = &bp->attn_group[index];
 
-               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
-                       /* The PHY reset is controlled by GPIO 1 */
-                       /* fake the port number to cancel the swap done in
-                          set_gpio() */
-                       swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
-                       swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
-                       port = (swap_val && swap_override) ^ 1;
-                       bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-                                      MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
-                       break;
+                       DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
+                          index, group_mask->sig[0], group_mask->sig[1],
+                          group_mask->sig[2], group_mask->sig[3]);
 
-               default:
-                       break;
+                       bnx2x_attn_int_deasserted3(bp,
+                                       attn.sig[3] & group_mask->sig[3]);
+                       bnx2x_attn_int_deasserted1(bp,
+                                       attn.sig[1] & group_mask->sig[1]);
+                       bnx2x_attn_int_deasserted2(bp,
+                                       attn.sig[2] & group_mask->sig[2]);
+                       bnx2x_attn_int_deasserted0(bp,
+                                       attn.sig[0] & group_mask->sig[0]);
                }
-               bnx2x_fan_failure(bp);
        }
 
-       if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
-                   AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
-               bnx2x_acquire_phy_lock(bp);
-               bnx2x_handle_module_detect_int(&bp->link_params);
-               bnx2x_release_phy_lock(bp);
-       }
+       bnx2x_release_alr(bp);
 
-       if (attn & HW_INTERRUT_ASSERT_SET_0) {
+       reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
 
-               val = REG_RD(bp, reg_offset);
-               val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
-               REG_WR(bp, reg_offset, val);
+       val = ~deasserted;
+       DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
+          val, reg_addr);
+       REG_WR(bp, reg_addr, val);
 
-               BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
-                         (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
-               bnx2x_panic();
-       }
-}
+       if (~bp->attn_state & deasserted)
+               BNX2X_ERR("IGU ERROR\n");
 
-static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
-{
-       u32 val;
+       reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+                         MISC_REG_AEU_MASK_ATTN_FUNC_0;
 
-       if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
+       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+       aeu_mask = REG_RD(bp, reg_addr);
 
-               val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
-               BNX2X_ERR("DB hw attention 0x%x\n", val);
-               /* DORQ discard attention */
-               if (val & 0x2)
-                       BNX2X_ERR("FATAL error from DORQ\n");
-       }
+       DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
+          aeu_mask, deasserted);
+       aeu_mask |= (deasserted & 0x3ff);
+       DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
 
-       if (attn & HW_INTERRUT_ASSERT_SET_1) {
+       REG_WR(bp, reg_addr, aeu_mask);
+       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
 
-               int port = BP_PORT(bp);
-               int reg_offset;
+       DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
+       bp->attn_state &= ~deasserted;
+       DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
+}
 
-               reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
-                                    MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
+static void bnx2x_attn_int(struct bnx2x *bp)
+{
+       /* read local copy of bits */
+       u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
+                                                               attn_bits);
+       u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
+                                                               attn_bits_ack);
+       u32 attn_state = bp->attn_state;
 
-               val = REG_RD(bp, reg_offset);
-               val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
-               REG_WR(bp, reg_offset, val);
+       /* look for changed bits */
+       u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
+       u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
 
-               BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
-                         (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
-               bnx2x_panic();
-       }
-}
+       DP(NETIF_MSG_HW,
+          "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
+          attn_bits, attn_ack, asserted, deasserted);
 
-static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
-{
-       u32 val;
+       if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
+               BNX2X_ERR("BAD attention state\n");
 
-       if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
+       /* handle bits that were raised */
+       if (asserted)
+               bnx2x_attn_int_asserted(bp, asserted);
 
-               val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
-               BNX2X_ERR("CFC hw attention 0x%x\n", val);
-               /* CFC error attention */
-               if (val & 0x2)
-                       BNX2X_ERR("FATAL error from CFC\n");
-       }
+       if (deasserted)
+               bnx2x_attn_int_deasserted(bp, deasserted);
+}
 
-       if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
+static void bnx2x_sp_task(struct work_struct *work)
+{
+       struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
+       u16 status;
 
-               val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
-               BNX2X_ERR("PXP hw attention 0x%x\n", val);
-               /* RQ_USDMDP_FIFO_OVERFLOW */
-               if (val & 0x18000)
-                       BNX2X_ERR("FATAL error from PXP\n");
+       /* Return here if interrupt is disabled */
+       if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
+               DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
+               return;
        }
 
-       if (attn & HW_INTERRUT_ASSERT_SET_2) {
-
-               int port = BP_PORT(bp);
-               int reg_offset;
+       status = bnx2x_update_dsb_idx(bp);
+/*     if (status == 0)                                     */
+/*             BNX2X_ERR("spurious slowpath interrupt!\n"); */
 
-               reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
-                                    MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
+       DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
 
-               val = REG_RD(bp, reg_offset);
-               val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
-               REG_WR(bp, reg_offset, val);
+       /* HW attentions */
+       if (status & 0x1) {
+               bnx2x_attn_int(bp);
+               status &= ~0x1;
+       }
 
-               BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
-                         (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
-               bnx2x_panic();
+       /* CStorm events: STAT_QUERY */
+       if (status & 0x2) {
+               DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
+               status &= ~0x2;
        }
+
+       if (unlikely(status))
+               DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
+                  status);
+
+       bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
+                    IGU_INT_NOP, 1);
+       bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
+                    IGU_INT_NOP, 1);
+       bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
+                    IGU_INT_NOP, 1);
+       bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
+                    IGU_INT_NOP, 1);
+       bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
+                    IGU_INT_ENABLE, 1);
 }
 
-static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
+irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
 {
-       u32 val;
+       struct net_device *dev = dev_instance;
+       struct bnx2x *bp = netdev_priv(dev);
 
-       if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
+       /* Return here if interrupt is disabled */
+       if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
+               DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
+               return IRQ_HANDLED;
+       }
 
-               if (attn & BNX2X_PMF_LINK_ASSERT) {
-                       int func = BP_FUNC(bp);
+       bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
 
-                       REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
-                       bp->mf_config = SHMEM_RD(bp,
-                                          mf_cfg.func_mf_config[func].config);
-                       val = SHMEM_RD(bp, func_mb[func].drv_status);
-                       if (val & DRV_STATUS_DCC_EVENT_MASK)
-                               bnx2x_dcc_event(bp,
-                                           (val & DRV_STATUS_DCC_EVENT_MASK));
-                       bnx2x__link_status_update(bp);
-                       if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
-                               bnx2x_pmf_update(bp);
+#ifdef BNX2X_STOP_ON_ERROR
+       if (unlikely(bp->panic))
+               return IRQ_HANDLED;
+#endif
 
-               } else if (attn & BNX2X_MC_ASSERT_BITS) {
+#ifdef BCM_CNIC
+       {
+               struct cnic_ops *c_ops;
 
-                       BNX2X_ERR("MC assert!\n");
-                       REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
-                       REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
-                       REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
-                       REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
-                       bnx2x_panic();
+               rcu_read_lock();
+               c_ops = rcu_dereference(bp->cnic_ops);
+               if (c_ops)
+                       c_ops->cnic_handler(bp->cnic_data, NULL);
+               rcu_read_unlock();
+       }
+#endif
+       queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
 
-               } else if (attn & BNX2X_MCP_ASSERT) {
+       return IRQ_HANDLED;
+}
 
-                       BNX2X_ERR("MCP assert!\n");
-                       REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
-                       bnx2x_fw_dump(bp);
+/* end of slow path */
 
-               } else
-                       BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
+static void bnx2x_timer(unsigned long data)
+{
+       struct bnx2x *bp = (struct bnx2x *) data;
+
+       if (!netif_running(bp->dev))
+               return;
+
+       if (atomic_read(&bp->intr_sem) != 0)
+               goto timer_restart;
+
+       if (poll) {
+               struct bnx2x_fastpath *fp = &bp->fp[0];
+               int rc;
+
+               bnx2x_tx_int(fp);
+               rc = bnx2x_rx_int(fp, 1000);
        }
 
-       if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
-               BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
-               if (attn & BNX2X_GRC_TIMEOUT) {
-                       val = CHIP_IS_E1H(bp) ?
-                               REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
-                       BNX2X_ERR("GRC time-out 0x%08x\n", val);
-               }
-               if (attn & BNX2X_GRC_RSV) {
-                       val = CHIP_IS_E1H(bp) ?
-                               REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
-                       BNX2X_ERR("GRC reserved 0x%08x\n", val);
+       if (!BP_NOMCP(bp)) {
+               int func = BP_FUNC(bp);
+               u32 drv_pulse;
+               u32 mcp_pulse;
+
+               ++bp->fw_drv_pulse_wr_seq;
+               bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
+               /* TBD - add SYSTEM_TIME */
+               drv_pulse = bp->fw_drv_pulse_wr_seq;
+               SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
+
+               mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
+                            MCP_PULSE_SEQ_MASK);
+               /* The delta between driver pulse and mcp response
+                * should be 1 (before mcp response) or 0 (after mcp response)
+                */
+               if ((drv_pulse != mcp_pulse) &&
+                   (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
+                       /* someone lost a heartbeat... */
+                       BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
+                                 drv_pulse, mcp_pulse);
                }
-               REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
        }
+
+       if (bp->state == BNX2X_STATE_OPEN)
+               bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
+
+timer_restart:
+       mod_timer(&bp->timer, jiffies + bp->current_interval);
 }
 
-static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
-static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
+/* end of Statistics */
 
+/* nic init */
 
-#define BNX2X_MISC_GEN_REG      MISC_REG_GENERIC_POR_1
-#define LOAD_COUNTER_BITS      16 /* Number of bits for load counter */
-#define LOAD_COUNTER_MASK      (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
-#define RESET_DONE_FLAG_MASK   (~LOAD_COUNTER_MASK)
-#define RESET_DONE_FLAG_SHIFT  LOAD_COUNTER_BITS
-#define CHIP_PARITY_SUPPORTED(bp)   (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
 /*
- * should be run under rtnl lock
+ * nic init service functions
  */
-static inline void bnx2x_set_reset_done(struct bnx2x *bp)
-{
-       u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
-       val &= ~(1 << RESET_DONE_FLAG_SHIFT);
-       REG_WR(bp, BNX2X_MISC_GEN_REG, val);
-       barrier();
-       mmiowb();
-}
 
-/*
- * should be run under rtnl lock
- */
-static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
+static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
 {
-       u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
-       val |= (1 << 16);
-       REG_WR(bp, BNX2X_MISC_GEN_REG, val);
-       barrier();
-       mmiowb();
-}
+       int port = BP_PORT(bp);
 
-/*
- * should be run under rtnl lock
- */
-static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
-{
-       u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
-       DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
-       return (val & RESET_DONE_FLAG_MASK) ? false : true;
+       /* "CSTORM" */
+       bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
+                       CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
+                       CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
+       bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
+                       CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
+                       CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
 }
 
-/*
- * should be run under rtnl lock
- */
-static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
+void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
+                         dma_addr_t mapping, int sb_id)
 {
-       u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+       int port = BP_PORT(bp);
+       int func = BP_FUNC(bp);
+       int index;
+       u64 section;
 
-       DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+       /* USTORM */
+       section = ((u64)mapping) + offsetof(struct host_status_block,
+                                           u_status_block);
+       sb->u_status_block.status_block_id = sb_id;
 
-       val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
-       REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
-       barrier();
-       mmiowb();
-}
+       REG_WR(bp, BAR_CSTRORM_INTMEM +
+              CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
+       REG_WR(bp, BAR_CSTRORM_INTMEM +
+              ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
+              U64_HI(section));
+       REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
+               CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
 
-/*
- * should be run under rtnl lock
- */
-static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
-{
-       u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+       for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
+               REG_WR16(bp, BAR_CSTRORM_INTMEM +
+                        CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
 
-       DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+       /* CSTORM */
+       section = ((u64)mapping) + offsetof(struct host_status_block,
+                                           c_status_block);
+       sb->c_status_block.status_block_id = sb_id;
 
-       val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
-       REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
-       barrier();
-       mmiowb();
+       REG_WR(bp, BAR_CSTRORM_INTMEM +
+              CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
+       REG_WR(bp, BAR_CSTRORM_INTMEM +
+              ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
+              U64_HI(section));
+       REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
+               CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
 
-       return val1;
-}
+       for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
+               REG_WR16(bp, BAR_CSTRORM_INTMEM +
+                        CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
 
-/*
- * should be run under rtnl lock
- */
-static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
-{
-       return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
+       bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
 }
 
-static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
+static void bnx2x_zero_def_sb(struct bnx2x *bp)
 {
-       u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
-       REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
-}
+       int func = BP_FUNC(bp);
 
-static inline void _print_next_block(int idx, const char *blk)
-{
-       if (idx)
-               pr_cont(", ");
-       pr_cont("%s", blk);
+       bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
+                       TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
+                       sizeof(struct tstorm_def_status_block)/4);
+       bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
+                       CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
+                       sizeof(struct cstorm_def_status_block_u)/4);
+       bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
+                       CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
+                       sizeof(struct cstorm_def_status_block_c)/4);
+       bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
+                       XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
+                       sizeof(struct xstorm_def_status_block)/4);
 }
 
-static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
+static void bnx2x_init_def_sb(struct bnx2x *bp,
+                             struct host_def_status_block *def_sb,
+                             dma_addr_t mapping, int sb_id)
 {
-       int i = 0;
-       u32 cur_bit = 0;
-       for (i = 0; sig; i++) {
-               cur_bit = ((u32)0x1 << i);
-               if (sig & cur_bit) {
-                       switch (cur_bit) {
-                       case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
-                               _print_next_block(par_num++, "BRB");
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
-                               _print_next_block(par_num++, "PARSER");
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
-                               _print_next_block(par_num++, "TSDM");
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
-                               _print_next_block(par_num++, "SEARCHER");
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
-                               _print_next_block(par_num++, "TSEMI");
-                               break;
-                       }
+       int port = BP_PORT(bp);
+       int func = BP_FUNC(bp);
+       int index, val, reg_offset;
+       u64 section;
 
-                       /* Clear the bit */
-                       sig &= ~cur_bit;
-               }
+       /* ATTN */
+       section = ((u64)mapping) + offsetof(struct host_def_status_block,
+                                           atten_status_block);
+       def_sb->atten_status_block.status_block_id = sb_id;
+
+       bp->attn_state = 0;
+
+       reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
+                            MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+
+       for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
+               bp->attn_group[index].sig[0] = REG_RD(bp,
+                                                    reg_offset + 0x10*index);
+               bp->attn_group[index].sig[1] = REG_RD(bp,
+                                              reg_offset + 0x4 + 0x10*index);
+               bp->attn_group[index].sig[2] = REG_RD(bp,
+                                              reg_offset + 0x8 + 0x10*index);
+               bp->attn_group[index].sig[3] = REG_RD(bp,
+                                              reg_offset + 0xc + 0x10*index);
        }
 
-       return par_num;
-}
+       reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
+                            HC_REG_ATTN_MSG0_ADDR_L);
 
-static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
-{
-       int i = 0;
-       u32 cur_bit = 0;
-       for (i = 0; sig; i++) {
-               cur_bit = ((u32)0x1 << i);
-               if (sig & cur_bit) {
-                       switch (cur_bit) {
-                       case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
-                               _print_next_block(par_num++, "PBCLIENT");
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
-                               _print_next_block(par_num++, "QM");
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
-                               _print_next_block(par_num++, "XSDM");
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
-                               _print_next_block(par_num++, "XSEMI");
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
-                               _print_next_block(par_num++, "DOORBELLQ");
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
-                               _print_next_block(par_num++, "VAUX PCI CORE");
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
-                               _print_next_block(par_num++, "DEBUG");
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
-                               _print_next_block(par_num++, "USDM");
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
-                               _print_next_block(par_num++, "USEMI");
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
-                               _print_next_block(par_num++, "UPB");
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
-                               _print_next_block(par_num++, "CSDM");
-                               break;
-                       }
+       REG_WR(bp, reg_offset, U64_LO(section));
+       REG_WR(bp, reg_offset + 4, U64_HI(section));
 
-                       /* Clear the bit */
-                       sig &= ~cur_bit;
-               }
-       }
+       reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
 
-       return par_num;
-}
+       val = REG_RD(bp, reg_offset);
+       val |= sb_id;
+       REG_WR(bp, reg_offset, val);
 
-static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
-{
-       int i = 0;
-       u32 cur_bit = 0;
-       for (i = 0; sig; i++) {
-               cur_bit = ((u32)0x1 << i);
-               if (sig & cur_bit) {
-                       switch (cur_bit) {
-                       case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
-                               _print_next_block(par_num++, "CSEMI");
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
-                               _print_next_block(par_num++, "PXP");
-                               break;
-                       case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
-                               _print_next_block(par_num++,
-                                       "PXPPCICLOCKCLIENT");
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
-                               _print_next_block(par_num++, "CFC");
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
-                               _print_next_block(par_num++, "CDU");
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
-                               _print_next_block(par_num++, "IGU");
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
-                               _print_next_block(par_num++, "MISC");
-                               break;
-                       }
+       /* USTORM */
+       section = ((u64)mapping) + offsetof(struct host_def_status_block,
+                                           u_def_status_block);
+       def_sb->u_def_status_block.status_block_id = sb_id;
 
-                       /* Clear the bit */
-                       sig &= ~cur_bit;
-               }
-       }
+       REG_WR(bp, BAR_CSTRORM_INTMEM +
+              CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
+       REG_WR(bp, BAR_CSTRORM_INTMEM +
+              ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
+              U64_HI(section));
+       REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
+               CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
 
-       return par_num;
-}
+       for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
+               REG_WR16(bp, BAR_CSTRORM_INTMEM +
+                        CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
 
-static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
-{
-       int i = 0;
-       u32 cur_bit = 0;
-       for (i = 0; sig; i++) {
-               cur_bit = ((u32)0x1 << i);
-               if (sig & cur_bit) {
-                       switch (cur_bit) {
-                       case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
-                               _print_next_block(par_num++, "MCP ROM");
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
-                               _print_next_block(par_num++, "MCP UMP RX");
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
-                               _print_next_block(par_num++, "MCP UMP TX");
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
-                               _print_next_block(par_num++, "MCP SCPAD");
-                               break;
-                       }
+       /* CSTORM */
+       section = ((u64)mapping) + offsetof(struct host_def_status_block,
+                                           c_def_status_block);
+       def_sb->c_def_status_block.status_block_id = sb_id;
 
-                       /* Clear the bit */
-                       sig &= ~cur_bit;
-               }
-       }
+       REG_WR(bp, BAR_CSTRORM_INTMEM +
+              CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
+       REG_WR(bp, BAR_CSTRORM_INTMEM +
+              ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
+              U64_HI(section));
+       REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
+               CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
 
-       return par_num;
-}
+       for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
+               REG_WR16(bp, BAR_CSTRORM_INTMEM +
+                        CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
 
-static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
-                                    u32 sig2, u32 sig3)
-{
-       if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
-           (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
-               int par_num = 0;
-               DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
-                       "[0]:0x%08x [1]:0x%08x "
-                       "[2]:0x%08x [3]:0x%08x\n",
-                         sig0 & HW_PRTY_ASSERT_SET_0,
-                         sig1 & HW_PRTY_ASSERT_SET_1,
-                         sig2 & HW_PRTY_ASSERT_SET_2,
-                         sig3 & HW_PRTY_ASSERT_SET_3);
-               printk(KERN_ERR"%s: Parity errors detected in blocks: ",
-                      bp->dev->name);
-               par_num = bnx2x_print_blocks_with_parity0(
-                       sig0 & HW_PRTY_ASSERT_SET_0, par_num);
-               par_num = bnx2x_print_blocks_with_parity1(
-                       sig1 & HW_PRTY_ASSERT_SET_1, par_num);
-               par_num = bnx2x_print_blocks_with_parity2(
-                       sig2 & HW_PRTY_ASSERT_SET_2, par_num);
-               par_num = bnx2x_print_blocks_with_parity3(
-                       sig3 & HW_PRTY_ASSERT_SET_3, par_num);
-               printk("\n");
-               return true;
-       } else
-               return false;
+       /* TSTORM */
+       section = ((u64)mapping) + offsetof(struct host_def_status_block,
+                                           t_def_status_block);
+       def_sb->t_def_status_block.status_block_id = sb_id;
+
+       REG_WR(bp, BAR_TSTRORM_INTMEM +
+              TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
+       REG_WR(bp, BAR_TSTRORM_INTMEM +
+              ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
+              U64_HI(section));
+       REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
+               TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
+
+       for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
+               REG_WR16(bp, BAR_TSTRORM_INTMEM +
+                        TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
+
+       /* XSTORM */
+       section = ((u64)mapping) + offsetof(struct host_def_status_block,
+                                           x_def_status_block);
+       def_sb->x_def_status_block.status_block_id = sb_id;
+
+       REG_WR(bp, BAR_XSTRORM_INTMEM +
+              XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
+       REG_WR(bp, BAR_XSTRORM_INTMEM +
+              ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
+              U64_HI(section));
+       REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
+               XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
+
+       for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
+               REG_WR16(bp, BAR_XSTRORM_INTMEM +
+                        XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
+
+       bp->stats_pending = 0;
+       bp->set_mac_pending = 0;
+
+       bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
 }
 
-static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
+void bnx2x_update_coalesce(struct bnx2x *bp)
 {
-       struct attn_route attn;
        int port = BP_PORT(bp);
+       int i;
 
-       attn.sig[0] = REG_RD(bp,
-               MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
-                            port*4);
-       attn.sig[1] = REG_RD(bp,
-               MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
-                            port*4);
-       attn.sig[2] = REG_RD(bp,
-               MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
-                            port*4);
-       attn.sig[3] = REG_RD(bp,
-               MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
-                            port*4);
+       for_each_queue(bp, i) {
+               int sb_id = bp->fp[i].sb_id;
 
-       return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
-                                       attn.sig[3]);
+               /* HC_INDEX_U_ETH_RX_CQ_CONS */
+               REG_WR8(bp, BAR_CSTRORM_INTMEM +
+                       CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
+                                                     U_SB_ETH_RX_CQ_INDEX),
+                       bp->rx_ticks/(4 * BNX2X_BTR));
+               REG_WR16(bp, BAR_CSTRORM_INTMEM +
+                        CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
+                                                      U_SB_ETH_RX_CQ_INDEX),
+                        (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
+
+               /* HC_INDEX_C_ETH_TX_CQ_CONS */
+               REG_WR8(bp, BAR_CSTRORM_INTMEM +
+                       CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
+                                                     C_SB_ETH_TX_CQ_INDEX),
+                       bp->tx_ticks/(4 * BNX2X_BTR));
+               REG_WR16(bp, BAR_CSTRORM_INTMEM +
+                        CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
+                                                      C_SB_ETH_TX_CQ_INDEX),
+                        (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
+       }
 }
 
-static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
+static void bnx2x_init_sp_ring(struct bnx2x *bp)
 {
-       struct attn_route attn, *group_mask;
-       int port = BP_PORT(bp);
-       int index;
-       u32 reg_addr;
-       u32 val;
-       u32 aeu_mask;
+       int func = BP_FUNC(bp);
 
-       /* need to take HW lock because MCP or other port might also
-          try to handle this event */
-       bnx2x_acquire_alr(bp);
+       spin_lock_init(&bp->spq_lock);
 
-       if (bnx2x_chk_parity_attn(bp)) {
-               bp->recovery_state = BNX2X_RECOVERY_INIT;
-               bnx2x_set_reset_in_progress(bp);
-               schedule_delayed_work(&bp->reset_task, 0);
-               /* Disable HW interrupts */
-               bnx2x_int_disable(bp);
-               bnx2x_release_alr(bp);
-               /* In case of parity errors don't handle attentions so that
-                * other function would "see" parity errors.
-                */
-               return;
-       }
-
-       attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
-       attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
-       attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
-       attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
-       DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
-          attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
-
-       for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
-               if (deasserted & (1 << index)) {
-                       group_mask = &bp->attn_group[index];
+       bp->spq_left = MAX_SPQ_PENDING;
+       bp->spq_prod_idx = 0;
+       bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
+       bp->spq_prod_bd = bp->spq;
+       bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
 
-                       DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
-                          index, group_mask->sig[0], group_mask->sig[1],
-                          group_mask->sig[2], group_mask->sig[3]);
+       REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
+              U64_LO(bp->spq_mapping));
+       REG_WR(bp,
+              XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
+              U64_HI(bp->spq_mapping));
 
-                       bnx2x_attn_int_deasserted3(bp,
-                                       attn.sig[3] & group_mask->sig[3]);
-                       bnx2x_attn_int_deasserted1(bp,
-                                       attn.sig[1] & group_mask->sig[1]);
-                       bnx2x_attn_int_deasserted2(bp,
-                                       attn.sig[2] & group_mask->sig[2]);
-                       bnx2x_attn_int_deasserted0(bp,
-                                       attn.sig[0] & group_mask->sig[0]);
-               }
-       }
+       REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
+              bp->spq_prod_idx);
+}
 
-       bnx2x_release_alr(bp);
+static void bnx2x_init_context(struct bnx2x *bp)
+{
+       int i;
 
-       reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
+       /* Rx */
+       for_each_queue(bp, i) {
+               struct eth_context *context = bnx2x_sp(bp, context[i].eth);
+               struct bnx2x_fastpath *fp = &bp->fp[i];
+               u8 cl_id = fp->cl_id;
 
-       val = ~deasserted;
-       DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
-          val, reg_addr);
-       REG_WR(bp, reg_addr, val);
+               context->ustorm_st_context.common.sb_index_numbers =
+                                               BNX2X_RX_SB_INDEX_NUM;
+               context->ustorm_st_context.common.clientId = cl_id;
+               context->ustorm_st_context.common.status_block_id = fp->sb_id;
+               context->ustorm_st_context.common.flags =
+                       (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
+                        USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
+               context->ustorm_st_context.common.statistics_counter_id =
+                                               cl_id;
+               context->ustorm_st_context.common.mc_alignment_log_size =
+                                               BNX2X_RX_ALIGN_SHIFT;
+               context->ustorm_st_context.common.bd_buff_size =
+                                               bp->rx_buf_size;
+               context->ustorm_st_context.common.bd_page_base_hi =
+                                               U64_HI(fp->rx_desc_mapping);
+               context->ustorm_st_context.common.bd_page_base_lo =
+                                               U64_LO(fp->rx_desc_mapping);
+               if (!fp->disable_tpa) {
+                       context->ustorm_st_context.common.flags |=
+                               USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
+                       context->ustorm_st_context.common.sge_buff_size =
+                               (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
+                                          0xffff);
+                       context->ustorm_st_context.common.sge_page_base_hi =
+                                               U64_HI(fp->rx_sge_mapping);
+                       context->ustorm_st_context.common.sge_page_base_lo =
+                                               U64_LO(fp->rx_sge_mapping);
 
-       if (~bp->attn_state & deasserted)
-               BNX2X_ERR("IGU ERROR\n");
+                       context->ustorm_st_context.common.max_sges_for_packet =
+                               SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
+                       context->ustorm_st_context.common.max_sges_for_packet =
+                               ((context->ustorm_st_context.common.
+                                 max_sges_for_packet + PAGES_PER_SGE - 1) &
+                                (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
+               }
 
-       reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
-                         MISC_REG_AEU_MASK_ATTN_FUNC_0;
+               context->ustorm_ag_context.cdu_usage =
+                       CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
+                                              CDU_REGION_NUMBER_UCM_AG,
+                                              ETH_CONNECTION_TYPE);
 
-       bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
-       aeu_mask = REG_RD(bp, reg_addr);
+               context->xstorm_ag_context.cdu_reserved =
+                       CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
+                                              CDU_REGION_NUMBER_XCM_AG,
+                                              ETH_CONNECTION_TYPE);
+       }
 
-       DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
-          aeu_mask, deasserted);
-       aeu_mask |= (deasserted & 0x3ff);
-       DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
+       /* Tx */
+       for_each_queue(bp, i) {
+               struct bnx2x_fastpath *fp = &bp->fp[i];
+               struct eth_context *context =
+                       bnx2x_sp(bp, context[i].eth);
 
-       REG_WR(bp, reg_addr, aeu_mask);
-       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+               context->cstorm_st_context.sb_index_number =
+                                               C_SB_ETH_TX_CQ_INDEX;
+               context->cstorm_st_context.status_block_id = fp->sb_id;
 
-       DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
-       bp->attn_state &= ~deasserted;
-       DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
+               context->xstorm_st_context.tx_bd_page_base_hi =
+                                               U64_HI(fp->tx_desc_mapping);
+               context->xstorm_st_context.tx_bd_page_base_lo =
+                                               U64_LO(fp->tx_desc_mapping);
+               context->xstorm_st_context.statistics_data = (fp->cl_id |
+                               XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
+       }
 }
 
-static void bnx2x_attn_int(struct bnx2x *bp)
+static void bnx2x_init_ind_table(struct bnx2x *bp)
 {
-       /* read local copy of bits */
-       u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
-                                                               attn_bits);
-       u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
-                                                               attn_bits_ack);
-       u32 attn_state = bp->attn_state;
-
-       /* look for changed bits */
-       u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
-       u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
-
-       DP(NETIF_MSG_HW,
-          "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
-          attn_bits, attn_ack, asserted, deasserted);
-
-       if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
-               BNX2X_ERR("BAD attention state\n");
+       int func = BP_FUNC(bp);
+       int i;
 
-       /* handle bits that were raised */
-       if (asserted)
-               bnx2x_attn_int_asserted(bp, asserted);
+       if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
+               return;
 
-       if (deasserted)
-               bnx2x_attn_int_deasserted(bp, deasserted);
+       DP(NETIF_MSG_IFUP,
+          "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
+       for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
+               REG_WR8(bp, BAR_TSTRORM_INTMEM +
+                       TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
+                       bp->fp->cl_id + (i % bp->num_queues));
 }
 
-static void bnx2x_sp_task(struct work_struct *work)
+void bnx2x_set_client_config(struct bnx2x *bp)
 {
-       struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
-       u16 status;
+       struct tstorm_eth_client_config tstorm_client = {0};
+       int port = BP_PORT(bp);
+       int i;
 
-       /* Return here if interrupt is disabled */
-       if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
-               DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
-               return;
+       tstorm_client.mtu = bp->dev->mtu;
+       tstorm_client.config_flags =
+                               (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
+                                TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
+#ifdef BCM_VLAN
+       if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
+               tstorm_client.config_flags |=
+                               TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
+               DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
        }
+#endif
 
-       status = bnx2x_update_dsb_idx(bp);
-/*     if (status == 0)                                     */
-/*             BNX2X_ERR("spurious slowpath interrupt!\n"); */
-
-       DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
-
-       /* HW attentions */
-       if (status & 0x1) {
-               bnx2x_attn_int(bp);
-               status &= ~0x1;
-       }
+       for_each_queue(bp, i) {
+               tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
 
-       /* CStorm events: STAT_QUERY */
-       if (status & 0x2) {
-               DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
-               status &= ~0x2;
+               REG_WR(bp, BAR_TSTRORM_INTMEM +
+                      TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
+                      ((u32 *)&tstorm_client)[0]);
+               REG_WR(bp, BAR_TSTRORM_INTMEM +
+                      TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
+                      ((u32 *)&tstorm_client)[1]);
        }
 
-       if (unlikely(status))
-               DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
-                  status);
-
-       bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
-                    IGU_INT_NOP, 1);
-       bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
-                    IGU_INT_NOP, 1);
-       bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
-                    IGU_INT_NOP, 1);
-       bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
-                    IGU_INT_NOP, 1);
-       bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
-                    IGU_INT_ENABLE, 1);
+       DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
+          ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
 }
 
-static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
+void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
 {
-       struct net_device *dev = dev_instance;
-       struct bnx2x *bp = netdev_priv(dev);
+       struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
+       int mode = bp->rx_mode;
+       int mask = bp->rx_mode_cl_mask;
+       int func = BP_FUNC(bp);
+       int port = BP_PORT(bp);
+       int i;
+       /* All but management unicast packets should pass to the host as well */
+       u32 llh_mask =
+               NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
+               NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
+               NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
+               NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
 
-       /* Return here if interrupt is disabled */
-       if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
-               DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
-               return IRQ_HANDLED;
-       }
+       DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
 
-       bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
+       switch (mode) {
+       case BNX2X_RX_MODE_NONE: /* no Rx */
+               tstorm_mac_filter.ucast_drop_all = mask;
+               tstorm_mac_filter.mcast_drop_all = mask;
+               tstorm_mac_filter.bcast_drop_all = mask;
+               break;
 
-#ifdef BNX2X_STOP_ON_ERROR
-       if (unlikely(bp->panic))
-               return IRQ_HANDLED;
-#endif
-
-#ifdef BCM_CNIC
-       {
-               struct cnic_ops *c_ops;
-
-               rcu_read_lock();
-               c_ops = rcu_dereference(bp->cnic_ops);
-               if (c_ops)
-                       c_ops->cnic_handler(bp->cnic_data, NULL);
-               rcu_read_unlock();
-       }
-#endif
-       queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
-
-       return IRQ_HANDLED;
-}
-
-/* end of slow path */
-
-/* Statistics */
-
-/****************************************************************************
-* Macros
-****************************************************************************/
-
-/* sum[hi:lo] += add[hi:lo] */
-#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
-       do { \
-               s_lo += a_lo; \
-               s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
-       } while (0)
-
-/* difference = minuend - subtrahend */
-#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
-       do { \
-               if (m_lo < s_lo) { \
-                       /* underflow */ \
-                       d_hi = m_hi - s_hi; \
-                       if (d_hi > 0) { \
-                               /* we can 'loan' 1 */ \
-                               d_hi--; \
-                               d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
-                       } else { \
-                               /* m_hi <= s_hi */ \
-                               d_hi = 0; \
-                               d_lo = 0; \
-                       } \
-               } else { \
-                       /* m_lo >= s_lo */ \
-                       if (m_hi < s_hi) { \
-                               d_hi = 0; \
-                               d_lo = 0; \
-                       } else { \
-                               /* m_hi >= s_hi */ \
-                               d_hi = m_hi - s_hi; \
-                               d_lo = m_lo - s_lo; \
-                       } \
-               } \
-       } while (0)
-
-#define UPDATE_STAT64(s, t) \
-       do { \
-               DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
-                       diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
-               pstats->mac_stx[0].t##_hi = new->s##_hi; \
-               pstats->mac_stx[0].t##_lo = new->s##_lo; \
-               ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
-                      pstats->mac_stx[1].t##_lo, diff.lo); \
-       } while (0)
-
-#define UPDATE_STAT64_NIG(s, t) \
-       do { \
-               DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
-                       diff.lo, new->s##_lo, old->s##_lo); \
-               ADD_64(estats->t##_hi, diff.hi, \
-                      estats->t##_lo, diff.lo); \
-       } while (0)
-
-/* sum[hi:lo] += add */
-#define ADD_EXTEND_64(s_hi, s_lo, a) \
-       do { \
-               s_lo += a; \
-               s_hi += (s_lo < a) ? 1 : 0; \
-       } while (0)
-
-#define UPDATE_EXTEND_STAT(s) \
-       do { \
-               ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
-                             pstats->mac_stx[1].s##_lo, \
-                             new->s); \
-       } while (0)
+       case BNX2X_RX_MODE_NORMAL:
+               tstorm_mac_filter.bcast_accept_all = mask;
+               break;
 
-#define UPDATE_EXTEND_TSTAT(s, t) \
-       do { \
-               diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
-               old_tclient->s = tclient->s; \
-               ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
-       } while (0)
+       case BNX2X_RX_MODE_ALLMULTI:
+               tstorm_mac_filter.mcast_accept_all = mask;
+               tstorm_mac_filter.bcast_accept_all = mask;
+               break;
 
-#define UPDATE_EXTEND_USTAT(s, t) \
-       do { \
-               diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
-               old_uclient->s = uclient->s; \
-               ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
-       } while (0)
+       case BNX2X_RX_MODE_PROMISC:
+               tstorm_mac_filter.ucast_accept_all = mask;
+               tstorm_mac_filter.mcast_accept_all = mask;
+               tstorm_mac_filter.bcast_accept_all = mask;
+               /* pass management unicast packets as well */
+               llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
+               break;
 
-#define UPDATE_EXTEND_XSTAT(s, t) \
-       do { \
-               diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
-               old_xclient->s = xclient->s; \
-               ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
-       } while (0)
+       default:
+               BNX2X_ERR("BAD rx mode (%d)\n", mode);
+               break;
+       }
 
-/* minuend -= subtrahend */
-#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
-       do { \
-               DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
-       } while (0)
+       REG_WR(bp,
+              (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
+              llh_mask);
 
-/* minuend[hi:lo] -= subtrahend */
-#define SUB_EXTEND_64(m_hi, m_lo, s) \
-       do { \
-               SUB_64(m_hi, 0, m_lo, s); \
-       } while (0)
+       for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
+               REG_WR(bp, BAR_TSTRORM_INTMEM +
+                      TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
+                      ((u32 *)&tstorm_mac_filter)[i]);
 
-#define SUB_EXTEND_USTAT(s, t) \
-       do { \
-               diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
-               SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
-       } while (0)
+/*             DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
+                  ((u32 *)&tstorm_mac_filter)[i]); */
+       }
 
-/*
- * General service functions
- */
+       if (mode != BNX2X_RX_MODE_NONE)
+               bnx2x_set_client_config(bp);
+}
 
-static inline long bnx2x_hilo(u32 *hiref)
+static void bnx2x_init_internal_common(struct bnx2x *bp)
 {
-       u32 lo = *(hiref + 1);
-#if (BITS_PER_LONG == 64)
-       u32 hi = *hiref;
+       int i;
 
-       return HILO_U64(hi, lo);
-#else
-       return lo;
-#endif
+       /* Zero this manually as its initialization is
+          currently missing in the initTool */
+       for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
+               REG_WR(bp, BAR_USTRORM_INTMEM +
+                      USTORM_AGG_DATA_OFFSET + i * 4, 0);
 }
 
-/*
- * Init service functions
- */
-
-static void bnx2x_storm_stats_post(struct bnx2x *bp)
+static void bnx2x_init_internal_port(struct bnx2x *bp)
 {
-       if (!bp->stats_pending) {
-               struct eth_query_ramrod_data ramrod_data = {0};
-               int i, rc;
+       int port = BP_PORT(bp);
 
-               ramrod_data.drv_counter = bp->stats_counter++;
-               ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
-               for_each_queue(bp, i)
-                       ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
-
-               rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
-                                  ((u32 *)&ramrod_data)[1],
-                                  ((u32 *)&ramrod_data)[0], 0);
-               if (rc == 0) {
-                       /* stats ramrod has it's own slot on the spq */
-                       bp->spq_left++;
-                       bp->stats_pending = 1;
-               }
-       }
+       REG_WR(bp,
+              BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
+       REG_WR(bp,
+              BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
+       REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
+       REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
 }
 
-static void bnx2x_hw_stats_post(struct bnx2x *bp)
+static void bnx2x_init_internal_func(struct bnx2x *bp)
 {
-       struct dmae_command *dmae = &bp->stats_dmae;
-       u32 *stats_comp = bnx2x_sp(bp, stats_comp);
-
-       *stats_comp = DMAE_COMP_VAL;
-       if (CHIP_REV_IS_SLOW(bp))
-               return;
-
-       /* loader */
-       if (bp->executer_idx) {
-               int loader_idx = PMF_DMAE_C(bp);
-
-               memset(dmae, 0, sizeof(struct dmae_command));
-
-               dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
-                               DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
-                               DMAE_CMD_DST_RESET |
-#ifdef __BIG_ENDIAN
-                               DMAE_CMD_ENDIANITY_B_DW_SWAP |
-#else
-                               DMAE_CMD_ENDIANITY_DW_SWAP |
-#endif
-                               (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
-                                              DMAE_CMD_PORT_0) |
-                               (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
-               dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
-               dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
-               dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
-                                    sizeof(struct dmae_command) *
-                                    (loader_idx + 1)) >> 2;
-               dmae->dst_addr_hi = 0;
-               dmae->len = sizeof(struct dmae_command) >> 2;
-               if (CHIP_IS_E1(bp))
-                       dmae->len--;
-               dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
-               dmae->comp_addr_hi = 0;
-               dmae->comp_val = 1;
+       struct tstorm_eth_function_common_config tstorm_config = {0};
+       struct stats_indication_flags stats_flags = {0};
+       int port = BP_PORT(bp);
+       int func = BP_FUNC(bp);
+       int i, j;
+       u32 offset;
+       u16 max_agg_size;
 
-               *stats_comp = 0;
-               bnx2x_post_dmae(bp, dmae, loader_idx);
+       tstorm_config.config_flags = RSS_FLAGS(bp);
 
-       } else if (bp->func_stx) {
-               *stats_comp = 0;
-               bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
-       }
-}
+       if (is_multi(bp))
+               tstorm_config.rss_result_mask = MULTI_MASK;
 
-static int bnx2x_stats_comp(struct bnx2x *bp)
-{
-       u32 *stats_comp = bnx2x_sp(bp, stats_comp);
-       int cnt = 10;
+       /* Enable TPA if needed */
+       if (bp->flags & TPA_ENABLE_FLAG)
+               tstorm_config.config_flags |=
+                       TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
 
-       might_sleep();
-       while (*stats_comp != DMAE_COMP_VAL) {
-               if (!cnt) {
-                       BNX2X_ERR("timeout waiting for stats finished\n");
-                       break;
-               }
-               cnt--;
-               msleep(1);
-       }
-       return 1;
-}
+       if (IS_E1HMF(bp))
+               tstorm_config.config_flags |=
+                               TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
 
-/*
- * Statistics service functions
- */
+       tstorm_config.leading_client_id = BP_L_ID(bp);
 
-static void bnx2x_stats_pmf_update(struct bnx2x *bp)
-{
-       struct dmae_command *dmae;
-       u32 opcode;
-       int loader_idx = PMF_DMAE_C(bp);
-       u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+       REG_WR(bp, BAR_TSTRORM_INTMEM +
+              TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
+              (*(u32 *)&tstorm_config));
 
-       /* sanity */
-       if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
-               BNX2X_ERR("BUG!\n");
-               return;
-       }
+       bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
+       bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
+       bnx2x_set_storm_rx_mode(bp);
 
-       bp->executer_idx = 0;
+       for_each_queue(bp, i) {
+               u8 cl_id = bp->fp[i].cl_id;
 
-       opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
-                 DMAE_CMD_C_ENABLE |
-                 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
-#ifdef __BIG_ENDIAN
-                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
-#else
-                 DMAE_CMD_ENDIANITY_DW_SWAP |
-#endif
-                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
-                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
-
-       dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-       dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
-       dmae->src_addr_lo = bp->port.port_stx >> 2;
-       dmae->src_addr_hi = 0;
-       dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
-       dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
-       dmae->len = DMAE_LEN32_RD_MAX;
-       dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
-       dmae->comp_addr_hi = 0;
-       dmae->comp_val = 1;
-
-       dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-       dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
-       dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
-       dmae->src_addr_hi = 0;
-       dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
-                                  DMAE_LEN32_RD_MAX * 4);
-       dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
-                                  DMAE_LEN32_RD_MAX * 4);
-       dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
-       dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
-       dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
-       dmae->comp_val = DMAE_COMP_VAL;
-
-       *stats_comp = 0;
-       bnx2x_hw_stats_post(bp);
-       bnx2x_stats_comp(bp);
-}
-
-static void bnx2x_port_stats_init(struct bnx2x *bp)
-{
-       struct dmae_command *dmae;
-       int port = BP_PORT(bp);
-       int vn = BP_E1HVN(bp);
-       u32 opcode;
-       int loader_idx = PMF_DMAE_C(bp);
-       u32 mac_addr;
-       u32 *stats_comp = bnx2x_sp(bp, stats_comp);
-
-       /* sanity */
-       if (!bp->link_vars.link_up || !bp->port.pmf) {
-               BNX2X_ERR("BUG!\n");
-               return;
-       }
+               /* reset xstorm per client statistics */
+               offset = BAR_XSTRORM_INTMEM +
+                        XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
+               for (j = 0;
+                    j < sizeof(struct xstorm_per_client_stats) / 4; j++)
+                       REG_WR(bp, offset + j*4, 0);
 
-       bp->executer_idx = 0;
+               /* reset tstorm per client statistics */
+               offset = BAR_TSTRORM_INTMEM +
+                        TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
+               for (j = 0;
+                    j < sizeof(struct tstorm_per_client_stats) / 4; j++)
+                       REG_WR(bp, offset + j*4, 0);
 
-       /* MCP */
-       opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
-                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
-                 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
-#ifdef __BIG_ENDIAN
-                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
-#else
-                 DMAE_CMD_ENDIANITY_DW_SWAP |
-#endif
-                 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
-                 (vn << DMAE_CMD_E1HVN_SHIFT));
-
-       if (bp->port.port_stx) {
-
-               dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-               dmae->opcode = opcode;
-               dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
-               dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
-               dmae->dst_addr_lo = bp->port.port_stx >> 2;
-               dmae->dst_addr_hi = 0;
-               dmae->len = sizeof(struct host_port_stats) >> 2;
-               dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
-               dmae->comp_addr_hi = 0;
-               dmae->comp_val = 1;
-       }
-
-       if (bp->func_stx) {
-
-               dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-               dmae->opcode = opcode;
-               dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
-               dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
-               dmae->dst_addr_lo = bp->func_stx >> 2;
-               dmae->dst_addr_hi = 0;
-               dmae->len = sizeof(struct host_func_stats) >> 2;
-               dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
-               dmae->comp_addr_hi = 0;
-               dmae->comp_val = 1;
-       }
-
-       /* MAC */
-       opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
-                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
-                 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
-#ifdef __BIG_ENDIAN
-                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
-#else
-                 DMAE_CMD_ENDIANITY_DW_SWAP |
-#endif
-                 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
-                 (vn << DMAE_CMD_E1HVN_SHIFT));
-
-       if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
-
-               mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
-                                  NIG_REG_INGRESS_BMAC0_MEM);
-
-               /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
-                  BIGMAC_REGISTER_TX_STAT_GTBYT */
-               dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-               dmae->opcode = opcode;
-               dmae->src_addr_lo = (mac_addr +
-                                    BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
-               dmae->src_addr_hi = 0;
-               dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
-               dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
-               dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
-                            BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
-               dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
-               dmae->comp_addr_hi = 0;
-               dmae->comp_val = 1;
-
-               /* BIGMAC_REGISTER_RX_STAT_GR64 ..
-                  BIGMAC_REGISTER_RX_STAT_GRIPJ */
-               dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-               dmae->opcode = opcode;
-               dmae->src_addr_lo = (mac_addr +
-                                    BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
-               dmae->src_addr_hi = 0;
-               dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
-                               offsetof(struct bmac_stats, rx_stat_gr64_lo));
-               dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
-                               offsetof(struct bmac_stats, rx_stat_gr64_lo));
-               dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
-                            BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
-               dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
-               dmae->comp_addr_hi = 0;
-               dmae->comp_val = 1;
-
-       } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
-
-               mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
-
-               /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
-               dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-               dmae->opcode = opcode;
-               dmae->src_addr_lo = (mac_addr +
-                                    EMAC_REG_EMAC_RX_STAT_AC) >> 2;
-               dmae->src_addr_hi = 0;
-               dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
-               dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
-               dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
-               dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
-               dmae->comp_addr_hi = 0;
-               dmae->comp_val = 1;
-
-               /* EMAC_REG_EMAC_RX_STAT_AC_28 */
-               dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-               dmae->opcode = opcode;
-               dmae->src_addr_lo = (mac_addr +
-                                    EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
-               dmae->src_addr_hi = 0;
-               dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
-                    offsetof(struct emac_stats, rx_stat_falsecarriererrors));
-               dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
-                    offsetof(struct emac_stats, rx_stat_falsecarriererrors));
-               dmae->len = 1;
-               dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
-               dmae->comp_addr_hi = 0;
-               dmae->comp_val = 1;
-
-               /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
-               dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-               dmae->opcode = opcode;
-               dmae->src_addr_lo = (mac_addr +
-                                    EMAC_REG_EMAC_TX_STAT_AC) >> 2;
-               dmae->src_addr_hi = 0;
-               dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
-                       offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
-               dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
-                       offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
-               dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
-               dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
-               dmae->comp_addr_hi = 0;
-               dmae->comp_val = 1;
-       }
-
-       /* NIG */
-       dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-       dmae->opcode = opcode;
-       dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
-                                   NIG_REG_STAT0_BRB_DISCARD) >> 2;
-       dmae->src_addr_hi = 0;
-       dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
-       dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
-       dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
-       dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
-       dmae->comp_addr_hi = 0;
-       dmae->comp_val = 1;
-
-       dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-       dmae->opcode = opcode;
-       dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
-                                   NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
-       dmae->src_addr_hi = 0;
-       dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
-                       offsetof(struct nig_stats, egress_mac_pkt0_lo));
-       dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
-                       offsetof(struct nig_stats, egress_mac_pkt0_lo));
-       dmae->len = (2*sizeof(u32)) >> 2;
-       dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
-       dmae->comp_addr_hi = 0;
-       dmae->comp_val = 1;
-
-       dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-       dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
-                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
-                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
-#ifdef __BIG_ENDIAN
-                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
-#else
-                       DMAE_CMD_ENDIANITY_DW_SWAP |
-#endif
-                       (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
-                       (vn << DMAE_CMD_E1HVN_SHIFT));
-       dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
-                                   NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
-       dmae->src_addr_hi = 0;
-       dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
-                       offsetof(struct nig_stats, egress_mac_pkt1_lo));
-       dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
-                       offsetof(struct nig_stats, egress_mac_pkt1_lo));
-       dmae->len = (2*sizeof(u32)) >> 2;
-       dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
-       dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
-       dmae->comp_val = DMAE_COMP_VAL;
-
-       *stats_comp = 0;
-}
-
-static void bnx2x_func_stats_init(struct bnx2x *bp)
-{
-       struct dmae_command *dmae = &bp->stats_dmae;
-       u32 *stats_comp = bnx2x_sp(bp, stats_comp);
-
-       /* sanity */
-       if (!bp->func_stx) {
-               BNX2X_ERR("BUG!\n");
-               return;
+               /* reset ustorm per client statistics */
+               offset = BAR_USTRORM_INTMEM +
+                        USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
+               for (j = 0;
+                    j < sizeof(struct ustorm_per_client_stats) / 4; j++)
+                       REG_WR(bp, offset + j*4, 0);
        }
 
-       bp->executer_idx = 0;
-       memset(dmae, 0, sizeof(struct dmae_command));
+       /* Init statistics related context */
+       stats_flags.collect_eth = 1;
 
-       dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
-                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
-                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
-#ifdef __BIG_ENDIAN
-                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
-#else
-                       DMAE_CMD_ENDIANITY_DW_SWAP |
-#endif
-                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
-                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
-       dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
-       dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
-       dmae->dst_addr_lo = bp->func_stx >> 2;
-       dmae->dst_addr_hi = 0;
-       dmae->len = sizeof(struct host_func_stats) >> 2;
-       dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
-       dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
-       dmae->comp_val = DMAE_COMP_VAL;
+       REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
+              ((u32 *)&stats_flags)[0]);
+       REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
+              ((u32 *)&stats_flags)[1]);
 
-       *stats_comp = 0;
-}
+       REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
+              ((u32 *)&stats_flags)[0]);
+       REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
+              ((u32 *)&stats_flags)[1]);
 
-static void bnx2x_stats_start(struct bnx2x *bp)
-{
-       if (bp->port.pmf)
-               bnx2x_port_stats_init(bp);
-
-       else if (bp->func_stx)
-               bnx2x_func_stats_init(bp);
-
-       bnx2x_hw_stats_post(bp);
-       bnx2x_storm_stats_post(bp);
-}
-
-static void bnx2x_stats_pmf_start(struct bnx2x *bp)
-{
-       bnx2x_stats_comp(bp);
-       bnx2x_stats_pmf_update(bp);
-       bnx2x_stats_start(bp);
-}
-
-static void bnx2x_stats_restart(struct bnx2x *bp)
-{
-       bnx2x_stats_comp(bp);
-       bnx2x_stats_start(bp);
-}
-
-static void bnx2x_bmac_stats_update(struct bnx2x *bp)
-{
-       struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
-       struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
-       struct bnx2x_eth_stats *estats = &bp->eth_stats;
-       struct {
-               u32 lo;
-               u32 hi;
-       } diff;
-
-       UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
-       UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
-       UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
-       UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
-       UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
-       UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
-       UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
-       UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
-       UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
-       UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
-       UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
-       UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
-       UPDATE_STAT64(tx_stat_gt127,
-                               tx_stat_etherstatspkts65octetsto127octets);
-       UPDATE_STAT64(tx_stat_gt255,
-                               tx_stat_etherstatspkts128octetsto255octets);
-       UPDATE_STAT64(tx_stat_gt511,
-                               tx_stat_etherstatspkts256octetsto511octets);
-       UPDATE_STAT64(tx_stat_gt1023,
-                               tx_stat_etherstatspkts512octetsto1023octets);
-       UPDATE_STAT64(tx_stat_gt1518,
-                               tx_stat_etherstatspkts1024octetsto1522octets);
-       UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
-       UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
-       UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
-       UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
-       UPDATE_STAT64(tx_stat_gterr,
-                               tx_stat_dot3statsinternalmactransmiterrors);
-       UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
-
-       estats->pause_frames_received_hi =
-                               pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
-       estats->pause_frames_received_lo =
-                               pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
-
-       estats->pause_frames_sent_hi =
-                               pstats->mac_stx[1].tx_stat_outxoffsent_hi;
-       estats->pause_frames_sent_lo =
-                               pstats->mac_stx[1].tx_stat_outxoffsent_lo;
-}
-
-static void bnx2x_emac_stats_update(struct bnx2x *bp)
-{
-       struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
-       struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
-       struct bnx2x_eth_stats *estats = &bp->eth_stats;
-
-       UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
-       UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
-       UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
-       UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
-       UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
-       UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
-       UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
-       UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
-       UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
-       UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
-       UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
-       UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
-       UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
-       UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
-       UPDATE_EXTEND_STAT(tx_stat_outxonsent);
-       UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
-       UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
-       UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
-       UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
-       UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
-       UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
-       UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
-       UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
-       UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
-       UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
-       UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
-       UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
-       UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
-       UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
-       UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
-       UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
-
-       estats->pause_frames_received_hi =
-                       pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
-       estats->pause_frames_received_lo =
-                       pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
-       ADD_64(estats->pause_frames_received_hi,
-              pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
-              estats->pause_frames_received_lo,
-              pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
-
-       estats->pause_frames_sent_hi =
-                       pstats->mac_stx[1].tx_stat_outxonsent_hi;
-       estats->pause_frames_sent_lo =
-                       pstats->mac_stx[1].tx_stat_outxonsent_lo;
-       ADD_64(estats->pause_frames_sent_hi,
-              pstats->mac_stx[1].tx_stat_outxoffsent_hi,
-              estats->pause_frames_sent_lo,
-              pstats->mac_stx[1].tx_stat_outxoffsent_lo);
-}
-
-static int bnx2x_hw_stats_update(struct bnx2x *bp)
-{
-       struct nig_stats *new = bnx2x_sp(bp, nig_stats);
-       struct nig_stats *old = &(bp->port.old_nig_stats);
-       struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
-       struct bnx2x_eth_stats *estats = &bp->eth_stats;
-       struct {
-               u32 lo;
-               u32 hi;
-       } diff;
-
-       if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
-               bnx2x_bmac_stats_update(bp);
-
-       else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
-               bnx2x_emac_stats_update(bp);
-
-       else { /* unreached */
-               BNX2X_ERR("stats updated by DMAE but no MAC active\n");
-               return -1;
-       }
+       REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
+              ((u32 *)&stats_flags)[0]);
+       REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
+              ((u32 *)&stats_flags)[1]);
 
-       ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
-                     new->brb_discard - old->brb_discard);
-       ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
-                     new->brb_truncate - old->brb_truncate);
+       REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
+              ((u32 *)&stats_flags)[0]);
+       REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
+              ((u32 *)&stats_flags)[1]);
 
-       UPDATE_STAT64_NIG(egress_mac_pkt0,
-                                       etherstatspkts1024octetsto1522octets);
-       UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
+       REG_WR(bp, BAR_XSTRORM_INTMEM +
+              XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
+              U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
+       REG_WR(bp, BAR_XSTRORM_INTMEM +
+              XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
+              U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
 
-       memcpy(old, new, sizeof(struct nig_stats));
+       REG_WR(bp, BAR_TSTRORM_INTMEM +
+              TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
+              U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
+       REG_WR(bp, BAR_TSTRORM_INTMEM +
+              TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
+              U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
 
-       memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
-              sizeof(struct mac_stx));
-       estats->brb_drop_hi = pstats->brb_drop_hi;
-       estats->brb_drop_lo = pstats->brb_drop_lo;
+       REG_WR(bp, BAR_USTRORM_INTMEM +
+              USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
+              U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
+       REG_WR(bp, BAR_USTRORM_INTMEM +
+              USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
+              U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
 
-       pstats->host_port_stats_start = ++pstats->host_port_stats_end;
+       if (CHIP_IS_E1H(bp)) {
+               REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
+                       IS_E1HMF(bp));
+               REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
+                       IS_E1HMF(bp));
+               REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
+                       IS_E1HMF(bp));
+               REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
+                       IS_E1HMF(bp));
 
-       if (!BP_NOMCP(bp)) {
-               u32 nig_timer_max =
-                       SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
-               if (nig_timer_max != estats->nig_timer_max) {
-                       estats->nig_timer_max = nig_timer_max;
-                       BNX2X_ERR("NIG timer max (%u)\n",
-                                 estats->nig_timer_max);
-               }
+               REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
+                        bp->e1hov);
        }
 
-       return 0;
-}
-
-static int bnx2x_storm_stats_update(struct bnx2x *bp)
-{
-       struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
-       struct tstorm_per_port_stats *tport =
-                                       &stats->tstorm_common.port_statistics;
-       struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
-       struct bnx2x_eth_stats *estats = &bp->eth_stats;
-       int i;
-
-       memcpy(&(fstats->total_bytes_received_hi),
-              &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
-              sizeof(struct host_func_stats) - 2*sizeof(u32));
-       estats->error_bytes_received_hi = 0;
-       estats->error_bytes_received_lo = 0;
-       estats->etherstatsoverrsizepkts_hi = 0;
-       estats->etherstatsoverrsizepkts_lo = 0;
-       estats->no_buff_discard_hi = 0;
-       estats->no_buff_discard_lo = 0;
-
+       /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
+       max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
+                                  SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
-               int cl_id = fp->cl_id;
-               struct tstorm_per_client_stats *tclient =
-                               &stats->tstorm_common.client_statistics[cl_id];
-               struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
-               struct ustorm_per_client_stats *uclient =
-                               &stats->ustorm_common.client_statistics[cl_id];
-               struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
-               struct xstorm_per_client_stats *xclient =
-                               &stats->xstorm_common.client_statistics[cl_id];
-               struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
-               struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
-               u32 diff;
-
-               /* are storm stats valid? */
-               if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
-                                                       bp->stats_counter) {
-                       DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
-                          "  xstorm counter (0x%x) != stats_counter (0x%x)\n",
-                          i, xclient->stats_counter, bp->stats_counter);
-                       return -1;
-               }
-               if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
-                                                       bp->stats_counter) {
-                       DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
-                          "  tstorm counter (0x%x) != stats_counter (0x%x)\n",
-                          i, tclient->stats_counter, bp->stats_counter);
-                       return -2;
-               }
-               if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
-                                                       bp->stats_counter) {
-                       DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
-                          "  ustorm counter (0x%x) != stats_counter (0x%x)\n",
-                          i, uclient->stats_counter, bp->stats_counter);
-                       return -4;
-               }
 
-               qstats->total_bytes_received_hi =
-                       le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
-               qstats->total_bytes_received_lo =
-                       le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
-
-               ADD_64(qstats->total_bytes_received_hi,
-                      le32_to_cpu(tclient->rcv_multicast_bytes.hi),
-                      qstats->total_bytes_received_lo,
-                      le32_to_cpu(tclient->rcv_multicast_bytes.lo));
-
-               ADD_64(qstats->total_bytes_received_hi,
-                      le32_to_cpu(tclient->rcv_unicast_bytes.hi),
-                      qstats->total_bytes_received_lo,
-                      le32_to_cpu(tclient->rcv_unicast_bytes.lo));
-
-               SUB_64(qstats->total_bytes_received_hi,
-                      le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
-                      qstats->total_bytes_received_lo,
-                      le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
-
-               SUB_64(qstats->total_bytes_received_hi,
-                      le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
-                      qstats->total_bytes_received_lo,
-                      le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
-
-               SUB_64(qstats->total_bytes_received_hi,
-                      le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
-                      qstats->total_bytes_received_lo,
-                      le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
-
-               qstats->valid_bytes_received_hi =
-                                       qstats->total_bytes_received_hi;
-               qstats->valid_bytes_received_lo =
-                                       qstats->total_bytes_received_lo;
-
-               qstats->error_bytes_received_hi =
-                               le32_to_cpu(tclient->rcv_error_bytes.hi);
-               qstats->error_bytes_received_lo =
-                               le32_to_cpu(tclient->rcv_error_bytes.lo);
-
-               ADD_64(qstats->total_bytes_received_hi,
-                      qstats->error_bytes_received_hi,
-                      qstats->total_bytes_received_lo,
-                      qstats->error_bytes_received_lo);
-
-               UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
-                                       total_unicast_packets_received);
-               UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
-                                       total_multicast_packets_received);
-               UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
-                                       total_broadcast_packets_received);
-               UPDATE_EXTEND_TSTAT(packets_too_big_discard,
-                                       etherstatsoverrsizepkts);
-               UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
-
-               SUB_EXTEND_USTAT(ucast_no_buff_pkts,
-                                       total_unicast_packets_received);
-               SUB_EXTEND_USTAT(mcast_no_buff_pkts,
-                                       total_multicast_packets_received);
-               SUB_EXTEND_USTAT(bcast_no_buff_pkts,
-                                       total_broadcast_packets_received);
-               UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
-               UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
-               UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
-
-               qstats->total_bytes_transmitted_hi =
-                               le32_to_cpu(xclient->unicast_bytes_sent.hi);
-               qstats->total_bytes_transmitted_lo =
-                               le32_to_cpu(xclient->unicast_bytes_sent.lo);
-
-               ADD_64(qstats->total_bytes_transmitted_hi,
-                      le32_to_cpu(xclient->multicast_bytes_sent.hi),
-                      qstats->total_bytes_transmitted_lo,
-                      le32_to_cpu(xclient->multicast_bytes_sent.lo));
-
-               ADD_64(qstats->total_bytes_transmitted_hi,
-                      le32_to_cpu(xclient->broadcast_bytes_sent.hi),
-                      qstats->total_bytes_transmitted_lo,
-                      le32_to_cpu(xclient->broadcast_bytes_sent.lo));
-
-               UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
-                                       total_unicast_packets_transmitted);
-               UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
-                                       total_multicast_packets_transmitted);
-               UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
-                                       total_broadcast_packets_transmitted);
-
-               old_tclient->checksum_discard = tclient->checksum_discard;
-               old_tclient->ttl0_discard = tclient->ttl0_discard;
-
-               ADD_64(fstats->total_bytes_received_hi,
-                      qstats->total_bytes_received_hi,
-                      fstats->total_bytes_received_lo,
-                      qstats->total_bytes_received_lo);
-               ADD_64(fstats->total_bytes_transmitted_hi,
-                      qstats->total_bytes_transmitted_hi,
-                      fstats->total_bytes_transmitted_lo,
-                      qstats->total_bytes_transmitted_lo);
-               ADD_64(fstats->total_unicast_packets_received_hi,
-                      qstats->total_unicast_packets_received_hi,
-                      fstats->total_unicast_packets_received_lo,
-                      qstats->total_unicast_packets_received_lo);
-               ADD_64(fstats->total_multicast_packets_received_hi,
-                      qstats->total_multicast_packets_received_hi,
-                      fstats->total_multicast_packets_received_lo,
-                      qstats->total_multicast_packets_received_lo);
-               ADD_64(fstats->total_broadcast_packets_received_hi,
-                      qstats->total_broadcast_packets_received_hi,
-                      fstats->total_broadcast_packets_received_lo,
-                      qstats->total_broadcast_packets_received_lo);
-               ADD_64(fstats->total_unicast_packets_transmitted_hi,
-                      qstats->total_unicast_packets_transmitted_hi,
-                      fstats->total_unicast_packets_transmitted_lo,
-                      qstats->total_unicast_packets_transmitted_lo);
-               ADD_64(fstats->total_multicast_packets_transmitted_hi,
-                      qstats->total_multicast_packets_transmitted_hi,
-                      fstats->total_multicast_packets_transmitted_lo,
-                      qstats->total_multicast_packets_transmitted_lo);
-               ADD_64(fstats->total_broadcast_packets_transmitted_hi,
-                      qstats->total_broadcast_packets_transmitted_hi,
-                      fstats->total_broadcast_packets_transmitted_lo,
-                      qstats->total_broadcast_packets_transmitted_lo);
-               ADD_64(fstats->valid_bytes_received_hi,
-                      qstats->valid_bytes_received_hi,
-                      fstats->valid_bytes_received_lo,
-                      qstats->valid_bytes_received_lo);
-
-               ADD_64(estats->error_bytes_received_hi,
-                      qstats->error_bytes_received_hi,
-                      estats->error_bytes_received_lo,
-                      qstats->error_bytes_received_lo);
-               ADD_64(estats->etherstatsoverrsizepkts_hi,
-                      qstats->etherstatsoverrsizepkts_hi,
-                      estats->etherstatsoverrsizepkts_lo,
-                      qstats->etherstatsoverrsizepkts_lo);
-               ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
-                      estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
-       }
-
-       ADD_64(fstats->total_bytes_received_hi,
-              estats->rx_stat_ifhcinbadoctets_hi,
-              fstats->total_bytes_received_lo,
-              estats->rx_stat_ifhcinbadoctets_lo);
-
-       memcpy(estats, &(fstats->total_bytes_received_hi),
-              sizeof(struct host_func_stats) - 2*sizeof(u32));
-
-       ADD_64(estats->etherstatsoverrsizepkts_hi,
-              estats->rx_stat_dot3statsframestoolong_hi,
-              estats->etherstatsoverrsizepkts_lo,
-              estats->rx_stat_dot3statsframestoolong_lo);
-       ADD_64(estats->error_bytes_received_hi,
-              estats->rx_stat_ifhcinbadoctets_hi,
-              estats->error_bytes_received_lo,
-              estats->rx_stat_ifhcinbadoctets_lo);
-
-       if (bp->port.pmf) {
-               estats->mac_filter_discard =
-                               le32_to_cpu(tport->mac_filter_discard);
-               estats->xxoverflow_discard =
-                               le32_to_cpu(tport->xxoverflow_discard);
-               estats->brb_truncate_discard =
-                               le32_to_cpu(tport->brb_truncate_discard);
-               estats->mac_discard = le32_to_cpu(tport->mac_discard);
-       }
+               REG_WR(bp, BAR_USTRORM_INTMEM +
+                      USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
+                      U64_LO(fp->rx_comp_mapping));
+               REG_WR(bp, BAR_USTRORM_INTMEM +
+                      USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
+                      U64_HI(fp->rx_comp_mapping));
 
-       fstats->host_func_stats_start = ++fstats->host_func_stats_end;
+               /* Next page */
+               REG_WR(bp, BAR_USTRORM_INTMEM +
+                      USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
+                      U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
+               REG_WR(bp, BAR_USTRORM_INTMEM +
+                      USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
+                      U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
 
-       bp->stats_pending = 0;
+               REG_WR16(bp, BAR_USTRORM_INTMEM +
+                        USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
+                        max_agg_size);
+       }
 
-       return 0;
-}
+       /* dropless flow control */
+       if (CHIP_IS_E1H(bp)) {
+               struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
 
-static void bnx2x_net_stats_update(struct bnx2x *bp)
-{
-       struct bnx2x_eth_stats *estats = &bp->eth_stats;
-       struct net_device_stats *nstats = &bp->dev->stats;
-       int i;
+               rx_pause.bd_thr_low = 250;
+               rx_pause.cqe_thr_low = 250;
+               rx_pause.cos = 1;
+               rx_pause.sge_thr_low = 0;
+               rx_pause.bd_thr_high = 350;
+               rx_pause.cqe_thr_high = 350;
+               rx_pause.sge_thr_high = 0;
 
-       nstats->rx_packets =
-               bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
-               bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
-               bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
+               for_each_queue(bp, i) {
+                       struct bnx2x_fastpath *fp = &bp->fp[i];
 
-       nstats->tx_packets =
-               bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
-               bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
-               bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
+                       if (!fp->disable_tpa) {
+                               rx_pause.sge_thr_low = 150;
+                               rx_pause.sge_thr_high = 250;
+                       }
 
-       nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
 
-       nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
+                       offset = BAR_USTRORM_INTMEM +
+                                USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
+                                                                  fp->cl_id);
+                       for (j = 0;
+                            j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
+                            j++)
+                               REG_WR(bp, offset + j*4,
+                                      ((u32 *)&rx_pause)[j]);
+               }
+       }
 
-       nstats->rx_dropped = estats->mac_discard;
-       for_each_queue(bp, i)
-               nstats->rx_dropped +=
-                       le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
-
-       nstats->tx_dropped = 0;
-
-       nstats->multicast =
-               bnx2x_hilo(&estats->total_multicast_packets_received_hi);
-
-       nstats->collisions =
-               bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
-
-       nstats->rx_length_errors =
-               bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
-               bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
-       nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
-                                bnx2x_hilo(&estats->brb_truncate_hi);
-       nstats->rx_crc_errors =
-               bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
-       nstats->rx_frame_errors =
-               bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
-       nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
-       nstats->rx_missed_errors = estats->xxoverflow_discard;
-
-       nstats->rx_errors = nstats->rx_length_errors +
-                           nstats->rx_over_errors +
-                           nstats->rx_crc_errors +
-                           nstats->rx_frame_errors +
-                           nstats->rx_fifo_errors +
-                           nstats->rx_missed_errors;
-
-       nstats->tx_aborted_errors =
-               bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
-               bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
-       nstats->tx_carrier_errors =
-               bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
-       nstats->tx_fifo_errors = 0;
-       nstats->tx_heartbeat_errors = 0;
-       nstats->tx_window_errors = 0;
-
-       nstats->tx_errors = nstats->tx_aborted_errors +
-                           nstats->tx_carrier_errors +
-           bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
-}
-
-static void bnx2x_drv_stats_update(struct bnx2x *bp)
-{
-       struct bnx2x_eth_stats *estats = &bp->eth_stats;
-       int i;
+       memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
 
-       estats->driver_xoff = 0;
-       estats->rx_err_discard_pkt = 0;
-       estats->rx_skb_alloc_failed = 0;
-       estats->hw_csum_err = 0;
-       for_each_queue(bp, i) {
-               struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
+       /* Init rate shaping and fairness contexts */
+       if (IS_E1HMF(bp)) {
+               int vn;
 
-               estats->driver_xoff += qstats->driver_xoff;
-               estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
-               estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
-               estats->hw_csum_err += qstats->hw_csum_err;
-       }
-}
+               /* During init there is no active link
+                  Until link is up, set link rate to 10Gbps */
+               bp->link_vars.line_speed = SPEED_10000;
+               bnx2x_init_port_minmax(bp);
 
-static void bnx2x_stats_update(struct bnx2x *bp)
-{
-       u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+               if (!BP_NOMCP(bp))
+                       bp->mf_config =
+                             SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
+               bnx2x_calc_vn_weight_sum(bp);
 
-       if (*stats_comp != DMAE_COMP_VAL)
-               return;
+               for (vn = VN_0; vn < E1HVN_MAX; vn++)
+                       bnx2x_init_vn_minmax(bp, 2*vn + port);
 
-       if (bp->port.pmf)
-               bnx2x_hw_stats_update(bp);
+               /* Enable rate shaping and fairness */
+               bp->cmng.flags.cmng_enables |=
+                                       CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
 
-       if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
-               BNX2X_ERR("storm stats were not updated for 3 times\n");
-               bnx2x_panic();
-               return;
+       } else {
+               /* rate shaping and fairness are disabled */
+               DP(NETIF_MSG_IFUP,
+                  "single function mode  minmax will be disabled\n");
        }
 
-       bnx2x_net_stats_update(bp);
-       bnx2x_drv_stats_update(bp);
 
-       if (netif_msg_timer(bp)) {
-               struct bnx2x_eth_stats *estats = &bp->eth_stats;
-               int i;
+       /* Store cmng structures to internal memory */
+       if (bp->port.pmf)
+               for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
+                       REG_WR(bp, BAR_XSTRORM_INTMEM +
+                              XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
+                              ((u32 *)(&bp->cmng))[i]);
+}
 
-               printk(KERN_DEBUG "%s: brb drops %u  brb truncate %u\n",
-                      bp->dev->name,
-                      estats->brb_drop_lo, estats->brb_truncate_lo);
+static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
+{
+       switch (load_code) {
+       case FW_MSG_CODE_DRV_LOAD_COMMON:
+               bnx2x_init_internal_common(bp);
+               /* no break */
 
-               for_each_queue(bp, i) {
-                       struct bnx2x_fastpath *fp = &bp->fp[i];
-                       struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
-
-                       printk(KERN_DEBUG "%s: rx usage(%4u)  *rx_cons_sb(%u)"
-                                         "  rx pkt(%lu)  rx calls(%lu %lu)\n",
-                              fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
-                              fp->rx_comp_cons),
-                              le16_to_cpu(*fp->rx_cons_sb),
-                              bnx2x_hilo(&qstats->
-                                         total_unicast_packets_received_hi),
-                              fp->rx_calls, fp->rx_pkt);
-               }
+       case FW_MSG_CODE_DRV_LOAD_PORT:
+               bnx2x_init_internal_port(bp);
+               /* no break */
 
-               for_each_queue(bp, i) {
-                       struct bnx2x_fastpath *fp = &bp->fp[i];
-                       struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
-                       struct netdev_queue *txq =
-                               netdev_get_tx_queue(bp->dev, i);
-
-                       printk(KERN_DEBUG "%s: tx avail(%4u)  *tx_cons_sb(%u)"
-                                         "  tx pkt(%lu) tx calls (%lu)"
-                                         "  %s (Xoff events %u)\n",
-                              fp->name, bnx2x_tx_avail(fp),
-                              le16_to_cpu(*fp->tx_cons_sb),
-                              bnx2x_hilo(&qstats->
-                                         total_unicast_packets_transmitted_hi),
-                              fp->tx_pkt,
-                              (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
-                              qstats->driver_xoff);
-               }
-       }
+       case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+               bnx2x_init_internal_func(bp);
+               break;
 
-       bnx2x_hw_stats_post(bp);
-       bnx2x_storm_stats_post(bp);
+       default:
+               BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
+               break;
+       }
 }
 
-static void bnx2x_port_stats_stop(struct bnx2x *bp)
+void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
 {
-       struct dmae_command *dmae;
-       u32 opcode;
-       int loader_idx = PMF_DMAE_C(bp);
-       u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+       int i;
 
-       bp->executer_idx = 0;
+       for_each_queue(bp, i) {
+               struct bnx2x_fastpath *fp = &bp->fp[i];
 
-       opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
-                 DMAE_CMD_C_ENABLE |
-                 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
-#ifdef __BIG_ENDIAN
-                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
+               fp->bp = bp;
+               fp->state = BNX2X_FP_STATE_CLOSED;
+               fp->index = i;
+               fp->cl_id = BP_L_ID(bp) + i;
+#ifdef BCM_CNIC
+               fp->sb_id = fp->cl_id + 1;
 #else
-                 DMAE_CMD_ENDIANITY_DW_SWAP |
+               fp->sb_id = fp->cl_id;
 #endif
-                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
-                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
-
-       if (bp->port.port_stx) {
-
-               dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-               if (bp->func_stx)
-                       dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
-               else
-                       dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
-               dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
-               dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
-               dmae->dst_addr_lo = bp->port.port_stx >> 2;
-               dmae->dst_addr_hi = 0;
-               dmae->len = sizeof(struct host_port_stats) >> 2;
-               if (bp->func_stx) {
-                       dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
-                       dmae->comp_addr_hi = 0;
-                       dmae->comp_val = 1;
-               } else {
-                       dmae->comp_addr_lo =
-                               U64_LO(bnx2x_sp_mapping(bp, stats_comp));
-                       dmae->comp_addr_hi =
-                               U64_HI(bnx2x_sp_mapping(bp, stats_comp));
-                       dmae->comp_val = DMAE_COMP_VAL;
-
-                       *stats_comp = 0;
-               }
-       }
-
-       if (bp->func_stx) {
-
-               dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-               dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
-               dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
-               dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
-               dmae->dst_addr_lo = bp->func_stx >> 2;
-               dmae->dst_addr_hi = 0;
-               dmae->len = sizeof(struct host_func_stats) >> 2;
-               dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
-               dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
-               dmae->comp_val = DMAE_COMP_VAL;
-
-               *stats_comp = 0;
+               DP(NETIF_MSG_IFUP,
+                  "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
+                  i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
+               bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
+                             fp->sb_id);
+               bnx2x_update_fpsb_idx(fp);
        }
-}
 
-static void bnx2x_stats_stop(struct bnx2x *bp)
-{
-       int update = 0;
+       /* ensure status block indices were read */
+       rmb();
 
-       bnx2x_stats_comp(bp);
 
-       if (bp->port.pmf)
-               update = (bnx2x_hw_stats_update(bp) == 0);
+       bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
+                         DEF_SB_ID);
+       bnx2x_update_dsb_idx(bp);
+       bnx2x_update_coalesce(bp);
+       bnx2x_init_rx_rings(bp);
+       bnx2x_init_tx_ring(bp);
+       bnx2x_init_sp_ring(bp);
+       bnx2x_init_context(bp);
+       bnx2x_init_internal(bp, load_code);
+       bnx2x_init_ind_table(bp);
+       bnx2x_stats_init(bp);
 
-       update |= (bnx2x_storm_stats_update(bp) == 0);
+       /* At this point, we are ready for interrupts */
+       atomic_set(&bp->intr_sem, 0);
 
-       if (update) {
-               bnx2x_net_stats_update(bp);
+       /* flush all before enabling interrupts */
+       mb();
+       mmiowb();
 
-               if (bp->port.pmf)
-                       bnx2x_port_stats_stop(bp);
+       bnx2x_int_enable(bp);
 
-               bnx2x_hw_stats_post(bp);
-               bnx2x_stats_comp(bp);
-       }
+       /* Check for SPIO5 */
+       bnx2x_attn_int_deasserted0(bp,
+               REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
+                                  AEU_INPUTS_ATTN_BITS_SPIO5);
 }
 
-static void bnx2x_stats_do_nothing(struct bnx2x *bp)
-{
-}
+/* end of nic init */
 
-static const struct {
-       void (*action)(struct bnx2x *bp);
-       enum bnx2x_stats_state next_state;
-} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
-/* state       event   */
-{
-/* DISABLED    PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
-/*             LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
-/*             UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
-/*             STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
-},
-{
-/* ENABLED     PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
-/*             LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
-/*             UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
-/*             STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
-}
-};
+/*
+ * gzip service functions
+ */
 
-static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
+static int bnx2x_gunzip_init(struct bnx2x *bp)
 {
-       enum bnx2x_stats_state state = bp->stats_state;
-
-       if (unlikely(bp->panic))
-               return;
-
-       bnx2x_stats_stm[state][event].action(bp);
-       bp->stats_state = bnx2x_stats_stm[state][event].next_state;
-
-       /* Make sure the state has been "changed" */
-       smp_wmb();
+       bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
+                                           &bp->gunzip_mapping, GFP_KERNEL);
+       if (bp->gunzip_buf  == NULL)
+               goto gunzip_nomem1;
 
-       if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
-               DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
-                  state, event, bp->stats_state);
-}
+       bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
+       if (bp->strm  == NULL)
+               goto gunzip_nomem2;
 
-static void bnx2x_port_stats_base_init(struct bnx2x *bp)
-{
-       struct dmae_command *dmae;
-       u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+       bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
+                                     GFP_KERNEL);
+       if (bp->strm->workspace == NULL)
+               goto gunzip_nomem3;
 
-       /* sanity */
-       if (!bp->port.pmf || !bp->port.port_stx) {
-               BNX2X_ERR("BUG!\n");
-               return;
-       }
+       return 0;
 
-       bp->executer_idx = 0;
+gunzip_nomem3:
+       kfree(bp->strm);
+       bp->strm = NULL;
 
-       dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
-       dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
-                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
-                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
-#ifdef __BIG_ENDIAN
-                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
-#else
-                       DMAE_CMD_ENDIANITY_DW_SWAP |
-#endif
-                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
-                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
-       dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
-       dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
-       dmae->dst_addr_lo = bp->port.port_stx >> 2;
-       dmae->dst_addr_hi = 0;
-       dmae->len = sizeof(struct host_port_stats) >> 2;
-       dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
-       dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
-       dmae->comp_val = DMAE_COMP_VAL;
+gunzip_nomem2:
+       dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+                         bp->gunzip_mapping);
+       bp->gunzip_buf = NULL;
 
-       *stats_comp = 0;
-       bnx2x_hw_stats_post(bp);
-       bnx2x_stats_comp(bp);
+gunzip_nomem1:
+       netdev_err(bp->dev, "Cannot allocate firmware buffer for"
+              " un-compression\n");
+       return -ENOMEM;
 }
 
-static void bnx2x_func_stats_base_init(struct bnx2x *bp)
+static void bnx2x_gunzip_end(struct bnx2x *bp)
 {
-       int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
-       int port = BP_PORT(bp);
-       int func;
-       u32 func_stx;
-
-       /* sanity */
-       if (!bp->port.pmf || !bp->func_stx) {
-               BNX2X_ERR("BUG!\n");
-               return;
-       }
-
-       /* save our func_stx */
-       func_stx = bp->func_stx;
+       kfree(bp->strm->workspace);
 
-       for (vn = VN_0; vn < vn_max; vn++) {
-               func = 2*vn + port;
+       kfree(bp->strm);
+       bp->strm = NULL;
 
-               bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
-               bnx2x_func_stats_init(bp);
-               bnx2x_hw_stats_post(bp);
-               bnx2x_stats_comp(bp);
+       if (bp->gunzip_buf) {
+               dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+                                 bp->gunzip_mapping);
+               bp->gunzip_buf = NULL;
        }
-
-       /* restore our func_stx */
-       bp->func_stx = func_stx;
 }
 
-static void bnx2x_func_stats_base_update(struct bnx2x *bp)
+static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
 {
-       struct dmae_command *dmae = &bp->stats_dmae;
-       u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+       int n, rc;
 
-       /* sanity */
-       if (!bp->func_stx) {
-               BNX2X_ERR("BUG!\n");
-               return;
+       /* check gzip header */
+       if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
+               BNX2X_ERR("Bad gzip header\n");
+               return -EINVAL;
        }
 
-       bp->executer_idx = 0;
-       memset(dmae, 0, sizeof(struct dmae_command));
+       n = 10;
 
-       dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
-                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
-                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
-#ifdef __BIG_ENDIAN
-                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
-#else
-                       DMAE_CMD_ENDIANITY_DW_SWAP |
-#endif
-                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
-                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
-       dmae->src_addr_lo = bp->func_stx >> 2;
-       dmae->src_addr_hi = 0;
-       dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
-       dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
-       dmae->len = sizeof(struct host_func_stats) >> 2;
-       dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
-       dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
-       dmae->comp_val = DMAE_COMP_VAL;
+#define FNAME                          0x8
 
-       *stats_comp = 0;
-       bnx2x_hw_stats_post(bp);
-       bnx2x_stats_comp(bp);
-}
+       if (zbuf[3] & FNAME)
+               while ((zbuf[n++] != 0) && (n < len));
 
-static void bnx2x_stats_init(struct bnx2x *bp)
-{
-       int port = BP_PORT(bp);
-       int func = BP_FUNC(bp);
-       int i;
-
-       bp->stats_pending = 0;
-       bp->executer_idx = 0;
-       bp->stats_counter = 0;
+       bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
+       bp->strm->avail_in = len - n;
+       bp->strm->next_out = bp->gunzip_buf;
+       bp->strm->avail_out = FW_BUF_SIZE;
 
-       /* port and func stats for management */
-       if (!BP_NOMCP(bp)) {
-               bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
-               bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
+       rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
+       if (rc != Z_OK)
+               return rc;
 
-       } else {
-               bp->port.port_stx = 0;
-               bp->func_stx = 0;
-       }
-       DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
-          bp->port.port_stx, bp->func_stx);
-
-       /* port stats */
-       memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
-       bp->port.old_nig_stats.brb_discard =
-                       REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
-       bp->port.old_nig_stats.brb_truncate =
-                       REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
-       REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
-                   &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
-       REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
-                   &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
-
-       /* function stats */
-       for_each_queue(bp, i) {
-               struct bnx2x_fastpath *fp = &bp->fp[i];
+       rc = zlib_inflate(bp->strm, Z_FINISH);
+       if ((rc != Z_OK) && (rc != Z_STREAM_END))
+               netdev_err(bp->dev, "Firmware decompression error: %s\n",
+                          bp->strm->msg);
 
-               memset(&fp->old_tclient, 0,
-                      sizeof(struct tstorm_per_client_stats));
-               memset(&fp->old_uclient, 0,
-                      sizeof(struct ustorm_per_client_stats));
-               memset(&fp->old_xclient, 0,
-                      sizeof(struct xstorm_per_client_stats));
-               memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
-       }
+       bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
+       if (bp->gunzip_outlen & 0x3)
+               netdev_err(bp->dev, "Firmware decompression error:"
+                                   " gunzip_outlen (%d) not aligned\n",
+                               bp->gunzip_outlen);
+       bp->gunzip_outlen >>= 2;
 
-       memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
-       memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
+       zlib_inflateEnd(bp->strm);
 
-       bp->stats_state = STATS_STATE_DISABLED;
+       if (rc == Z_STREAM_END)
+               return 0;
 
-       if (bp->port.pmf) {
-               if (bp->port.port_stx)
-                       bnx2x_port_stats_base_init(bp);
+       return rc;
+}
 
-               if (bp->func_stx)
-                       bnx2x_func_stats_base_init(bp);
+/* nic load/unload */
 
-       } else if (bp->func_stx)
-               bnx2x_func_stats_base_update(bp);
-}
+/*
+ * General service functions
+ */
 
-static void bnx2x_timer(unsigned long data)
+/* send a NIG loopback debug packet */
+static void bnx2x_lb_pckt(struct bnx2x *bp)
 {
-       struct bnx2x *bp = (struct bnx2x *) data;
+       u32 wb_write[3];
 
-       if (!netif_running(bp->dev))
-               return;
+       /* Ethernet source and destination addresses */
+       wb_write[0] = 0x55555555;
+       wb_write[1] = 0x55555555;
+       wb_write[2] = 0x20;             /* SOP */
+       REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
 
-       if (atomic_read(&bp->intr_sem) != 0)
-               goto timer_restart;
+       /* NON-IP protocol */
+       wb_write[0] = 0x09000000;
+       wb_write[1] = 0x55555555;
+       wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
+       REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
+}
 
-       if (poll) {
-               struct bnx2x_fastpath *fp = &bp->fp[0];
-               int rc;
+/* some of the internal memories
+ * are not directly readable from the driver
+ * to test them we send debug packets
+ */
+static int bnx2x_int_mem_test(struct bnx2x *bp)
+{
+       int factor;
+       int count, i;
+       u32 val = 0;
 
-               bnx2x_tx_int(fp);
-               rc = bnx2x_rx_int(fp, 1000);
-       }
+       if (CHIP_REV_IS_FPGA(bp))
+               factor = 120;
+       else if (CHIP_REV_IS_EMUL(bp))
+               factor = 200;
+       else
+               factor = 1;
 
-       if (!BP_NOMCP(bp)) {
-               int func = BP_FUNC(bp);
-               u32 drv_pulse;
-               u32 mcp_pulse;
+       DP(NETIF_MSG_HW, "start part1\n");
 
-               ++bp->fw_drv_pulse_wr_seq;
-               bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
-               /* TBD - add SYSTEM_TIME */
-               drv_pulse = bp->fw_drv_pulse_wr_seq;
-               SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
+       /* Disable inputs of parser neighbor blocks */
+       REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
+       REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
+       REG_WR(bp, CFC_REG_DEBUG0, 0x1);
+       REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
 
-               mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
-                            MCP_PULSE_SEQ_MASK);
-               /* The delta between driver pulse and mcp response
-                * should be 1 (before mcp response) or 0 (after mcp response)
-                */
-               if ((drv_pulse != mcp_pulse) &&
-                   (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
-                       /* someone lost a heartbeat... */
-                       BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
-                                 drv_pulse, mcp_pulse);
-               }
-       }
+       /*  Write 0 to parser credits for CFC search request */
+       REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
 
-       if (bp->state == BNX2X_STATE_OPEN)
-               bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
+       /* send Ethernet packet */
+       bnx2x_lb_pckt(bp);
 
-timer_restart:
-       mod_timer(&bp->timer, jiffies + bp->current_interval);
-}
+       /* TODO do i reset NIG statistic? */
+       /* Wait until NIG register shows 1 packet of size 0x10 */
+       count = 1000 * factor;
+       while (count) {
 
-/* end of Statistics */
+               bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
+               val = *bnx2x_sp(bp, wb_data[0]);
+               if (val == 0x10)
+                       break;
 
-/* nic init */
+               msleep(10);
+               count--;
+       }
+       if (val != 0x10) {
+               BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
+               return -1;
+       }
 
-/*
- * nic init service functions
- */
+       /* Wait until PRS register shows 1 packet */
+       count = 1000 * factor;
+       while (count) {
+               val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
+               if (val == 1)
+                       break;
 
-static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
-{
-       int port = BP_PORT(bp);
+               msleep(10);
+               count--;
+       }
+       if (val != 0x1) {
+               BNX2X_ERR("PRS timeout val = 0x%x\n", val);
+               return -2;
+       }
 
-       /* "CSTORM" */
-       bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
-                       CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
-                       CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
-       bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
-                       CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
-                       CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
-}
+       /* Reset and init BRB, PRS */
+       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
+       msleep(50);
+       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
+       msleep(50);
+       bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
+       bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
 
-static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
-                         dma_addr_t mapping, int sb_id)
-{
-       int port = BP_PORT(bp);
-       int func = BP_FUNC(bp);
-       int index;
-       u64 section;
+       DP(NETIF_MSG_HW, "part2\n");
 
-       /* USTORM */
-       section = ((u64)mapping) + offsetof(struct host_status_block,
-                                           u_status_block);
-       sb->u_status_block.status_block_id = sb_id;
+       /* Disable inputs of parser neighbor blocks */
+       REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
+       REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
+       REG_WR(bp, CFC_REG_DEBUG0, 0x1);
+       REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
 
-       REG_WR(bp, BAR_CSTRORM_INTMEM +
-              CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
-       REG_WR(bp, BAR_CSTRORM_INTMEM +
-              ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
-              U64_HI(section));
-       REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
-               CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
+       /* Write 0 to parser credits for CFC search request */
+       REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
 
-       for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
-               REG_WR16(bp, BAR_CSTRORM_INTMEM +
-                        CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
+       /* send 10 Ethernet packets */
+       for (i = 0; i < 10; i++)
+               bnx2x_lb_pckt(bp);
 
-       /* CSTORM */
-       section = ((u64)mapping) + offsetof(struct host_status_block,
-                                           c_status_block);
-       sb->c_status_block.status_block_id = sb_id;
+       /* Wait until NIG register shows 10 + 1
+          packets of size 11*0x10 = 0xb0 */
+       count = 1000 * factor;
+       while (count) {
 
-       REG_WR(bp, BAR_CSTRORM_INTMEM +
-              CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
-       REG_WR(bp, BAR_CSTRORM_INTMEM +
-              ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
-              U64_HI(section));
-       REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
-               CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
+               bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
+               val = *bnx2x_sp(bp, wb_data[0]);
+               if (val == 0xb0)
+                       break;
 
-       for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
-               REG_WR16(bp, BAR_CSTRORM_INTMEM +
-                        CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
+               msleep(10);
+               count--;
+       }
+       if (val != 0xb0) {
+               BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
+               return -3;
+       }
 
-       bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
-}
+       /* Wait until PRS register shows 2 packets */
+       val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
+       if (val != 2)
+               BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
 
-static void bnx2x_zero_def_sb(struct bnx2x *bp)
-{
-       int func = BP_FUNC(bp);
+       /* Write 1 to parser credits for CFC search request */
+       REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
 
-       bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
-                       TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
-                       sizeof(struct tstorm_def_status_block)/4);
-       bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
-                       CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
-                       sizeof(struct cstorm_def_status_block_u)/4);
-       bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
-                       CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
-                       sizeof(struct cstorm_def_status_block_c)/4);
-       bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
-                       XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
-                       sizeof(struct xstorm_def_status_block)/4);
-}
+       /* Wait until PRS register shows 3 packets */
+       msleep(10 * factor);
+       /* Wait until NIG register shows 1 packet of size 0x10 */
+       val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
+       if (val != 3)
+               BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
 
-static void bnx2x_init_def_sb(struct bnx2x *bp,
-                             struct host_def_status_block *def_sb,
-                             dma_addr_t mapping, int sb_id)
-{
-       int port = BP_PORT(bp);
-       int func = BP_FUNC(bp);
-       int index, val, reg_offset;
-       u64 section;
-
-       /* ATTN */
-       section = ((u64)mapping) + offsetof(struct host_def_status_block,
-                                           atten_status_block);
-       def_sb->atten_status_block.status_block_id = sb_id;
-
-       bp->attn_state = 0;
-
-       reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
-                            MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
-
-       for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
-               bp->attn_group[index].sig[0] = REG_RD(bp,
-                                                    reg_offset + 0x10*index);
-               bp->attn_group[index].sig[1] = REG_RD(bp,
-                                              reg_offset + 0x4 + 0x10*index);
-               bp->attn_group[index].sig[2] = REG_RD(bp,
-                                              reg_offset + 0x8 + 0x10*index);
-               bp->attn_group[index].sig[3] = REG_RD(bp,
-                                              reg_offset + 0xc + 0x10*index);
+       /* clear NIG EOP FIFO */
+       for (i = 0; i < 11; i++)
+               REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
+       val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
+       if (val != 1) {
+               BNX2X_ERR("clear of NIG failed\n");
+               return -4;
        }
 
-       reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
-                            HC_REG_ATTN_MSG0_ADDR_L);
-
-       REG_WR(bp, reg_offset, U64_LO(section));
-       REG_WR(bp, reg_offset + 4, U64_HI(section));
-
-       reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
-
-       val = REG_RD(bp, reg_offset);
-       val |= sb_id;
-       REG_WR(bp, reg_offset, val);
-
-       /* USTORM */
-       section = ((u64)mapping) + offsetof(struct host_def_status_block,
-                                           u_def_status_block);
-       def_sb->u_def_status_block.status_block_id = sb_id;
-
-       REG_WR(bp, BAR_CSTRORM_INTMEM +
-              CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
-       REG_WR(bp, BAR_CSTRORM_INTMEM +
-              ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
-              U64_HI(section));
-       REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
-               CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
-
-       for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
-               REG_WR16(bp, BAR_CSTRORM_INTMEM +
-                        CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
-
-       /* CSTORM */
-       section = ((u64)mapping) + offsetof(struct host_def_status_block,
-                                           c_def_status_block);
-       def_sb->c_def_status_block.status_block_id = sb_id;
-
-       REG_WR(bp, BAR_CSTRORM_INTMEM +
-              CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
-       REG_WR(bp, BAR_CSTRORM_INTMEM +
-              ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
-              U64_HI(section));
-       REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
-               CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
-
-       for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
-               REG_WR16(bp, BAR_CSTRORM_INTMEM +
-                        CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
-
-       /* TSTORM */
-       section = ((u64)mapping) + offsetof(struct host_def_status_block,
-                                           t_def_status_block);
-       def_sb->t_def_status_block.status_block_id = sb_id;
-
-       REG_WR(bp, BAR_TSTRORM_INTMEM +
-              TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
-       REG_WR(bp, BAR_TSTRORM_INTMEM +
-              ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
-              U64_HI(section));
-       REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
-               TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
-
-       for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
-               REG_WR16(bp, BAR_TSTRORM_INTMEM +
-                        TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
-
-       /* XSTORM */
-       section = ((u64)mapping) + offsetof(struct host_def_status_block,
-                                           x_def_status_block);
-       def_sb->x_def_status_block.status_block_id = sb_id;
-
-       REG_WR(bp, BAR_XSTRORM_INTMEM +
-              XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
-       REG_WR(bp, BAR_XSTRORM_INTMEM +
-              ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
-              U64_HI(section));
-       REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
-               XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
+       /* Reset and init BRB, PRS, NIG */
+       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
+       msleep(50);
+       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
+       msleep(50);
+       bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
+       bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
+#ifndef BCM_CNIC
+       /* set NIC mode */
+       REG_WR(bp, PRS_REG_NIC_MODE, 1);
+#endif
 
-       for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
-               REG_WR16(bp, BAR_XSTRORM_INTMEM +
-                        XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
+       /* Enable inputs of parser neighbor blocks */
+       REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
+       REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
+       REG_WR(bp, CFC_REG_DEBUG0, 0x0);
+       REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
 
-       bp->stats_pending = 0;
-       bp->set_mac_pending = 0;
+       DP(NETIF_MSG_HW, "done\n");
 
-       bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
+       return 0; /* OK */
 }
 
-static void bnx2x_update_coalesce(struct bnx2x *bp)
+static void enable_blocks_attention(struct bnx2x *bp)
 {
-       int port = BP_PORT(bp);
-       int i;
-
-       for_each_queue(bp, i) {
-               int sb_id = bp->fp[i].sb_id;
-
-               /* HC_INDEX_U_ETH_RX_CQ_CONS */
-               REG_WR8(bp, BAR_CSTRORM_INTMEM +
-                       CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
-                                                     U_SB_ETH_RX_CQ_INDEX),
-                       bp->rx_ticks/(4 * BNX2X_BTR));
-               REG_WR16(bp, BAR_CSTRORM_INTMEM +
-                        CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
-                                                      U_SB_ETH_RX_CQ_INDEX),
-                        (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
-
-               /* HC_INDEX_C_ETH_TX_CQ_CONS */
-               REG_WR8(bp, BAR_CSTRORM_INTMEM +
-                       CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
-                                                     C_SB_ETH_TX_CQ_INDEX),
-                       bp->tx_ticks/(4 * BNX2X_BTR));
-               REG_WR16(bp, BAR_CSTRORM_INTMEM +
-                        CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
-                                                      C_SB_ETH_TX_CQ_INDEX),
-                        (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
-       }
+       REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
+       REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
+       REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
+       REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
+       REG_WR(bp, QM_REG_QM_INT_MASK, 0);
+       REG_WR(bp, TM_REG_TM_INT_MASK, 0);
+       REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
+       REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
+       REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
+/*     REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
+/*     REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
+       REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
+       REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
+       REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
+/*     REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
+/*     REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
+       REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
+       REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
+       REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
+       REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
+/*     REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
+/*     REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
+       if (CHIP_REV_IS_FPGA(bp))
+               REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
+       else
+               REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
+       REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
+       REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
+       REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
+/*     REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
+/*     REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
+       REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
+       REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
+/*     REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
+       REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);         /* bit 3,4 masked */
 }
 
-static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
-                                      struct bnx2x_fastpath *fp, int last)
-{
-       int i;
-
-       for (i = 0; i < last; i++) {
-               struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
-               struct sk_buff *skb = rx_buf->skb;
-
-               if (skb == NULL) {
-                       DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
-                       continue;
-               }
-
-               if (fp->tpa_state[i] == BNX2X_TPA_START)
-                       dma_unmap_single(&bp->pdev->dev,
-                                        dma_unmap_addr(rx_buf, mapping),
-                                        bp->rx_buf_size, DMA_FROM_DEVICE);
-
-               dev_kfree_skb(skb);
-               rx_buf->skb = NULL;
-       }
-}
+static const struct {
+       u32 addr;
+       u32 mask;
+} bnx2x_parity_mask[] = {
+       {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
+       {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
+       {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
+       {HC_REG_HC_PRTY_MASK, 0xffffffff},
+       {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
+       {QM_REG_QM_PRTY_MASK, 0x0},
+       {DORQ_REG_DORQ_PRTY_MASK, 0x0},
+       {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
+       {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
+       {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
+       {CDU_REG_CDU_PRTY_MASK, 0x0},
+       {CFC_REG_CFC_PRTY_MASK, 0x0},
+       {DBG_REG_DBG_PRTY_MASK, 0x0},
+       {DMAE_REG_DMAE_PRTY_MASK, 0x0},
+       {BRB1_REG_BRB1_PRTY_MASK, 0x0},
+       {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
+       {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
+       {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
+       {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
+       {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
+       {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
+       {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
+       {USEM_REG_USEM_PRTY_MASK_0, 0x0},
+       {USEM_REG_USEM_PRTY_MASK_1, 0x0},
+       {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
+       {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
+       {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
+       {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
+};
 
-static void bnx2x_init_rx_rings(struct bnx2x *bp)
+static void enable_blocks_parity(struct bnx2x *bp)
 {
-       int func = BP_FUNC(bp);
-       int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
-                                             ETH_MAX_AGGREGATION_QUEUES_E1H;
-       u16 ring_prod, cqe_ring_prod;
-       int i, j;
-
-       bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
-       DP(NETIF_MSG_IFUP,
-          "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
-
-       if (bp->flags & TPA_ENABLE_FLAG) {
-
-               for_each_queue(bp, j) {
-                       struct bnx2x_fastpath *fp = &bp->fp[j];
-
-                       for (i = 0; i < max_agg_queues; i++) {
-                               fp->tpa_pool[i].skb =
-                                  netdev_alloc_skb(bp->dev, bp->rx_buf_size);
-                               if (!fp->tpa_pool[i].skb) {
-                                       BNX2X_ERR("Failed to allocate TPA "
-                                                 "skb pool for queue[%d] - "
-                                                 "disabling TPA on this "
-                                                 "queue!\n", j);
-                                       bnx2x_free_tpa_pool(bp, fp, i);
-                                       fp->disable_tpa = 1;
-                                       break;
-                               }
-                               dma_unmap_addr_set((struct sw_rx_bd *)
-                                                       &bp->fp->tpa_pool[i],
-                                                  mapping, 0);
-                               fp->tpa_state[i] = BNX2X_TPA_STOP;
-                       }
-               }
-       }
-
-       for_each_queue(bp, j) {
-               struct bnx2x_fastpath *fp = &bp->fp[j];
-
-               fp->rx_bd_cons = 0;
-               fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
-               fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
-
-               /* "next page" elements initialization */
-               /* SGE ring */
-               for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
-                       struct eth_rx_sge *sge;
-
-                       sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
-                       sge->addr_hi =
-                               cpu_to_le32(U64_HI(fp->rx_sge_mapping +
-                                       BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
-                       sge->addr_lo =
-                               cpu_to_le32(U64_LO(fp->rx_sge_mapping +
-                                       BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
-               }
-
-               bnx2x_init_sge_ring_bit_mask(fp);
+       int i, mask_arr_len =
+               sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
 
-               /* RX BD ring */
-               for (i = 1; i <= NUM_RX_RINGS; i++) {
-                       struct eth_rx_bd *rx_bd;
-
-                       rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
-                       rx_bd->addr_hi =
-                               cpu_to_le32(U64_HI(fp->rx_desc_mapping +
-                                           BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
-                       rx_bd->addr_lo =
-                               cpu_to_le32(U64_LO(fp->rx_desc_mapping +
-                                           BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
-               }
-
-               /* CQ ring */
-               for (i = 1; i <= NUM_RCQ_RINGS; i++) {
-                       struct eth_rx_cqe_next_page *nextpg;
-
-                       nextpg = (struct eth_rx_cqe_next_page *)
-                               &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
-                       nextpg->addr_hi =
-                               cpu_to_le32(U64_HI(fp->rx_comp_mapping +
-                                          BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
-                       nextpg->addr_lo =
-                               cpu_to_le32(U64_LO(fp->rx_comp_mapping +
-                                          BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
-               }
-
-               /* Allocate SGEs and initialize the ring elements */
-               for (i = 0, ring_prod = 0;
-                    i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
-
-                       if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
-                               BNX2X_ERR("was only able to allocate "
-                                         "%d rx sges\n", i);
-                               BNX2X_ERR("disabling TPA for queue[%d]\n", j);
-                               /* Cleanup already allocated elements */
-                               bnx2x_free_rx_sge_range(bp, fp, ring_prod);
-                               bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
-                               fp->disable_tpa = 1;
-                               ring_prod = 0;
-                               break;
-                       }
-                       ring_prod = NEXT_SGE_IDX(ring_prod);
-               }
-               fp->rx_sge_prod = ring_prod;
-
-               /* Allocate BDs and initialize BD ring */
-               fp->rx_comp_cons = 0;
-               cqe_ring_prod = ring_prod = 0;
-               for (i = 0; i < bp->rx_ring_size; i++) {
-                       if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
-                               BNX2X_ERR("was only able to allocate "
-                                         "%d rx skbs on queue[%d]\n", i, j);
-                               fp->eth_q_stats.rx_skb_alloc_failed++;
-                               break;
-                       }
-                       ring_prod = NEXT_RX_IDX(ring_prod);
-                       cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
-                       WARN_ON(ring_prod <= i);
-               }
-
-               fp->rx_bd_prod = ring_prod;
-               /* must not have more available CQEs than BDs */
-               fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
-                                        cqe_ring_prod);
-               fp->rx_pkt = fp->rx_calls = 0;
-
-               /* Warning!
-                * this will generate an interrupt (to the TSTORM)
-                * must only be done after chip is initialized
-                */
-               bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
-                                    fp->rx_sge_prod);
-               if (j != 0)
-                       continue;
-
-               REG_WR(bp, BAR_USTRORM_INTMEM +
-                      USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
-                      U64_LO(fp->rx_comp_mapping));
-               REG_WR(bp, BAR_USTRORM_INTMEM +
-                      USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
-                      U64_HI(fp->rx_comp_mapping));
-       }
-}
-
-static void bnx2x_init_tx_ring(struct bnx2x *bp)
-{
-       int i, j;
-
-       for_each_queue(bp, j) {
-               struct bnx2x_fastpath *fp = &bp->fp[j];
-
-               for (i = 1; i <= NUM_TX_RINGS; i++) {
-                       struct eth_tx_next_bd *tx_next_bd =
-                               &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
-
-                       tx_next_bd->addr_hi =
-                               cpu_to_le32(U64_HI(fp->tx_desc_mapping +
-                                           BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
-                       tx_next_bd->addr_lo =
-                               cpu_to_le32(U64_LO(fp->tx_desc_mapping +
-                                           BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
-               }
-
-               fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
-               fp->tx_db.data.zero_fill1 = 0;
-               fp->tx_db.data.prod = 0;
-
-               fp->tx_pkt_prod = 0;
-               fp->tx_pkt_cons = 0;
-               fp->tx_bd_prod = 0;
-               fp->tx_bd_cons = 0;
-               fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
-               fp->tx_pkt = 0;
-       }
-}
-
-static void bnx2x_init_sp_ring(struct bnx2x *bp)
-{
-       int func = BP_FUNC(bp);
-
-       spin_lock_init(&bp->spq_lock);
-
-       bp->spq_left = MAX_SPQ_PENDING;
-       bp->spq_prod_idx = 0;
-       bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
-       bp->spq_prod_bd = bp->spq;
-       bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
-
-       REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
-              U64_LO(bp->spq_mapping));
-       REG_WR(bp,
-              XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
-              U64_HI(bp->spq_mapping));
-
-       REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
-              bp->spq_prod_idx);
-}
-
-static void bnx2x_init_context(struct bnx2x *bp)
-{
-       int i;
-
-       /* Rx */
-       for_each_queue(bp, i) {
-               struct eth_context *context = bnx2x_sp(bp, context[i].eth);
-               struct bnx2x_fastpath *fp = &bp->fp[i];
-               u8 cl_id = fp->cl_id;
-
-               context->ustorm_st_context.common.sb_index_numbers =
-                                               BNX2X_RX_SB_INDEX_NUM;
-               context->ustorm_st_context.common.clientId = cl_id;
-               context->ustorm_st_context.common.status_block_id = fp->sb_id;
-               context->ustorm_st_context.common.flags =
-                       (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
-                        USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
-               context->ustorm_st_context.common.statistics_counter_id =
-                                               cl_id;
-               context->ustorm_st_context.common.mc_alignment_log_size =
-                                               BNX2X_RX_ALIGN_SHIFT;
-               context->ustorm_st_context.common.bd_buff_size =
-                                               bp->rx_buf_size;
-               context->ustorm_st_context.common.bd_page_base_hi =
-                                               U64_HI(fp->rx_desc_mapping);
-               context->ustorm_st_context.common.bd_page_base_lo =
-                                               U64_LO(fp->rx_desc_mapping);
-               if (!fp->disable_tpa) {
-                       context->ustorm_st_context.common.flags |=
-                               USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
-                       context->ustorm_st_context.common.sge_buff_size =
-                               (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
-                                          0xffff);
-                       context->ustorm_st_context.common.sge_page_base_hi =
-                                               U64_HI(fp->rx_sge_mapping);
-                       context->ustorm_st_context.common.sge_page_base_lo =
-                                               U64_LO(fp->rx_sge_mapping);
-
-                       context->ustorm_st_context.common.max_sges_for_packet =
-                               SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
-                       context->ustorm_st_context.common.max_sges_for_packet =
-                               ((context->ustorm_st_context.common.
-                                 max_sges_for_packet + PAGES_PER_SGE - 1) &
-                                (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
-               }
-
-               context->ustorm_ag_context.cdu_usage =
-                       CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
-                                              CDU_REGION_NUMBER_UCM_AG,
-                                              ETH_CONNECTION_TYPE);
-
-               context->xstorm_ag_context.cdu_reserved =
-                       CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
-                                              CDU_REGION_NUMBER_XCM_AG,
-                                              ETH_CONNECTION_TYPE);
-       }
-
-       /* Tx */
-       for_each_queue(bp, i) {
-               struct bnx2x_fastpath *fp = &bp->fp[i];
-               struct eth_context *context =
-                       bnx2x_sp(bp, context[i].eth);
-
-               context->cstorm_st_context.sb_index_number =
-                                               C_SB_ETH_TX_CQ_INDEX;
-               context->cstorm_st_context.status_block_id = fp->sb_id;
-
-               context->xstorm_st_context.tx_bd_page_base_hi =
-                                               U64_HI(fp->tx_desc_mapping);
-               context->xstorm_st_context.tx_bd_page_base_lo =
-                                               U64_LO(fp->tx_desc_mapping);
-               context->xstorm_st_context.statistics_data = (fp->cl_id |
-                               XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
-       }
-}
-
-static void bnx2x_init_ind_table(struct bnx2x *bp)
-{
-       int func = BP_FUNC(bp);
-       int i;
-
-       if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
-               return;
-
-       DP(NETIF_MSG_IFUP,
-          "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
-       for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
-               REG_WR8(bp, BAR_TSTRORM_INTMEM +
-                       TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
-                       bp->fp->cl_id + (i % bp->num_queues));
-}
-
-static void bnx2x_set_client_config(struct bnx2x *bp)
-{
-       struct tstorm_eth_client_config tstorm_client = {0};
-       int port = BP_PORT(bp);
-       int i;
-
-       tstorm_client.mtu = bp->dev->mtu;
-       tstorm_client.config_flags =
-                               (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
-                                TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
-#ifdef BCM_VLAN
-       if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
-               tstorm_client.config_flags |=
-                               TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
-               DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
-       }
-#endif
-
-       for_each_queue(bp, i) {
-               tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
-
-               REG_WR(bp, BAR_TSTRORM_INTMEM +
-                      TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
-                      ((u32 *)&tstorm_client)[0]);
-               REG_WR(bp, BAR_TSTRORM_INTMEM +
-                      TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
-                      ((u32 *)&tstorm_client)[1]);
-       }
-
-       DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
-          ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
-}
-
-static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
-{
-       struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
-       int mode = bp->rx_mode;
-       int mask = bp->rx_mode_cl_mask;
-       int func = BP_FUNC(bp);
-       int port = BP_PORT(bp);
-       int i;
-       /* All but management unicast packets should pass to the host as well */
-       u32 llh_mask =
-               NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
-               NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
-               NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
-               NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
-
-       DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
-
-       switch (mode) {
-       case BNX2X_RX_MODE_NONE: /* no Rx */
-               tstorm_mac_filter.ucast_drop_all = mask;
-               tstorm_mac_filter.mcast_drop_all = mask;
-               tstorm_mac_filter.bcast_drop_all = mask;
-               break;
-
-       case BNX2X_RX_MODE_NORMAL:
-               tstorm_mac_filter.bcast_accept_all = mask;
-               break;
-
-       case BNX2X_RX_MODE_ALLMULTI:
-               tstorm_mac_filter.mcast_accept_all = mask;
-               tstorm_mac_filter.bcast_accept_all = mask;
-               break;
-
-       case BNX2X_RX_MODE_PROMISC:
-               tstorm_mac_filter.ucast_accept_all = mask;
-               tstorm_mac_filter.mcast_accept_all = mask;
-               tstorm_mac_filter.bcast_accept_all = mask;
-               /* pass management unicast packets as well */
-               llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
-               break;
-
-       default:
-               BNX2X_ERR("BAD rx mode (%d)\n", mode);
-               break;
-       }
-
-       REG_WR(bp,
-              (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
-              llh_mask);
-
-       for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
-               REG_WR(bp, BAR_TSTRORM_INTMEM +
-                      TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
-                      ((u32 *)&tstorm_mac_filter)[i]);
-
-/*             DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
-                  ((u32 *)&tstorm_mac_filter)[i]); */
-       }
-
-       if (mode != BNX2X_RX_MODE_NONE)
-               bnx2x_set_client_config(bp);
-}
-
-static void bnx2x_init_internal_common(struct bnx2x *bp)
-{
-       int i;
-
-       /* Zero this manually as its initialization is
-          currently missing in the initTool */
-       for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
-               REG_WR(bp, BAR_USTRORM_INTMEM +
-                      USTORM_AGG_DATA_OFFSET + i * 4, 0);
-}
-
-static void bnx2x_init_internal_port(struct bnx2x *bp)
-{
-       int port = BP_PORT(bp);
-
-       REG_WR(bp,
-              BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
-       REG_WR(bp,
-              BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
-       REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
-       REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
-}
-
-static void bnx2x_init_internal_func(struct bnx2x *bp)
-{
-       struct tstorm_eth_function_common_config tstorm_config = {0};
-       struct stats_indication_flags stats_flags = {0};
-       int port = BP_PORT(bp);
-       int func = BP_FUNC(bp);
-       int i, j;
-       u32 offset;
-       u16 max_agg_size;
-
-       tstorm_config.config_flags = RSS_FLAGS(bp);
-
-       if (is_multi(bp))
-               tstorm_config.rss_result_mask = MULTI_MASK;
-
-       /* Enable TPA if needed */
-       if (bp->flags & TPA_ENABLE_FLAG)
-               tstorm_config.config_flags |=
-                       TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
-
-       if (IS_E1HMF(bp))
-               tstorm_config.config_flags |=
-                               TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
-
-       tstorm_config.leading_client_id = BP_L_ID(bp);
-
-       REG_WR(bp, BAR_TSTRORM_INTMEM +
-              TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
-              (*(u32 *)&tstorm_config));
-
-       bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
-       bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
-       bnx2x_set_storm_rx_mode(bp);
-
-       for_each_queue(bp, i) {
-               u8 cl_id = bp->fp[i].cl_id;
-
-               /* reset xstorm per client statistics */
-               offset = BAR_XSTRORM_INTMEM +
-                        XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
-               for (j = 0;
-                    j < sizeof(struct xstorm_per_client_stats) / 4; j++)
-                       REG_WR(bp, offset + j*4, 0);
-
-               /* reset tstorm per client statistics */
-               offset = BAR_TSTRORM_INTMEM +
-                        TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
-               for (j = 0;
-                    j < sizeof(struct tstorm_per_client_stats) / 4; j++)
-                       REG_WR(bp, offset + j*4, 0);
-
-               /* reset ustorm per client statistics */
-               offset = BAR_USTRORM_INTMEM +
-                        USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
-               for (j = 0;
-                    j < sizeof(struct ustorm_per_client_stats) / 4; j++)
-                       REG_WR(bp, offset + j*4, 0);
-       }
-
-       /* Init statistics related context */
-       stats_flags.collect_eth = 1;
-
-       REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
-              ((u32 *)&stats_flags)[0]);
-       REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
-              ((u32 *)&stats_flags)[1]);
-
-       REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
-              ((u32 *)&stats_flags)[0]);
-       REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
-              ((u32 *)&stats_flags)[1]);
-
-       REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
-              ((u32 *)&stats_flags)[0]);
-       REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
-              ((u32 *)&stats_flags)[1]);
-
-       REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
-              ((u32 *)&stats_flags)[0]);
-       REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
-              ((u32 *)&stats_flags)[1]);
-
-       REG_WR(bp, BAR_XSTRORM_INTMEM +
-              XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
-              U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
-       REG_WR(bp, BAR_XSTRORM_INTMEM +
-              XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
-              U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
-
-       REG_WR(bp, BAR_TSTRORM_INTMEM +
-              TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
-              U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
-       REG_WR(bp, BAR_TSTRORM_INTMEM +
-              TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
-              U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
-
-       REG_WR(bp, BAR_USTRORM_INTMEM +
-              USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
-              U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
-       REG_WR(bp, BAR_USTRORM_INTMEM +
-              USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
-              U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
-
-       if (CHIP_IS_E1H(bp)) {
-               REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
-                       IS_E1HMF(bp));
-               REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
-                       IS_E1HMF(bp));
-               REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
-                       IS_E1HMF(bp));
-               REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
-                       IS_E1HMF(bp));
-
-               REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
-                        bp->e1hov);
-       }
-
-       /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
-       max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
-                                  SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
-       for_each_queue(bp, i) {
-               struct bnx2x_fastpath *fp = &bp->fp[i];
-
-               REG_WR(bp, BAR_USTRORM_INTMEM +
-                      USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
-                      U64_LO(fp->rx_comp_mapping));
-               REG_WR(bp, BAR_USTRORM_INTMEM +
-                      USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
-                      U64_HI(fp->rx_comp_mapping));
-
-               /* Next page */
-               REG_WR(bp, BAR_USTRORM_INTMEM +
-                      USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
-                      U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
-               REG_WR(bp, BAR_USTRORM_INTMEM +
-                      USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
-                      U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
-
-               REG_WR16(bp, BAR_USTRORM_INTMEM +
-                        USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
-                        max_agg_size);
-       }
-
-       /* dropless flow control */
-       if (CHIP_IS_E1H(bp)) {
-               struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
-
-               rx_pause.bd_thr_low = 250;
-               rx_pause.cqe_thr_low = 250;
-               rx_pause.cos = 1;
-               rx_pause.sge_thr_low = 0;
-               rx_pause.bd_thr_high = 350;
-               rx_pause.cqe_thr_high = 350;
-               rx_pause.sge_thr_high = 0;
-
-               for_each_queue(bp, i) {
-                       struct bnx2x_fastpath *fp = &bp->fp[i];
-
-                       if (!fp->disable_tpa) {
-                               rx_pause.sge_thr_low = 150;
-                               rx_pause.sge_thr_high = 250;
-                       }
-
-
-                       offset = BAR_USTRORM_INTMEM +
-                                USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
-                                                                  fp->cl_id);
-                       for (j = 0;
-                            j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
-                            j++)
-                               REG_WR(bp, offset + j*4,
-                                      ((u32 *)&rx_pause)[j]);
-               }
-       }
-
-       memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
-
-       /* Init rate shaping and fairness contexts */
-       if (IS_E1HMF(bp)) {
-               int vn;
-
-               /* During init there is no active link
-                  Until link is up, set link rate to 10Gbps */
-               bp->link_vars.line_speed = SPEED_10000;
-               bnx2x_init_port_minmax(bp);
-
-               if (!BP_NOMCP(bp))
-                       bp->mf_config =
-                             SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
-               bnx2x_calc_vn_weight_sum(bp);
-
-               for (vn = VN_0; vn < E1HVN_MAX; vn++)
-                       bnx2x_init_vn_minmax(bp, 2*vn + port);
-
-               /* Enable rate shaping and fairness */
-               bp->cmng.flags.cmng_enables |=
-                                       CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
-
-       } else {
-               /* rate shaping and fairness are disabled */
-               DP(NETIF_MSG_IFUP,
-                  "single function mode  minmax will be disabled\n");
-       }
-
-
-       /* Store cmng structures to internal memory */
-       if (bp->port.pmf)
-               for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
-                       REG_WR(bp, BAR_XSTRORM_INTMEM +
-                              XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
-                              ((u32 *)(&bp->cmng))[i]);
-}
-
-static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
-{
-       switch (load_code) {
-       case FW_MSG_CODE_DRV_LOAD_COMMON:
-               bnx2x_init_internal_common(bp);
-               /* no break */
-
-       case FW_MSG_CODE_DRV_LOAD_PORT:
-               bnx2x_init_internal_port(bp);
-               /* no break */
-
-       case FW_MSG_CODE_DRV_LOAD_FUNCTION:
-               bnx2x_init_internal_func(bp);
-               break;
-
-       default:
-               BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
-               break;
-       }
-}
-
-static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
-{
-       int i;
-
-       for_each_queue(bp, i) {
-               struct bnx2x_fastpath *fp = &bp->fp[i];
-
-               fp->bp = bp;
-               fp->state = BNX2X_FP_STATE_CLOSED;
-               fp->index = i;
-               fp->cl_id = BP_L_ID(bp) + i;
-#ifdef BCM_CNIC
-               fp->sb_id = fp->cl_id + 1;
-#else
-               fp->sb_id = fp->cl_id;
-#endif
-               DP(NETIF_MSG_IFUP,
-                  "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
-                  i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
-               bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
-                             fp->sb_id);
-               bnx2x_update_fpsb_idx(fp);
-       }
-
-       /* ensure status block indices were read */
-       rmb();
-
-
-       bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
-                         DEF_SB_ID);
-       bnx2x_update_dsb_idx(bp);
-       bnx2x_update_coalesce(bp);
-       bnx2x_init_rx_rings(bp);
-       bnx2x_init_tx_ring(bp);
-       bnx2x_init_sp_ring(bp);
-       bnx2x_init_context(bp);
-       bnx2x_init_internal(bp, load_code);
-       bnx2x_init_ind_table(bp);
-       bnx2x_stats_init(bp);
-
-       /* At this point, we are ready for interrupts */
-       atomic_set(&bp->intr_sem, 0);
-
-       /* flush all before enabling interrupts */
-       mb();
-       mmiowb();
-
-       bnx2x_int_enable(bp);
-
-       /* Check for SPIO5 */
-       bnx2x_attn_int_deasserted0(bp,
-               REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
-                                  AEU_INPUTS_ATTN_BITS_SPIO5);
-}
-
-/* end of nic init */
-
-/*
- * gzip service functions
- */
-
-static int bnx2x_gunzip_init(struct bnx2x *bp)
-{
-       bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
-                                           &bp->gunzip_mapping, GFP_KERNEL);
-       if (bp->gunzip_buf  == NULL)
-               goto gunzip_nomem1;
-
-       bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
-       if (bp->strm  == NULL)
-               goto gunzip_nomem2;
-
-       bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
-                                     GFP_KERNEL);
-       if (bp->strm->workspace == NULL)
-               goto gunzip_nomem3;
-
-       return 0;
-
-gunzip_nomem3:
-       kfree(bp->strm);
-       bp->strm = NULL;
-
-gunzip_nomem2:
-       dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
-                         bp->gunzip_mapping);
-       bp->gunzip_buf = NULL;
-
-gunzip_nomem1:
-       netdev_err(bp->dev, "Cannot allocate firmware buffer for"
-              " un-compression\n");
-       return -ENOMEM;
-}
-
-static void bnx2x_gunzip_end(struct bnx2x *bp)
-{
-       kfree(bp->strm->workspace);
-
-       kfree(bp->strm);
-       bp->strm = NULL;
-
-       if (bp->gunzip_buf) {
-               dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
-                                 bp->gunzip_mapping);
-               bp->gunzip_buf = NULL;
-       }
-}
-
-static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
-{
-       int n, rc;
-
-       /* check gzip header */
-       if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
-               BNX2X_ERR("Bad gzip header\n");
-               return -EINVAL;
-       }
-
-       n = 10;
-
-#define FNAME                          0x8
-
-       if (zbuf[3] & FNAME)
-               while ((zbuf[n++] != 0) && (n < len));
-
-       bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
-       bp->strm->avail_in = len - n;
-       bp->strm->next_out = bp->gunzip_buf;
-       bp->strm->avail_out = FW_BUF_SIZE;
-
-       rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
-       if (rc != Z_OK)
-               return rc;
-
-       rc = zlib_inflate(bp->strm, Z_FINISH);
-       if ((rc != Z_OK) && (rc != Z_STREAM_END))
-               netdev_err(bp->dev, "Firmware decompression error: %s\n",
-                          bp->strm->msg);
-
-       bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
-       if (bp->gunzip_outlen & 0x3)
-               netdev_err(bp->dev, "Firmware decompression error:"
-                                   " gunzip_outlen (%d) not aligned\n",
-                               bp->gunzip_outlen);
-       bp->gunzip_outlen >>= 2;
-
-       zlib_inflateEnd(bp->strm);
-
-       if (rc == Z_STREAM_END)
-               return 0;
-
-       return rc;
-}
-
-/* nic load/unload */
-
-/*
- * General service functions
- */
-
-/* send a NIG loopback debug packet */
-static void bnx2x_lb_pckt(struct bnx2x *bp)
-{
-       u32 wb_write[3];
-
-       /* Ethernet source and destination addresses */
-       wb_write[0] = 0x55555555;
-       wb_write[1] = 0x55555555;
-       wb_write[2] = 0x20;             /* SOP */
-       REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
-
-       /* NON-IP protocol */
-       wb_write[0] = 0x09000000;
-       wb_write[1] = 0x55555555;
-       wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
-       REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
-}
-
-/* some of the internal memories
- * are not directly readable from the driver
- * to test them we send debug packets
- */
-static int bnx2x_int_mem_test(struct bnx2x *bp)
-{
-       int factor;
-       int count, i;
-       u32 val = 0;
-
-       if (CHIP_REV_IS_FPGA(bp))
-               factor = 120;
-       else if (CHIP_REV_IS_EMUL(bp))
-               factor = 200;
-       else
-               factor = 1;
-
-       DP(NETIF_MSG_HW, "start part1\n");
-
-       /* Disable inputs of parser neighbor blocks */
-       REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
-       REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
-       REG_WR(bp, CFC_REG_DEBUG0, 0x1);
-       REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
-
-       /*  Write 0 to parser credits for CFC search request */
-       REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
-
-       /* send Ethernet packet */
-       bnx2x_lb_pckt(bp);
-
-       /* TODO do i reset NIG statistic? */
-       /* Wait until NIG register shows 1 packet of size 0x10 */
-       count = 1000 * factor;
-       while (count) {
-
-               bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
-               val = *bnx2x_sp(bp, wb_data[0]);
-               if (val == 0x10)
-                       break;
-
-               msleep(10);
-               count--;
-       }
-       if (val != 0x10) {
-               BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
-               return -1;
-       }
-
-       /* Wait until PRS register shows 1 packet */
-       count = 1000 * factor;
-       while (count) {
-               val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
-               if (val == 1)
-                       break;
-
-               msleep(10);
-               count--;
-       }
-       if (val != 0x1) {
-               BNX2X_ERR("PRS timeout val = 0x%x\n", val);
-               return -2;
-       }
-
-       /* Reset and init BRB, PRS */
-       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
-       msleep(50);
-       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
-       msleep(50);
-       bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
-       bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
-
-       DP(NETIF_MSG_HW, "part2\n");
-
-       /* Disable inputs of parser neighbor blocks */
-       REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
-       REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
-       REG_WR(bp, CFC_REG_DEBUG0, 0x1);
-       REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
-
-       /* Write 0 to parser credits for CFC search request */
-       REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
-
-       /* send 10 Ethernet packets */
-       for (i = 0; i < 10; i++)
-               bnx2x_lb_pckt(bp);
-
-       /* Wait until NIG register shows 10 + 1
-          packets of size 11*0x10 = 0xb0 */
-       count = 1000 * factor;
-       while (count) {
-
-               bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
-               val = *bnx2x_sp(bp, wb_data[0]);
-               if (val == 0xb0)
-                       break;
-
-               msleep(10);
-               count--;
-       }
-       if (val != 0xb0) {
-               BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
-               return -3;
-       }
-
-       /* Wait until PRS register shows 2 packets */
-       val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
-       if (val != 2)
-               BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
-
-       /* Write 1 to parser credits for CFC search request */
-       REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
-
-       /* Wait until PRS register shows 3 packets */
-       msleep(10 * factor);
-       /* Wait until NIG register shows 1 packet of size 0x10 */
-       val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
-       if (val != 3)
-               BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
-
-       /* clear NIG EOP FIFO */
-       for (i = 0; i < 11; i++)
-               REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
-       val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
-       if (val != 1) {
-               BNX2X_ERR("clear of NIG failed\n");
-               return -4;
-       }
-
-       /* Reset and init BRB, PRS, NIG */
-       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
-       msleep(50);
-       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
-       msleep(50);
-       bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
-       bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
-#ifndef BCM_CNIC
-       /* set NIC mode */
-       REG_WR(bp, PRS_REG_NIC_MODE, 1);
-#endif
-
-       /* Enable inputs of parser neighbor blocks */
-       REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
-       REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
-       REG_WR(bp, CFC_REG_DEBUG0, 0x0);
-       REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
-
-       DP(NETIF_MSG_HW, "done\n");
-
-       return 0; /* OK */
-}
-
-static void enable_blocks_attention(struct bnx2x *bp)
-{
-       REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
-       REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
-       REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
-       REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
-       REG_WR(bp, QM_REG_QM_INT_MASK, 0);
-       REG_WR(bp, TM_REG_TM_INT_MASK, 0);
-       REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
-       REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
-       REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
-/*     REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
-/*     REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
-       REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
-       REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
-       REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
-/*     REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
-/*     REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
-       REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
-       REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
-       REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
-       REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
-/*     REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
-/*     REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
-       if (CHIP_REV_IS_FPGA(bp))
-               REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
-       else
-               REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
-       REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
-       REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
-       REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
-/*     REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
-/*     REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
-       REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
-       REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
-/*     REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
-       REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);         /* bit 3,4 masked */
-}
-
-static const struct {
-       u32 addr;
-       u32 mask;
-} bnx2x_parity_mask[] = {
-       {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
-       {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
-       {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
-       {HC_REG_HC_PRTY_MASK, 0xffffffff},
-       {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
-       {QM_REG_QM_PRTY_MASK, 0x0},
-       {DORQ_REG_DORQ_PRTY_MASK, 0x0},
-       {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
-       {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
-       {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
-       {CDU_REG_CDU_PRTY_MASK, 0x0},
-       {CFC_REG_CFC_PRTY_MASK, 0x0},
-       {DBG_REG_DBG_PRTY_MASK, 0x0},
-       {DMAE_REG_DMAE_PRTY_MASK, 0x0},
-       {BRB1_REG_BRB1_PRTY_MASK, 0x0},
-       {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
-       {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
-       {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
-       {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
-       {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
-       {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
-       {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
-       {USEM_REG_USEM_PRTY_MASK_0, 0x0},
-       {USEM_REG_USEM_PRTY_MASK_1, 0x0},
-       {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
-       {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
-       {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
-       {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
-};
-
-static void enable_blocks_parity(struct bnx2x *bp)
-{
-       int i, mask_arr_len =
-               sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
-
-       for (i = 0; i < mask_arr_len; i++)
-               REG_WR(bp, bnx2x_parity_mask[i].addr,
-                       bnx2x_parity_mask[i].mask);
-}
-
-
-static void bnx2x_reset_common(struct bnx2x *bp)
-{
-       /* reset_common */
-       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
-              0xd3ffff7f);
-       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
-}
-
-static void bnx2x_init_pxp(struct bnx2x *bp)
-{
-       u16 devctl;
-       int r_order, w_order;
-
-       pci_read_config_word(bp->pdev,
-                            bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
-       DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
-       w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
-       if (bp->mrrs == -1)
-               r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
-       else {
-               DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
-               r_order = bp->mrrs;
-       }
-
-       bnx2x_init_pxp_arb(bp, r_order, w_order);
-}
-
-static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
-{
-       int is_required;
-       u32 val;
-       int port;
-
-       if (BP_NOMCP(bp))
-               return;
-
-       is_required = 0;
-       val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
-             SHARED_HW_CFG_FAN_FAILURE_MASK;
-
-       if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
-               is_required = 1;
-
-       /*
-        * The fan failure mechanism is usually related to the PHY type since
-        * the power consumption of the board is affected by the PHY. Currently,
-        * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
-        */
-       else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
-               for (port = PORT_0; port < PORT_MAX; port++) {
-                       u32 phy_type =
-                               SHMEM_RD(bp, dev_info.port_hw_config[port].
-                                        external_phy_config) &
-                               PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
-                       is_required |=
-                               ((phy_type ==
-                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
-                                (phy_type ==
-                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
-                                (phy_type ==
-                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
-               }
-
-       DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
-
-       if (is_required == 0)
-               return;
-
-       /* Fan failure is indicated by SPIO 5 */
-       bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
-                      MISC_REGISTERS_SPIO_INPUT_HI_Z);
-
-       /* set to active low mode */
-       val = REG_RD(bp, MISC_REG_SPIO_INT);
-       val |= ((1 << MISC_REGISTERS_SPIO_5) <<
-                                       MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
-       REG_WR(bp, MISC_REG_SPIO_INT, val);
-
-       /* enable interrupt to signal the IGU */
-       val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
-       val |= (1 << MISC_REGISTERS_SPIO_5);
-       REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
-}
-
-static int bnx2x_init_common(struct bnx2x *bp)
-{
-       u32 val, i;
-#ifdef BCM_CNIC
-       u32 wb_write[2];
-#endif
-
-       DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
-
-       bnx2x_reset_common(bp);
-       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
-       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
-
-       bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
-       if (CHIP_IS_E1H(bp))
-               REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
-
-       REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
-       msleep(30);
-       REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
-
-       bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
-       if (CHIP_IS_E1(bp)) {
-               /* enable HW interrupt from PXP on USDM overflow
-                  bit 16 on INT_MASK_0 */
-               REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
-       }
-
-       bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
-       bnx2x_init_pxp(bp);
-
-#ifdef __BIG_ENDIAN
-       REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
-       REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
-       REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
-       REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
-       REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
-       /* make sure this value is 0 */
-       REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
-
-/*     REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
-       REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
-       REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
-       REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
-       REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
-#endif
-
-       REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
-#ifdef BCM_CNIC
-       REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
-       REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
-       REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
-#endif
-
-       if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
-               REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
-
-       /* let the HW do it's magic ... */
-       msleep(100);
-       /* finish PXP init */
-       val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
-       if (val != 1) {
-               BNX2X_ERR("PXP2 CFG failed\n");
-               return -EBUSY;
-       }
-       val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
-       if (val != 1) {
-               BNX2X_ERR("PXP2 RD_INIT failed\n");
-               return -EBUSY;
-       }
-
-       REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
-       REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
-
-       bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
-
-       /* clean the DMAE memory */
-       bp->dmae_ready = 1;
-       bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
-
-       bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
-       bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
-       bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
-       bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
-
-       bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
-       bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
-       bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
-       bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
-
-       bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
-
-#ifdef BCM_CNIC
-       wb_write[0] = 0;
-       wb_write[1] = 0;
-       for (i = 0; i < 64; i++) {
-               REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
-               bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
-
-               if (CHIP_IS_E1H(bp)) {
-                       REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
-                       bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
-                                         wb_write, 2);
-               }
-       }
-#endif
-       /* soft reset pulse */
-       REG_WR(bp, QM_REG_SOFT_RESET, 1);
-       REG_WR(bp, QM_REG_SOFT_RESET, 0);
-
-#ifdef BCM_CNIC
-       bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
-#endif
-
-       bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
-       REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
-       if (!CHIP_REV_IS_SLOW(bp)) {
-               /* enable hw interrupt from doorbell Q */
-               REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
-       }
-
-       bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
-       bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
-       REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
-#ifndef BCM_CNIC
-       /* set NIC mode */
-       REG_WR(bp, PRS_REG_NIC_MODE, 1);
-#endif
-       if (CHIP_IS_E1H(bp))
-               REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
-
-       bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
-       bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
-       bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
-       bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
-
-       bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
-       bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
-       bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
-       bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
-
-       bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
-       bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
-       bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
-       bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
-
-       /* sync semi rtc */
-       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
-              0x80000000);
-       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
-              0x80000000);
-
-       bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
-       bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
-       bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
-
-       REG_WR(bp, SRC_REG_SOFT_RST, 1);
-       for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
-               REG_WR(bp, i, random32());
-       bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
-#ifdef BCM_CNIC
-       REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
-       REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
-       REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
-       REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
-       REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
-       REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
-       REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
-       REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
-       REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
-       REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
-#endif
-       REG_WR(bp, SRC_REG_SOFT_RST, 0);
-
-       if (sizeof(union cdu_context) != 1024)
-               /* we currently assume that a context is 1024 bytes */
-               dev_alert(&bp->pdev->dev, "please adjust the size "
-                                         "of cdu_context(%ld)\n",
-                        (long)sizeof(union cdu_context));
-
-       bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
-       val = (4 << 24) + (0 << 12) + 1024;
-       REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
-
-       bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
-       REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
-       /* enable context validation interrupt from CFC */
-       REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
-
-       /* set the thresholds to prevent CFC/CDU race */
-       REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
-
-       bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
-       bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
-
-       bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
-       /* Reset PCIE errors for debug */
-       REG_WR(bp, 0x2814, 0xffffffff);
-       REG_WR(bp, 0x3820, 0xffffffff);
-
-       bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
-       bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
-       bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
-       bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
-
-       bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
-       if (CHIP_IS_E1H(bp)) {
-               REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
-               REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
-       }
-
-       if (CHIP_REV_IS_SLOW(bp))
-               msleep(200);
-
-       /* finish CFC init */
-       val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
-       if (val != 1) {
-               BNX2X_ERR("CFC LL_INIT failed\n");
-               return -EBUSY;
-       }
-       val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
-       if (val != 1) {
-               BNX2X_ERR("CFC AC_INIT failed\n");
-               return -EBUSY;
-       }
-       val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
-       if (val != 1) {
-               BNX2X_ERR("CFC CAM_INIT failed\n");
-               return -EBUSY;
-       }
-       REG_WR(bp, CFC_REG_DEBUG0, 0);
-
-       /* read NIG statistic
-          to see if this is our first up since powerup */
-       bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
-       val = *bnx2x_sp(bp, wb_data[0]);
-
-       /* do internal memory self test */
-       if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
-               BNX2X_ERR("internal mem self test failed\n");
-               return -EBUSY;
-       }
-
-       switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
-       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
-       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
-       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
-       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
-               bp->port.need_hw_lock = 1;
-               break;
-
-       default:
-               break;
-       }
-
-       bnx2x_setup_fan_failure_detection(bp);
-
-       /* clear PXP2 attentions */
-       REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
-
-       enable_blocks_attention(bp);
-       if (CHIP_PARITY_SUPPORTED(bp))
-               enable_blocks_parity(bp);
-
-       if (!BP_NOMCP(bp)) {
-               bnx2x_acquire_phy_lock(bp);
-               bnx2x_common_init_phy(bp, bp->common.shmem_base);
-               bnx2x_release_phy_lock(bp);
-       } else
-               BNX2X_ERR("Bootcode is missing - can not initialize link\n");
-
-       return 0;
-}
-
-static int bnx2x_init_port(struct bnx2x *bp)
-{
-       int port = BP_PORT(bp);
-       int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
-       u32 low, high;
-       u32 val;
-
-       DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);
-
-       REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
-
-       bnx2x_init_block(bp, PXP_BLOCK, init_stage);
-       bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
-
-       bnx2x_init_block(bp, TCM_BLOCK, init_stage);
-       bnx2x_init_block(bp, UCM_BLOCK, init_stage);
-       bnx2x_init_block(bp, CCM_BLOCK, init_stage);
-       bnx2x_init_block(bp, XCM_BLOCK, init_stage);
-
-#ifdef BCM_CNIC
-       REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
-
-       bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
-       REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
-       REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
-#endif
-
-       bnx2x_init_block(bp, DQ_BLOCK, init_stage);
-
-       bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
-       if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
-               /* no pause for emulation and FPGA */
-               low = 0;
-               high = 513;
-       } else {
-               if (IS_E1HMF(bp))
-                       low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
-               else if (bp->dev->mtu > 4096) {
-                       if (bp->flags & ONE_PORT_FLAG)
-                               low = 160;
-                       else {
-                               val = bp->dev->mtu;
-                               /* (24*1024 + val*4)/256 */
-                               low = 96 + (val/64) + ((val % 64) ? 1 : 0);
-                       }
-               } else
-                       low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
-               high = low + 56;        /* 14*1024/256 */
-       }
-       REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
-       REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
-
-
-       bnx2x_init_block(bp, PRS_BLOCK, init_stage);
-
-       bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
-       bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
-       bnx2x_init_block(bp, USDM_BLOCK, init_stage);
-       bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
-
-       bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
-       bnx2x_init_block(bp, USEM_BLOCK, init_stage);
-       bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
-       bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
-
-       bnx2x_init_block(bp, UPB_BLOCK, init_stage);
-       bnx2x_init_block(bp, XPB_BLOCK, init_stage);
-
-       bnx2x_init_block(bp, PBF_BLOCK, init_stage);
-
-       /* configure PBF to work without PAUSE mtu 9000 */
-       REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
-
-       /* update threshold */
-       REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
-       /* update init credit */
-       REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
-
-       /* probe changes */
-       REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
-       msleep(5);
-       REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
-
-#ifdef BCM_CNIC
-       bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
-#endif
-       bnx2x_init_block(bp, CDU_BLOCK, init_stage);
-       bnx2x_init_block(bp, CFC_BLOCK, init_stage);
-
-       if (CHIP_IS_E1(bp)) {
-               REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
-               REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
-       }
-       bnx2x_init_block(bp, HC_BLOCK, init_stage);
-
-       bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
-       /* init aeu_mask_attn_func_0/1:
-        *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
-        *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
-        *             bits 4-7 are used for "per vn group attention" */
-       REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
-              (IS_E1HMF(bp) ? 0xF7 : 0x7));
-
-       bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
-       bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
-       bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
-       bnx2x_init_block(bp, DBU_BLOCK, init_stage);
-       bnx2x_init_block(bp, DBG_BLOCK, init_stage);
-
-       bnx2x_init_block(bp, NIG_BLOCK, init_stage);
-
-       REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
-
-       if (CHIP_IS_E1H(bp)) {
-               /* 0x2 disable e1hov, 0x1 enable */
-               REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
-                      (IS_E1HMF(bp) ? 0x1 : 0x2));
-
-               {
-                       REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
-                       REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
-                       REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
-               }
-       }
-
-       bnx2x_init_block(bp, MCP_BLOCK, init_stage);
-       bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
-
-       switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
-       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
-               {
-               u32 swap_val, swap_override, aeu_gpio_mask, offset;
-
-               bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
-                              MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
-
-               /* The GPIO should be swapped if the swap register is
-                  set and active */
-               swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
-               swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
-
-               /* Select function upon port-swap configuration */
-               if (port == 0) {
-                       offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
-                       aeu_gpio_mask = (swap_val && swap_override) ?
-                               AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
-                               AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
-               } else {
-                       offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
-                       aeu_gpio_mask = (swap_val && swap_override) ?
-                               AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
-                               AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
-               }
-               val = REG_RD(bp, offset);
-               /* add GPIO3 to group */
-               val |= aeu_gpio_mask;
-               REG_WR(bp, offset, val);
-               }
-               break;
-
-       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
-       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
-               /* add SPIO 5 to group 0 */
-               {
-               u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
-                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
-               val = REG_RD(bp, reg_addr);
-               val |= AEU_INPUTS_ATTN_BITS_SPIO5;
-               REG_WR(bp, reg_addr, val);
-               }
-               break;
-
-       default:
-               break;
-       }
-
-       bnx2x__link_reset(bp);
-
-       return 0;
-}
-
-#define ILT_PER_FUNC           (768/2)
-#define FUNC_ILT_BASE(func)    (func * ILT_PER_FUNC)
-/* the phys address is shifted right 12 bits and has an added
-   1=valid bit added to the 53rd bit
-   then since this is a wide register(TM)
-   we split it into two 32 bit writes
- */
-#define ONCHIP_ADDR1(x)                ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
-#define ONCHIP_ADDR2(x)                ((u32)((1 << 20) | ((u64)x >> 44)))
-#define PXP_ONE_ILT(x)         (((x) << 10) | x)
-#define PXP_ILT_RANGE(f, l)    (((l) << 10) | f)
-
-#ifdef BCM_CNIC
-#define CNIC_ILT_LINES         127
-#define CNIC_CTX_PER_ILT       16
-#else
-#define CNIC_ILT_LINES         0
-#endif
-
-static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
-{
-       int reg;
-
-       if (CHIP_IS_E1H(bp))
-               reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
-       else /* E1 */
-               reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
-
-       bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
-}
-
-static int bnx2x_init_func(struct bnx2x *bp)
-{
-       int port = BP_PORT(bp);
-       int func = BP_FUNC(bp);
-       u32 addr, val;
-       int i;
-
-       DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
-
-       /* set MSI reconfigure capability */
-       addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
-       val = REG_RD(bp, addr);
-       val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
-       REG_WR(bp, addr, val);
-
-       i = FUNC_ILT_BASE(func);
-
-       bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
-       if (CHIP_IS_E1H(bp)) {
-               REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
-               REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
-       } else /* E1 */
-               REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
-                      PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
-
-#ifdef BCM_CNIC
-       i += 1 + CNIC_ILT_LINES;
-       bnx2x_ilt_wr(bp, i, bp->timers_mapping);
-       if (CHIP_IS_E1(bp))
-               REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
-       else {
-               REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
-               REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
-       }
-
-       i++;
-       bnx2x_ilt_wr(bp, i, bp->qm_mapping);
-       if (CHIP_IS_E1(bp))
-               REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
-       else {
-               REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
-               REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
-       }
-
-       i++;
-       bnx2x_ilt_wr(bp, i, bp->t1_mapping);
-       if (CHIP_IS_E1(bp))
-               REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
-       else {
-               REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
-               REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
-       }
-
-       /* tell the searcher where the T2 table is */
-       REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
-
-       bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
-                   U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
-
-       bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
-                   U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
-                   U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
-
-       REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
-#endif
-
-       if (CHIP_IS_E1H(bp)) {
-               bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
-               bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
-               bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
-               bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
-               bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
-               bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
-               bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
-               bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
-               bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
-
-               REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
-               REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
-       }
-
-       /* HC init per function */
-       if (CHIP_IS_E1H(bp)) {
-               REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
-
-               REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
-               REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
-       }
-       bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
-
-       /* Reset PCIE errors for debug */
-       REG_WR(bp, 0x2114, 0xffffffff);
-       REG_WR(bp, 0x2120, 0xffffffff);
-
-       return 0;
-}
-
-static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
-{
-       int i, rc = 0;
-
-       DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
-          BP_FUNC(bp), load_code);
-
-       bp->dmae_ready = 0;
-       mutex_init(&bp->dmae_mutex);
-       rc = bnx2x_gunzip_init(bp);
-       if (rc)
-               return rc;
-
-       switch (load_code) {
-       case FW_MSG_CODE_DRV_LOAD_COMMON:
-               rc = bnx2x_init_common(bp);
-               if (rc)
-                       goto init_hw_err;
-               /* no break */
-
-       case FW_MSG_CODE_DRV_LOAD_PORT:
-               bp->dmae_ready = 1;
-               rc = bnx2x_init_port(bp);
-               if (rc)
-                       goto init_hw_err;
-               /* no break */
-
-       case FW_MSG_CODE_DRV_LOAD_FUNCTION:
-               bp->dmae_ready = 1;
-               rc = bnx2x_init_func(bp);
-               if (rc)
-                       goto init_hw_err;
-               break;
-
-       default:
-               BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
-               break;
-       }
-
-       if (!BP_NOMCP(bp)) {
-               int func = BP_FUNC(bp);
-
-               bp->fw_drv_pulse_wr_seq =
-                               (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
-                                DRV_PULSE_SEQ_MASK);
-               DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
-       }
-
-       /* this needs to be done before gunzip end */
-       bnx2x_zero_def_sb(bp);
-       for_each_queue(bp, i)
-               bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
-#ifdef BCM_CNIC
-       bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
-#endif
-
-init_hw_err:
-       bnx2x_gunzip_end(bp);
-
-       return rc;
-}
-
-static void bnx2x_free_mem(struct bnx2x *bp)
-{
-
-#define BNX2X_PCI_FREE(x, y, size) \
-       do { \
-               if (x) { \
-                       dma_free_coherent(&bp->pdev->dev, size, x, y); \
-                       x = NULL; \
-                       y = 0; \
-               } \
-       } while (0)
-
-#define BNX2X_FREE(x) \
-       do { \
-               if (x) { \
-                       vfree(x); \
-                       x = NULL; \
-               } \
-       } while (0)
-
-       int i;
-
-       /* fastpath */
-       /* Common */
-       for_each_queue(bp, i) {
-
-               /* status blocks */
-               BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
-                              bnx2x_fp(bp, i, status_blk_mapping),
-                              sizeof(struct host_status_block));
-       }
-       /* Rx */
-       for_each_queue(bp, i) {
-
-               /* fastpath rx rings: rx_buf rx_desc rx_comp */
-               BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
-               BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
-                              bnx2x_fp(bp, i, rx_desc_mapping),
-                              sizeof(struct eth_rx_bd) * NUM_RX_BD);
-
-               BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
-                              bnx2x_fp(bp, i, rx_comp_mapping),
-                              sizeof(struct eth_fast_path_rx_cqe) *
-                              NUM_RCQ_BD);
-
-               /* SGE ring */
-               BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
-               BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
-                              bnx2x_fp(bp, i, rx_sge_mapping),
-                              BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
-       }
-       /* Tx */
-       for_each_queue(bp, i) {
-
-               /* fastpath tx rings: tx_buf tx_desc */
-               BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
-               BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
-                              bnx2x_fp(bp, i, tx_desc_mapping),
-                              sizeof(union eth_tx_bd_types) * NUM_TX_BD);
-       }
-       /* end of fastpath */
-
-       BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
-                      sizeof(struct host_def_status_block));
-
-       BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
-                      sizeof(struct bnx2x_slowpath));
-
-#ifdef BCM_CNIC
-       BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
-       BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
-       BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
-       BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
-       BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
-                      sizeof(struct host_status_block));
-#endif
-       BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
-
-#undef BNX2X_PCI_FREE
-#undef BNX2X_KFREE
-}
-
-static int bnx2x_alloc_mem(struct bnx2x *bp)
-{
-
-#define BNX2X_PCI_ALLOC(x, y, size) \
-       do { \
-               x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
-               if (x == NULL) \
-                       goto alloc_mem_err; \
-               memset(x, 0, size); \
-       } while (0)
-
-#define BNX2X_ALLOC(x, size) \
-       do { \
-               x = vmalloc(size); \
-               if (x == NULL) \
-                       goto alloc_mem_err; \
-               memset(x, 0, size); \
-       } while (0)
-
-       int i;
-
-       /* fastpath */
-       /* Common */
-       for_each_queue(bp, i) {
-               bnx2x_fp(bp, i, bp) = bp;
-
-               /* status blocks */
-               BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
-                               &bnx2x_fp(bp, i, status_blk_mapping),
-                               sizeof(struct host_status_block));
-       }
-       /* Rx */
-       for_each_queue(bp, i) {
-
-               /* fastpath rx rings: rx_buf rx_desc rx_comp */
-               BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
-                               sizeof(struct sw_rx_bd) * NUM_RX_BD);
-               BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
-                               &bnx2x_fp(bp, i, rx_desc_mapping),
-                               sizeof(struct eth_rx_bd) * NUM_RX_BD);
-
-               BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
-                               &bnx2x_fp(bp, i, rx_comp_mapping),
-                               sizeof(struct eth_fast_path_rx_cqe) *
-                               NUM_RCQ_BD);
-
-               /* SGE ring */
-               BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
-                               sizeof(struct sw_rx_page) * NUM_RX_SGE);
-               BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
-                               &bnx2x_fp(bp, i, rx_sge_mapping),
-                               BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
-       }
-       /* Tx */
-       for_each_queue(bp, i) {
-
-               /* fastpath tx rings: tx_buf tx_desc */
-               BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
-                               sizeof(struct sw_tx_bd) * NUM_TX_BD);
-               BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
-                               &bnx2x_fp(bp, i, tx_desc_mapping),
-                               sizeof(union eth_tx_bd_types) * NUM_TX_BD);
-       }
-       /* end of fastpath */
-
-       BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
-                       sizeof(struct host_def_status_block));
-
-       BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
-                       sizeof(struct bnx2x_slowpath));
-
-#ifdef BCM_CNIC
-       BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
-
-       /* allocate searcher T2 table
-          we allocate 1/4 of alloc num for T2
-         (which is not entered into the ILT) */
-       BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
-
-       /* Initialize T2 (for 1024 connections) */
-       for (i = 0; i < 16*1024; i += 64)
-               *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
-
-       /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
-       BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
-
-       /* QM queues (128*MAX_CONN) */
-       BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
-
-       BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
-                       sizeof(struct host_status_block));
-#endif
-
-       /* Slow path ring */
-       BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
-
-       return 0;
-
-alloc_mem_err:
-       bnx2x_free_mem(bp);
-       return -ENOMEM;
-
-#undef BNX2X_PCI_ALLOC
-#undef BNX2X_ALLOC
-}
-
-/*
- * bnx2x_free_tx_skbs - reclaim every Tx packet still queued on each
- * fastpath ring by walking the sw consumer index up to the sw producer
- * and freeing one packet per step via bnx2x_free_tx_pkt().
- */
-static void bnx2x_free_tx_skbs(struct bnx2x *bp)
-{
-       int i;
-
-       for_each_queue(bp, i) {
-               struct bnx2x_fastpath *fp = &bp->fp[i];
-
-               u16 bd_cons = fp->tx_bd_cons;
-               u16 sw_prod = fp->tx_pkt_prod;
-               u16 sw_cons = fp->tx_pkt_cons;
-
-               /* free each packet between the sw consumer and producer;
-                * bd_cons tracks the BD consumer but is not used afterwards
-                */
-               while (sw_cons != sw_prod) {
-                       bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
-                       sw_cons++;
-               }
-       }
-}
-
-/*
- * bnx2x_free_rx_skbs - DMA-unmap and free every posted Rx skb on all
- * queues, then release the TPA aggregation pool for queues that have
- * TPA enabled.
- */
-static void bnx2x_free_rx_skbs(struct bnx2x *bp)
-{
-       int i, j;
-
-       for_each_queue(bp, j) {
-               struct bnx2x_fastpath *fp = &bp->fp[j];
-
-               for (i = 0; i < NUM_RX_BD; i++) {
-                       struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
-                       struct sk_buff *skb = rx_buf->skb;
-
-                       /* empty slot - no buffer was posted here */
-                       if (skb == NULL)
-                               continue;
-
-                       dma_unmap_single(&bp->pdev->dev,
-                                        dma_unmap_addr(rx_buf, mapping),
-                                        bp->rx_buf_size, DMA_FROM_DEVICE);
-
-                       rx_buf->skb = NULL;
-                       dev_kfree_skb(skb);
-               }
-               /* the number of aggregation queues differs per chip rev */
-               if (!fp->disable_tpa)
-                       bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
-                                           ETH_MAX_AGGREGATION_QUEUES_E1 :
-                                           ETH_MAX_AGGREGATION_QUEUES_E1H);
-       }
-}
-
-/* bnx2x_free_skbs - free all driver-owned Tx and Rx skbs on every queue */
-static void bnx2x_free_skbs(struct bnx2x *bp)
-{
-       bnx2x_free_tx_skbs(bp);
-       bnx2x_free_rx_skbs(bp);
-}
-
-/*
- * bnx2x_free_msix_irqs - release the slowpath MSI-X vector (table entry 0)
- * and one vector per fastpath queue.  With BCM_CNIC compiled in, table
- * entry 1 belongs to CNIC, so fastpath vectors start one entry later.
- */
-static void bnx2x_free_msix_irqs(struct bnx2x *bp)
-{
-       int i, offset = 1;
-
-       free_irq(bp->msix_table[0].vector, bp->dev);
-       DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
-          bp->msix_table[0].vector);
-
-#ifdef BCM_CNIC
-       /* skip over the CNIC vector - it is not requested/freed here */
-       offset++;
-#endif
-       for_each_queue(bp, i) {
-               DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
-                  "state %x\n", i, bp->msix_table[i + offset].vector,
-                  bnx2x_fp(bp, i, state));
-
-               free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
-       }
-}
-
-/*
- * bnx2x_free_irq - free the registered IRQ(s) and turn off MSI-X/MSI.
- * @disable_only: when true, only disable the PCI MSI/MSI-X capability
- * without calling free_irq() - used on error paths where the vectors
- * were enabled but never actually requested.
- */
-static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
-{
-       if (bp->flags & USING_MSIX_FLAG) {
-               if (!disable_only)
-                       bnx2x_free_msix_irqs(bp);
-               pci_disable_msix(bp->pdev);
-               bp->flags &= ~USING_MSIX_FLAG;
-
-       } else if (bp->flags & USING_MSI_FLAG) {
-               if (!disable_only)
-                       free_irq(bp->pdev->irq, bp->dev);
-               pci_disable_msi(bp->pdev);
-               bp->flags &= ~USING_MSI_FLAG;
-
-       } else if (!disable_only)
-               /* legacy INTx */
-               free_irq(bp->pdev->irq, bp->dev);
-}
-
-/*
- * bnx2x_enable_msix - populate the MSI-X table (entry 0 = slowpath,
- * optional entry 1 = CNIC, then one entry per fastpath queue) and enable
- * MSI-X on the device.  If fewer vectors are available than requested,
- * retries with that count and shrinks bp->num_queues accordingly.
- *
- * Returns 0 on success (and sets USING_MSIX_FLAG), otherwise the
- * non-zero pci_enable_msix() error code.
- */
-static int bnx2x_enable_msix(struct bnx2x *bp)
-{
-       int i, rc, offset = 1;
-       int igu_vec = 0;
-
-       bp->msix_table[0].entry = igu_vec;
-       DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
-
-#ifdef BCM_CNIC
-       igu_vec = BP_L_ID(bp) + offset;
-       bp->msix_table[1].entry = igu_vec;
-       DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
-       offset++;
-#endif
-       for_each_queue(bp, i) {
-               igu_vec = BP_L_ID(bp) + offset + i;
-               bp->msix_table[i + offset].entry = igu_vec;
-               DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
-                  "(fastpath #%u)\n", i + offset, igu_vec, i);
-       }
-
-       rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
-                            BNX2X_NUM_QUEUES(bp) + offset);
-
-       /*
-        * reconfigure number of tx/rx queues according to available
-        * MSI-X vectors
-        */
-       if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
-               /* vectors available for FP */
-               int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
-
-               DP(NETIF_MSG_IFUP,
-                  "Trying to use less MSI-X vectors: %d\n", rc);
-
-               /* a positive rc is the vector count the device can
-                * actually provide - retry with exactly that many
-                */
-               rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
-
-               if (rc) {
-                       DP(NETIF_MSG_IFUP,
-                          "MSI-X is not attainable  rc %d\n", rc);
-                       return rc;
-               }
-
-               bp->num_queues = min(bp->num_queues, fp_vec);
-
-               DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
-                                 bp->num_queues);
-       } else if (rc) {
-               DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
-               return rc;
-       }
-
-       bp->flags |= USING_MSIX_FLAG;
-
-       return 0;
-}
-
-/*
- * bnx2x_req_msix_irqs - request the slowpath MSI-X interrupt and one
- * fastpath interrupt per queue (named "<dev>-fp-<i>").  On any fastpath
- * failure all previously requested vectors are released.
- *
- * Returns 0 on success, -EBUSY if any request_irq() fails.
- */
-static int bnx2x_req_msix_irqs(struct bnx2x *bp)
-{
-       int i, rc, offset = 1;
-
-       rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
-                        bp->dev->name, bp->dev);
-       if (rc) {
-               BNX2X_ERR("request sp irq failed\n");
-               return -EBUSY;
-       }
-
-#ifdef BCM_CNIC
-       /* entry 1 of the table is reserved for CNIC */
-       offset++;
-#endif
-       for_each_queue(bp, i) {
-               struct bnx2x_fastpath *fp = &bp->fp[i];
-               snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
-                        bp->dev->name, i);
-
-               rc = request_irq(bp->msix_table[i + offset].vector,
-                                bnx2x_msix_fp_int, 0, fp->name, fp);
-               if (rc) {
-                       BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
-                       /* undo the vectors requested so far (incl. sp) */
-                       bnx2x_free_msix_irqs(bp);
-                       return -EBUSY;
-               }
-
-               fp->state = BNX2X_FP_STATE_IRQ;
-       }
-
-       i = BNX2X_NUM_QUEUES(bp);
-       netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
-              " ... fp[%d] %d\n",
-              bp->msix_table[0].vector,
-              0, bp->msix_table[offset].vector,
-              i - 1, bp->msix_table[offset + i - 1].vector);
-
-       return 0;
-}
-
-/*
- * bnx2x_enable_msi - enable single-message MSI on the device and set
- * USING_MSI_FLAG.  Returns 0 on success, -1 when MSI is not attainable.
- */
-static int bnx2x_enable_msi(struct bnx2x *bp)
-{
-       int rc;
-
-       rc = pci_enable_msi(bp->pdev);
-       if (rc) {
-               DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
-               return -1;
-       }
-       bp->flags |= USING_MSI_FLAG;
-
-       return 0;
-}
-
-/*
- * bnx2x_req_irq - request the single (MSI or legacy INTx) interrupt.
- * Legacy INTx lines may be shared, hence IRQF_SHARED; MSI is exclusive.
- * On success marks fastpath 0 as having its IRQ attached.
- *
- * Returns the request_irq() result (0 on success).
- */
-static int bnx2x_req_irq(struct bnx2x *bp)
-{
-       unsigned long flags;
-       int rc;
-
-       if (bp->flags & USING_MSI_FLAG)
-               flags = 0;
-       else
-               flags = IRQF_SHARED;
-
-       rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
-                        bp->dev->name, bp->dev);
-       if (!rc)
-               bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
-
-       return rc;
-}
-
-/* bnx2x_napi_enable - enable NAPI polling on every fastpath queue */
-static void bnx2x_napi_enable(struct bnx2x *bp)
-{
-       int i;
-
-       for_each_queue(bp, i)
-               napi_enable(&bnx2x_fp(bp, i, napi));
-}
-
-/* bnx2x_napi_disable - disable NAPI polling on every fastpath queue */
-static void bnx2x_napi_disable(struct bnx2x *bp)
-{
-       int i;
-
-       for_each_queue(bp, i)
-               napi_disable(&bnx2x_fp(bp, i, napi));
-}
-
-/*
- * bnx2x_netif_start - drop the interrupt semaphore and, once it reaches
- * zero, re-enable NAPI, device interrupts and (when fully OPEN) the Tx
- * queues.  No-op while other holders still keep intr_sem raised.
- */
-static void bnx2x_netif_start(struct bnx2x *bp)
-{
-       int intr_sem;
-
-       /* nonzero iff the semaphore dropped to zero on this call */
-       intr_sem = atomic_dec_and_test(&bp->intr_sem);
-       smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
-
-       if (intr_sem) {
-               if (netif_running(bp->dev)) {
-                       bnx2x_napi_enable(bp);
-                       bnx2x_int_enable(bp);
-                       if (bp->state == BNX2X_STATE_OPEN)
-                               netif_tx_wake_all_queues(bp->dev);
-               }
-       }
-}
-
-/*
- * bnx2x_netif_stop - quiesce the interface: synchronously disable device
- * interrupts (optionally at HW level too), stop NAPI and stop Tx.
- */
-static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
-{
-       bnx2x_int_disable_sync(bp, disable_hw);
-       bnx2x_napi_disable(bp);
-       netif_tx_disable(bp->dev);
-}
-
-/*
- * Init service functions
- */
-
-/**
- * Sets a MAC in a CAM for a few L2 Clients for E1 chip
- *
- * Posts a SET_MAC ramrod; completion is awaited by the callers
- * (via bnx2x_wait_ramrod on set_mac_pending).
- *
- * @param bp driver descriptor
- * @param set set or clear an entry (1 or 0)
- * @param mac pointer to a buffer containing a MAC
- * @param cl_bit_vec bit vector of clients to register a MAC for
- * @param cam_offset offset in a CAM to use
- * @param with_bcast set broadcast MAC as well
- */
-static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
-                                     u32 cl_bit_vec, u8 cam_offset,
-                                     u8 with_bcast)
-{
-       struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
-       int port = BP_PORT(bp);
-
-       /* CAM allocation
-        * unicasts 0-31:port0 32-63:port1
-        * multicast 64-127:port0 128-191:port1
-        */
-       config->hdr.length = 1 + (with_bcast ? 1 : 0);
-       config->hdr.offset = cam_offset;
-       config->hdr.client_id = 0xff;
-       config->hdr.reserved1 = 0;
-
-       /* primary MAC */
-       config->config_table[0].cam_entry.msb_mac_addr =
-                                       swab16(*(u16 *)&mac[0]);
-       config->config_table[0].cam_entry.middle_mac_addr =
-                                       swab16(*(u16 *)&mac[2]);
-       config->config_table[0].cam_entry.lsb_mac_addr =
-                                       swab16(*(u16 *)&mac[4]);
-       config->config_table[0].cam_entry.flags = cpu_to_le16(port);
-       if (set)
-               config->config_table[0].target_table_entry.flags = 0;
-       else
-               /* clearing: mark the CAM entry invalid instead */
-               CAM_INVALIDATE(config->config_table[0]);
-       config->config_table[0].target_table_entry.clients_bit_vector =
-                                               cpu_to_le32(cl_bit_vec);
-       config->config_table[0].target_table_entry.vlan_id = 0;
-
-       DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
-          (set ? "setting" : "clearing"),
-          config->config_table[0].cam_entry.msb_mac_addr,
-          config->config_table[0].cam_entry.middle_mac_addr,
-          config->config_table[0].cam_entry.lsb_mac_addr);
-
-       /* broadcast */
-       if (with_bcast) {
-               config->config_table[1].cam_entry.msb_mac_addr =
-                       cpu_to_le16(0xffff);
-               config->config_table[1].cam_entry.middle_mac_addr =
-                       cpu_to_le16(0xffff);
-               config->config_table[1].cam_entry.lsb_mac_addr =
-                       cpu_to_le16(0xffff);
-               config->config_table[1].cam_entry.flags = cpu_to_le16(port);
-               if (set)
-                       config->config_table[1].target_table_entry.flags =
-                                       TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
-               else
-                       CAM_INVALIDATE(config->config_table[1]);
-               config->config_table[1].target_table_entry.clients_bit_vector =
-                                                       cpu_to_le32(cl_bit_vec);
-               config->config_table[1].target_table_entry.vlan_id = 0;
-       }
-
-       bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
-                     U64_HI(bnx2x_sp_mapping(bp, mac_config)),
-                     U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
-}
-
-/**
- * Sets a MAC in a CAM for a few L2 Clients for E1H chip
- *
- * Posts a SET_MAC ramrod; completion is awaited by the callers
- * (via bnx2x_wait_ramrod on set_mac_pending).
- *
- * @param bp driver descriptor
- * @param set set or clear an entry (1 or 0)
- * @param mac pointer to a buffer containing a MAC
- * @param cl_bit_vec bit vector of clients to register a MAC for
- * @param cam_offset offset in a CAM to use
- */
-static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
-                                      u32 cl_bit_vec, u8 cam_offset)
-{
-       struct mac_configuration_cmd_e1h *config =
-               (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
-
-       config->hdr.length = 1;
-       config->hdr.offset = cam_offset;
-       config->hdr.client_id = 0xff;
-       config->hdr.reserved1 = 0;
-
-       /* primary MAC */
-       config->config_table[0].msb_mac_addr =
-                                       swab16(*(u16 *)&mac[0]);
-       config->config_table[0].middle_mac_addr =
-                                       swab16(*(u16 *)&mac[2]);
-       config->config_table[0].lsb_mac_addr =
-                                       swab16(*(u16 *)&mac[4]);
-       config->config_table[0].clients_bit_vector =
-                                       cpu_to_le32(cl_bit_vec);
-       config->config_table[0].vlan_id = 0;
-       config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
-       if (set)
-               config->config_table[0].flags = BP_PORT(bp);
-       else
-               /* ACTION_TYPE flag marks the entry for removal */
-               config->config_table[0].flags =
-                               MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
-
-       DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
-          (set ? "setting" : "clearing"),
-          config->config_table[0].msb_mac_addr,
-          config->config_table[0].middle_mac_addr,
-          config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
-
-       bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
-                     U64_HI(bnx2x_sp_mapping(bp, mac_config)),
-                     U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
-}
-
-/*
- * bnx2x_wait_ramrod - wait (or poll) until *state_p becomes @state, for
- * up to ~5000 iterations of 1 ms sleeps.  In poll mode the Rx rings are
- * serviced manually so the completion can be observed without interrupts.
- *
- * Returns 0 when the state was reached, -EIO on driver panic,
- * -EBUSY on timeout.
- */
-static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
-                            int *state_p, int poll)
-{
-       /* can take a while if any port is running */
-       int cnt = 5000;
-
-       DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
-          poll ? "polling" : "waiting", state, idx);
-
-       might_sleep();
-       while (cnt--) {
-               if (poll) {
-                       bnx2x_rx_int(bp->fp, 10);
-                       /* if index is different from 0
-                        * the reply for some commands will
-                        * be on the non default queue
-                        */
-                       if (idx)
-                               bnx2x_rx_int(&bp->fp[idx], 10);
-               }
-
-               mb(); /* state is changed by bnx2x_sp_event() */
-               if (*state_p == state) {
-#ifdef BNX2X_STOP_ON_ERROR
-                       DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
-#endif
-                       return 0;
-               }
-
-               msleep(1);
-
-               if (bp->panic)
-                       return -EIO;
-       }
-
-       /* timeout! */
-       BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
-                 poll ? "polling" : "waiting", state, idx);
-#ifdef BNX2X_STOP_ON_ERROR
-       bnx2x_panic();
-#endif
-
-       return -EBUSY;
-}
-
-/*
- * bnx2x_set_eth_mac_addr_e1h - program (or clear) the netdev MAC for the
- * E1H chip and wait for the ramrod completion.  Clearing uses poll mode.
- */
-static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
-{
-       bp->set_mac_pending++;
-       smp_wmb();
-
-       bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
-                                  (1 << bp->fp->cl_id), BP_FUNC(bp));
-
-       /* Wait for a completion */
-       bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
-}
-
-/*
- * bnx2x_set_eth_mac_addr_e1 - program (or clear) the netdev MAC plus the
- * broadcast entry for the E1 chip and wait for the ramrod completion.
- * Unicast CAM entries: 0-31 for port0, 32-63 for port1.
- */
-static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
-{
-       bp->set_mac_pending++;
-       smp_wmb();
-
-       bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
-                                 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
-                                 1);
-
-       /* Wait for a completion */
-       bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
-}
-
-#ifdef BCM_CNIC
-/**
- * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
- * MAC(s). This function will wait until the ramrod completion
- * returns.
- *
- * @param bp driver handle
- * @param set set or clear the CAM entry
- *
- * @return 0 if success.
- *         NOTE(review): as written the function always returns 0; the
- *         bnx2x_wait_ramrod() result (e.g. -EBUSY on timeout) is
- *         discarded - confirm whether callers should see it.
- */
-static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
-{
-       u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
-
-       bp->set_mac_pending++;
-       smp_wmb();
-
-       /* Send a SET_MAC ramrod */
-       if (CHIP_IS_E1(bp))
-               bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
-                                 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
-                                 1);
-       else
-               /* CAM allocation for E1H
-               * unicasts: by func number
-               * multicast: 20+FUNC*20, 20 each
-               */
-               bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
-                                  cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
-
-       /* Wait for a completion when setting */
-       bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
-
-       return 0;
-}
-#endif
-
-/*
- * bnx2x_setup_leading - bring up the leading (default) connection:
- * reset its IGU state, post the PORT_SETUP ramrod and wait until
- * bp->state reaches BNX2X_STATE_OPEN.
- *
- * Returns the bnx2x_wait_ramrod() result (0 on success).
- */
-static int bnx2x_setup_leading(struct bnx2x *bp)
-{
-       int rc;
-
-       /* reset IGU state */
-       bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
-
-       /* SETUP ramrod */
-       bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
-
-       /* Wait for completion */
-       rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
-
-       return rc;
-}
-
-/*
- * bnx2x_setup_multi - bring up a non-default connection @index: reset its
- * IGU state, post the CLIENT_SETUP ramrod and wait for the fastpath to
- * reach BNX2X_FP_STATE_OPEN.
- *
- * Returns the bnx2x_wait_ramrod() result (0 on success).
- */
-static int bnx2x_setup_multi(struct bnx2x *bp, int index)
-{
-       struct bnx2x_fastpath *fp = &bp->fp[index];
-
-       /* reset IGU state */
-       bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
-
-       /* SETUP ramrod */
-       fp->state = BNX2X_FP_STATE_OPENING;
-       bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
-                     fp->cl_id, 0);
-
-       /* Wait for completion */
-       return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
-                                &(fp->state), 0);
-}
-
-static int bnx2x_poll(struct napi_struct *napi, int budget);
-
-/*
- * bnx2x_set_num_queues_msix - pick bp->num_queues for MSI-X operation
- * based on the configured RSS mode: 1 queue when RSS is disabled,
- * otherwise min(num_queues, MAX) or min(online CPUs, MAX).
- * NOTE(review): "num_queues" here is a file-scope value (presumably a
- * module parameter) defined outside this excerpt - confirm.
- */
-static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
-{
-
-       switch (bp->multi_mode) {
-       case ETH_RSS_MODE_DISABLED:
-               bp->num_queues = 1;
-               break;
-
-       case ETH_RSS_MODE_REGULAR:
-               if (num_queues)
-                       bp->num_queues = min_t(u32, num_queues,
-                                                 BNX2X_MAX_QUEUES(bp));
-               else
-                       bp->num_queues = min_t(u32, num_online_cpus(),
-                                                 BNX2X_MAX_QUEUES(bp));
-               break;
-
-
-       default:
-               bp->num_queues = 1;
-               break;
-       }
-}
-
-/*
- * bnx2x_set_num_queues - decide the queue count and interrupt scheme.
- * Forced INTx/MSI modes use a single queue; otherwise the MSI-X count is
- * derived from the RSS mode and MSI-X is attempted, falling back to one
- * queue if it cannot be enabled.  Also publishes the count to the netdev.
- * NOTE(review): "int_mode" is a file-scope value (presumably a module
- * parameter) defined outside this excerpt - confirm.
- *
- * Returns the bnx2x_enable_msix() result (0 when MSI-X is unused/OK).
- */
-static int bnx2x_set_num_queues(struct bnx2x *bp)
-{
-       int rc = 0;
-
-       switch (int_mode) {
-       case INT_MODE_INTx:
-       case INT_MODE_MSI:
-               bp->num_queues = 1;
-               DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
-               break;
-       default:
-               /* Set number of queues according to bp->multi_mode value */
-               bnx2x_set_num_queues_msix(bp);
-
-               DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
-                  bp->num_queues);
-
-               /* if we can't use MSI-X we only need one fp,
-                * so try to enable MSI-X with the requested number of fp's
-                * and fallback to MSI or legacy INTx with one fp
-                */
-               rc = bnx2x_enable_msix(bp);
-               if (rc)
-                       /* failed to enable MSI-X */
-                       bp->num_queues = 1;
-               break;
-       }
-       bp->dev->real_num_tx_queues = bp->num_queues;
-       return rc;
-}
-
-#ifdef BCM_CNIC
-static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
-static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
-#endif
-
-/* must be called with rtnl_lock */
-static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
-{
-       u32 load_code;
-       int i, rc;
-
-#ifdef BNX2X_STOP_ON_ERROR
-       if (unlikely(bp->panic))
-               return -EPERM;
-#endif
-
-       bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
-
-       rc = bnx2x_set_num_queues(bp);
-
-       if (bnx2x_alloc_mem(bp)) {
-               bnx2x_free_irq(bp, true);
-               return -ENOMEM;
-       }
-
-       for_each_queue(bp, i)
-               bnx2x_fp(bp, i, disable_tpa) =
-                                       ((bp->flags & TPA_ENABLE_FLAG) == 0);
-
-       for_each_queue(bp, i)
-               netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
-                              bnx2x_poll, 128);
-
-       bnx2x_napi_enable(bp);
-
-       if (bp->flags & USING_MSIX_FLAG) {
-               rc = bnx2x_req_msix_irqs(bp);
-               if (rc) {
-                       bnx2x_free_irq(bp, true);
-                       goto load_error1;
-               }
-       } else {
-               /* Fall to INTx if failed to enable MSI-X due to lack of
-                  memory (in bnx2x_set_num_queues()) */
-               if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
-                       bnx2x_enable_msi(bp);
-               bnx2x_ack_int(bp);
-               rc = bnx2x_req_irq(bp);
-               if (rc) {
-                       BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
-                       bnx2x_free_irq(bp, true);
-                       goto load_error1;
-               }
-               if (bp->flags & USING_MSI_FLAG) {
-                       bp->dev->irq = bp->pdev->irq;
-                       netdev_info(bp->dev, "using MSI  IRQ %d\n",
-                                   bp->pdev->irq);
-               }
-       }
-
-       /* Send LOAD_REQUEST command to MCP
-          Returns the type of LOAD command:
-          if it is the first port to be initialized
-          common blocks should be initialized, otherwise - not
-       */
-       if (!BP_NOMCP(bp)) {
-               load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
-               if (!load_code) {
-                       BNX2X_ERR("MCP response failure, aborting\n");
-                       rc = -EBUSY;
-                       goto load_error2;
-               }
-               if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
-                       rc = -EBUSY; /* other port in diagnostic mode */
-                       goto load_error2;
-               }
-
-       } else {
-               int port = BP_PORT(bp);
-
-               DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
-                  load_count[0], load_count[1], load_count[2]);
-               load_count[0]++;
-               load_count[1 + port]++;
-               DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
-                  load_count[0], load_count[1], load_count[2]);
-               if (load_count[0] == 1)
-                       load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
-               else if (load_count[1 + port] == 1)
-                       load_code = FW_MSG_CODE_DRV_LOAD_PORT;
-               else
-                       load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
-       }
-
-       if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
-           (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
-               bp->port.pmf = 1;
-       else
-               bp->port.pmf = 0;
-       DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
-
-       /* Initialize HW */
-       rc = bnx2x_init_hw(bp, load_code);
-       if (rc) {
-               BNX2X_ERR("HW init failed, aborting\n");
-               bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
-               bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
-               bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
-               goto load_error2;
-       }
-
-       /* Setup NIC internals and enable interrupts */
-       bnx2x_nic_init(bp, load_code);
-
-       if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
-           (bp->common.shmem2_base))
-               SHMEM2_WR(bp, dcc_support,
-                         (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
-                          SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
-
-       /* Send LOAD_DONE command to MCP */
-       if (!BP_NOMCP(bp)) {
-               load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
-               if (!load_code) {
-                       BNX2X_ERR("MCP response failure, aborting\n");
-                       rc = -EBUSY;
-                       goto load_error3;
-               }
-       }
-
-       bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
-
-       rc = bnx2x_setup_leading(bp);
-       if (rc) {
-               BNX2X_ERR("Setup leading failed!\n");
-#ifndef BNX2X_STOP_ON_ERROR
-               goto load_error3;
-#else
-               bp->panic = 1;
-               return -EBUSY;
-#endif
-       }
-
-       if (CHIP_IS_E1H(bp))
-               if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
-                       DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
-                       bp->flags |= MF_FUNC_DIS;
-               }
-
-       if (bp->state == BNX2X_STATE_OPEN) {
-#ifdef BCM_CNIC
-               /* Enable Timer scan */
-               REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
-#endif
-               for_each_nondefault_queue(bp, i) {
-                       rc = bnx2x_setup_multi(bp, i);
-                       if (rc)
-#ifdef BCM_CNIC
-                               goto load_error4;
-#else
-                               goto load_error3;
-#endif
-               }
-
-               if (CHIP_IS_E1(bp))
-                       bnx2x_set_eth_mac_addr_e1(bp, 1);
-               else
-                       bnx2x_set_eth_mac_addr_e1h(bp, 1);
-#ifdef BCM_CNIC
-               /* Set iSCSI L2 MAC */
-               mutex_lock(&bp->cnic_mutex);
-               if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
-                       bnx2x_set_iscsi_eth_mac_addr(bp, 1);
-                       bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
-                       bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
-                                     CNIC_SB_ID(bp));
-               }
-               mutex_unlock(&bp->cnic_mutex);
-#endif
-       }
-
-       if (bp->port.pmf)
-               bnx2x_initial_phy_init(bp, load_mode);
-
-       /* Start fast path */
-       switch (load_mode) {
-       case LOAD_NORMAL:
-               if (bp->state == BNX2X_STATE_OPEN) {
-                       /* Tx queue should be only reenabled */
-                       netif_tx_wake_all_queues(bp->dev);
-               }
-               /* Initialize the receive filter. */
-               bnx2x_set_rx_mode(bp->dev);
-               break;
-
-       case LOAD_OPEN:
-               netif_tx_start_all_queues(bp->dev);
-               if (bp->state != BNX2X_STATE_OPEN)
-                       netif_tx_disable(bp->dev);
-               /* Initialize the receive filter. */
-               bnx2x_set_rx_mode(bp->dev);
-               break;
-
-       case LOAD_DIAG:
-               /* Initialize the receive filter. */
-               bnx2x_set_rx_mode(bp->dev);
-               bp->state = BNX2X_STATE_DIAG;
-               break;
-
-       default:
-               break;
-       }
-
-       if (!bp->port.pmf)
-               bnx2x__link_status_update(bp);
-
-       /* start the timer */
-       mod_timer(&bp->timer, jiffies + bp->current_interval);
-
-#ifdef BCM_CNIC
-       bnx2x_setup_cnic_irq_info(bp);
-       if (bp->state == BNX2X_STATE_OPEN)
-               bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
-#endif
-       bnx2x_inc_load_cnt(bp);
-
-       return 0;
-
-#ifdef BCM_CNIC
-load_error4:
-       /* Disable Timer scan */
-       REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
-#endif
-load_error3:
-       bnx2x_int_disable_sync(bp, 1);
-       if (!BP_NOMCP(bp)) {
-               bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
-               bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
-       }
-       bp->port.pmf = 0;
-       /* Free SKBs, SGEs, TPA pool and driver internals */
-       bnx2x_free_skbs(bp);
-       for_each_queue(bp, i)
-               bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
-load_error2:
-       /* Release IRQs */
-       bnx2x_free_irq(bp, false);
-load_error1:
-       bnx2x_napi_disable(bp);
-       for_each_queue(bp, i)
-               netif_napi_del(&bnx2x_fp(bp, i, napi));
-       bnx2x_free_mem(bp);
-
-       return rc;
-}
-
-/*
- * bnx2x_stop_multi - tear down a non-default connection @index: post the
- * HALT ramrod, wait (in poll mode) for HALTED, then post CFC_DEL and
- * wait for CLOSED.
- *
- * Returns 0 on success, the bnx2x_wait_ramrod() error on timeout.
- */
-static int bnx2x_stop_multi(struct bnx2x *bp, int index)
-{
-       struct bnx2x_fastpath *fp = &bp->fp[index];
-       int rc;
-
-       /* halt the connection */
-       fp->state = BNX2X_FP_STATE_HALTING;
-       bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
-
-       /* Wait for completion */
-       rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
-                              &(fp->state), 1);
-       if (rc) /* timeout */
-               return rc;
-
-       /* delete cfc entry */
-       bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
-
-       /* Wait for completion */
-       rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
-                              &(fp->state), 1);
-       return rc;
-}
-
-/*
- * bnx2x_stop_leading - tear down the leading connection: HALT ramrod,
- * wait for HALTED, then PORT_DELETE ramrod whose completion is detected
- * as a change of the default status block's slowpath producer (polled
- * for up to ~500 ms; a timeout is tolerated since the chip is about to
- * be reset anyway).
- *
- * Returns 0 on success, -EBUSY on a HALT or PORT_DELETE timeout.
- */
-static int bnx2x_stop_leading(struct bnx2x *bp)
-{
-       __le16 dsb_sp_prod_idx;
-       /* if the other port is handling traffic,
-          this can take a lot of time */
-       int cnt = 500;
-       int rc;
-
-       might_sleep();
-
-       /* Send HALT ramrod */
-       bp->fp[0].state = BNX2X_FP_STATE_HALTING;
-       bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
-
-       /* Wait for completion */
-       rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
-                              &(bp->fp[0].state), 1);
-       if (rc) /* timeout */
-               return rc;
-
-       /* snapshot the producer so the PORT_DEL completion is observable */
-       dsb_sp_prod_idx = *bp->dsb_sp_prod;
-
-       /* Send PORT_DELETE ramrod */
-       bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
-
-       /* Wait for completion to arrive on default status block
-          we are going to reset the chip anyway
-          so there is not much to do if this times out
-        */
-       while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
-               if (!cnt) {
-                       DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
-                          "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
-                          *bp->dsb_sp_prod, dsb_sp_prod_idx);
-#ifdef BNX2X_STOP_ON_ERROR
-                       bnx2x_panic();
-#endif
-                       rc = -EBUSY;
-                       break;
-               }
-               cnt--;
-               msleep(1);
-               rmb(); /* Refresh the dsb_sp_prod */
-       }
-       bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
-       bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
-
-       return rc;
-}
-
-/*
- * bnx2x_reset_func - function-level reset: mask the HC attention edges,
- * (with CNIC) stop the timer scan and wait for it to drain, then clear
- * this function's ILT entries.
- */
-static void bnx2x_reset_func(struct bnx2x *bp)
-{
-       int port = BP_PORT(bp);
-       int func = BP_FUNC(bp);
-       int base, i;
-
-       /* Configure IGU */
-       REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
-       REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
-
-#ifdef BCM_CNIC
-       /* Disable Timer scan */
-       REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
-       /*
-        * Wait for at least 10ms and up to 2 second for the timers scan to
-        * complete
-        */
-       for (i = 0; i < 200; i++) {
-               msleep(10);
-               if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
-                       break;
-       }
-#endif
-       /* Clear ILT */
-       base = FUNC_ILT_BASE(func);
-       for (i = base; i < base + ILT_PER_FUNC; i++)
-               bnx2x_ilt_wr(bp, i, 0);
-}
-
-/*
- * bnx2x_reset_port - port-level reset: mask NIG interrupts, block Rx
- * traffic into the BRB, mask AEU attentions, then (after letting the
- * pipe drain) warn if BRB blocks are still occupied.
- */
-static void bnx2x_reset_port(struct bnx2x *bp)
-{
-       int port = BP_PORT(bp);
-       u32 val;
-
-       REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
-
-       /* Do not rcv packets to BRB */
-       REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
-       /* Do not direct rcv packets that are not for MCP to the BRB */
-       REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
-                          NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
-
-       /* Configure AEU */
-       REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
-
-       msleep(100);
-       /* Check for BRB port occupancy */
-       val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
-       if (val)
-               DP(NETIF_MSG_IFDOWN,
-                  "BRB1 is not empty  %d blocks are occupied\n", val);
-
-       /* TODO: Close Doorbell port? */
-}
-
-/*
- * bnx2x_reset_chip - perform the reset scope the MCP granted: COMMON
- * resets port + function + common blocks, PORT resets port + function,
- * FUNCTION resets only the function.  Unknown codes are logged only.
- */
-static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
-{
-       DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
-          BP_FUNC(bp), reset_code);
-
-       switch (reset_code) {
-       case FW_MSG_CODE_DRV_UNLOAD_COMMON:
-               bnx2x_reset_port(bp);
-               bnx2x_reset_func(bp);
-               bnx2x_reset_common(bp);
-               break;
-
-       case FW_MSG_CODE_DRV_UNLOAD_PORT:
-               bnx2x_reset_port(bp);
-               bnx2x_reset_func(bp);
-               break;
-
-       case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
-               bnx2x_reset_func(bp);
-               break;
-
-       default:
-               BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
-               break;
-       }
-}
-
-static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
-{
-       int port = BP_PORT(bp);
-       u32 reset_code = 0;
-       int i, cnt, rc;
-
-       /* Wait until tx fastpath tasks complete */
-       for_each_queue(bp, i) {
-               struct bnx2x_fastpath *fp = &bp->fp[i];
-
-               cnt = 1000;
-               while (bnx2x_has_tx_work_unload(fp)) {
-
-                       bnx2x_tx_int(fp);
-                       if (!cnt) {
-                               BNX2X_ERR("timeout waiting for queue[%d]\n",
-                                         i);
-#ifdef BNX2X_STOP_ON_ERROR
-                               bnx2x_panic();
-                               return -EBUSY;
-#else
-                               break;
-#endif
-                       }
-                       cnt--;
-                       msleep(1);
-               }
-       }
-       /* Give HW time to discard old tx messages */
-       msleep(1);
-
-       if (CHIP_IS_E1(bp)) {
-               struct mac_configuration_cmd *config =
-                                               bnx2x_sp(bp, mcast_config);
-
-               bnx2x_set_eth_mac_addr_e1(bp, 0);
-
-               for (i = 0; i < config->hdr.length; i++)
-                       CAM_INVALIDATE(config->config_table[i]);
-
-               config->hdr.length = i;
-               if (CHIP_REV_IS_SLOW(bp))
-                       config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
-               else
-                       config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
-               config->hdr.client_id = bp->fp->cl_id;
-               config->hdr.reserved1 = 0;
-
-               bp->set_mac_pending++;
-               smp_wmb();
-
-               bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
-                             U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
-                             U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
-
-       } else { /* E1H */
-               REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
-
-               bnx2x_set_eth_mac_addr_e1h(bp, 0);
-
-               for (i = 0; i < MC_HASH_SIZE; i++)
-                       REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
-
-               REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
-       }
-#ifdef BCM_CNIC
-       /* Clear iSCSI L2 MAC */
-       mutex_lock(&bp->cnic_mutex);
-       if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
-               bnx2x_set_iscsi_eth_mac_addr(bp, 0);
-               bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
-       }
-       mutex_unlock(&bp->cnic_mutex);
-#endif
-
-       if (unload_mode == UNLOAD_NORMAL)
-               reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
-
-       else if (bp->flags & NO_WOL_FLAG)
-               reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
-
-       else if (bp->wol) {
-               u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
-               u8 *mac_addr = bp->dev->dev_addr;
-               u32 val;
-               /* The mac address is written to entries 1-4 to
-                  preserve entry 0 which is used by the PMF */
-               u8 entry = (BP_E1HVN(bp) + 1)*8;
-
-               val = (mac_addr[0] << 8) | mac_addr[1];
-               EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
-
-               val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
-                     (mac_addr[4] << 8) | mac_addr[5];
-               EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
-
-               reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
-
-       } else
-               reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
-
-       /* Close multi and leading connections
-          Completions for ramrods are collected in a synchronous way */
-       for_each_nondefault_queue(bp, i)
-               if (bnx2x_stop_multi(bp, i))
-                       goto unload_error;
-
-       rc = bnx2x_stop_leading(bp);
-       if (rc) {
-               BNX2X_ERR("Stop leading failed!\n");
-#ifdef BNX2X_STOP_ON_ERROR
-               return -EBUSY;
-#else
-               goto unload_error;
-#endif
-       }
-
-unload_error:
-       if (!BP_NOMCP(bp))
-               reset_code = bnx2x_fw_command(bp, reset_code);
-       else {
-               DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
-                  load_count[0], load_count[1], load_count[2]);
-               load_count[0]--;
-               load_count[1 + port]--;
-               DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
-                  load_count[0], load_count[1], load_count[2]);
-               if (load_count[0] == 0)
-                       reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
-               else if (load_count[1 + port] == 0)
-                       reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
-               else
-                       reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
-       }
-
-       if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
-           (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
-               bnx2x__link_reset(bp);
-
-       /* Reset the chip */
-       bnx2x_reset_chip(bp, reset_code);
-
-       /* Report UNLOAD_DONE to MCP */
-       if (!BP_NOMCP(bp))
-               bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
-
-}
-
-static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
-{
-       u32 val;
-
-       DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
-
-       if (CHIP_IS_E1(bp)) {
-               int port = BP_PORT(bp);
-               u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
-                       MISC_REG_AEU_MASK_ATTN_FUNC_0;
-
-               val = REG_RD(bp, addr);
-               val &= ~(0x300);
-               REG_WR(bp, addr, val);
-       } else if (CHIP_IS_E1H(bp)) {
-               val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
-               val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
-                        MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
-               REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
-       }
-}
-
-/* must be called with rtnl_lock */
-static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
-{
-       int i;
-
-       if (bp->state == BNX2X_STATE_CLOSED) {
-               /* Interface has been removed - nothing to recover */
-               bp->recovery_state = BNX2X_RECOVERY_DONE;
-               bp->is_leader = 0;
-               bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
-               smp_wmb();
-
-               return -EINVAL;
-       }
-
-#ifdef BCM_CNIC
-       bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
-#endif
-       bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
-
-       /* Set "drop all" */
-       bp->rx_mode = BNX2X_RX_MODE_NONE;
-       bnx2x_set_storm_rx_mode(bp);
-
-       /* Disable HW interrupts, NAPI and Tx */
-       bnx2x_netif_stop(bp, 1);
-       netif_carrier_off(bp->dev);
-
-       del_timer_sync(&bp->timer);
-       SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
-                (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
-       bnx2x_stats_handle(bp, STATS_EVENT_STOP);
-
-       /* Release IRQs */
-       bnx2x_free_irq(bp, false);
-
-       /* Cleanup the chip if needed */
-       if (unload_mode != UNLOAD_RECOVERY)
-               bnx2x_chip_cleanup(bp, unload_mode);
-
-       bp->port.pmf = 0;
-
-       /* Free SKBs, SGEs, TPA pool and driver internals */
-       bnx2x_free_skbs(bp);
-       for_each_queue(bp, i)
-               bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
-       for_each_queue(bp, i)
-               netif_napi_del(&bnx2x_fp(bp, i, napi));
-       bnx2x_free_mem(bp);
-
-       bp->state = BNX2X_STATE_CLOSED;
-
-       /* The last driver must disable a "close the gate" if there is no
-        * parity attention or "process kill" pending.
-        */
-       if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
-           bnx2x_reset_is_done(bp))
-               bnx2x_disable_close_the_gate(bp);
-
-       /* Reset MCP mail box sequence if there is on going recovery */
-       if (unload_mode == UNLOAD_RECOVERY)
-               bp->fw_seq = 0;
-
-       return 0;
-}
-
-/* Close gates #2, #3 and #4: */
-static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
-{
-       u32 val, addr;
-
-       /* Gates #2 and #4a are closed/opened for "not E1" only */
-       if (!CHIP_IS_E1(bp)) {
-               /* #4 */
-               val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
-               REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
-                      close ? (val | 0x1) : (val & (~(u32)1)));
-               /* #2 */
-               val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
-               REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
-                      close ? (val | 0x1) : (val & (~(u32)1)));
-       }
-
-       /* #3 */
-       addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
-       val = REG_RD(bp, addr);
-       REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
-
-       DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
-               close ? "closing" : "opening");
-       mmiowb();
-}
-
-#define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
-
-static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
-{
-       /* Do some magic... */
-       u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
-       *magic_val = val & SHARED_MF_CLP_MAGIC;
-       MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
-}
-
-/* Restore the value of the `magic' bit.
- *
- * @param pdev Device handle.
- * @param magic_val Old value of the `magic' bit.
- */
-static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
-{
-       /* Restore the `magic' bit value... */
-       /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
-       SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
-               (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
-       u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
-       MF_CFG_WR(bp, shared_mf_config.clp_mb,
-               (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
-}
-
-/* Prepares for MCP reset: takes care of CLP configurations.
- *
- * @param bp
- * @param magic_val Old value of 'magic' bit.
- */
-static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
-{
-       u32 shmem;
-       u32 validity_offset;
-
-       DP(NETIF_MSG_HW, "Starting\n");
-
-       /* Set `magic' bit in order to save MF config */
-       if (!CHIP_IS_E1(bp))
-               bnx2x_clp_reset_prep(bp, magic_val);
-
-       /* Get shmem offset */
-       shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
-       validity_offset = offsetof(struct shmem_region, validity_map[0]);
-
-       /* Clear validity map flags */
-       if (shmem > 0)
-               REG_WR(bp, shmem + validity_offset, 0);
-}
-
-#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
-#define MCP_ONE_TIMEOUT  100    /* 100 ms */
-
-/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
- * depending on the HW type.
- *
- * @param bp
- */
-static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
-{
-       /* special handling for emulation and FPGA,
-          wait 10 times longer */
-       if (CHIP_REV_IS_SLOW(bp))
-               msleep(MCP_ONE_TIMEOUT*10);
-       else
-               msleep(MCP_ONE_TIMEOUT);
-}
-
-static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
-{
-       u32 shmem, cnt, validity_offset, val;
-       int rc = 0;
-
-       msleep(100);
-
-       /* Get shmem offset */
-       shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
-       if (shmem == 0) {
-               BNX2X_ERR("Shmem 0 return failure\n");
-               rc = -ENOTTY;
-               goto exit_lbl;
-       }
-
-       validity_offset = offsetof(struct shmem_region, validity_map[0]);
-
-       /* Wait for MCP to come up */
-       for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
-               /* TBD: its best to check validity map of last port.
-                * currently checks on port 0.
-                */
-               val = REG_RD(bp, shmem + validity_offset);
-               DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
-                  shmem + validity_offset, val);
-
-               /* check that shared memory is valid. */
-               if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
-                   == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
-                       break;
-
-               bnx2x_mcp_wait_one(bp);
-       }
-
-       DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
-
-       /* Check that shared memory is valid. This indicates that MCP is up. */
-       if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
-           (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
-               BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
-               rc = -ENOTTY;
-               goto exit_lbl;
-       }
-
-exit_lbl:
-       /* Restore the `magic' bit value */
-       if (!CHIP_IS_E1(bp))
-               bnx2x_clp_reset_done(bp, magic_val);
-
-       return rc;
-}
-
-static void bnx2x_pxp_prep(struct bnx2x *bp)
-{
-       if (!CHIP_IS_E1(bp)) {
-               REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
-               REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
-               REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
-               mmiowb();
-       }
-}
-
-/*
- * Reset the whole chip except for:
- *      - PCIE core
- *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
- *              one reset bit)
- *      - IGU
- *      - MISC (including AEU)
- *      - GRC
- *      - RBCN, RBCP
- */
-static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
-{
-       u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
-
-       not_reset_mask1 =
-               MISC_REGISTERS_RESET_REG_1_RST_HC |
-               MISC_REGISTERS_RESET_REG_1_RST_PXPV |
-               MISC_REGISTERS_RESET_REG_1_RST_PXP;
-
-       not_reset_mask2 =
-               MISC_REGISTERS_RESET_REG_2_RST_MDIO |
-               MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
-               MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
-               MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
-               MISC_REGISTERS_RESET_REG_2_RST_RBCN |
-               MISC_REGISTERS_RESET_REG_2_RST_GRC  |
-               MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
-               MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
-
-       reset_mask1 = 0xffffffff;
-
-       if (CHIP_IS_E1(bp))
-               reset_mask2 = 0xffff;
-       else
-               reset_mask2 = 0x1ffff;
-
-       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
-              reset_mask1 & (~not_reset_mask1));
-       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
-              reset_mask2 & (~not_reset_mask2));
-
-       barrier();
-       mmiowb();
-
-       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
-       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
-       mmiowb();
-}
-
-static int bnx2x_process_kill(struct bnx2x *bp)
-{
-       int cnt = 1000;
-       u32 val = 0;
-       u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
-
-
-       /* Empty the Tetris buffer, wait for 1s */
-       do {
-               sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
-               blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
-               port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
-               port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
-               pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
-               if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
-                   ((port_is_idle_0 & 0x1) == 0x1) &&
-                   ((port_is_idle_1 & 0x1) == 0x1) &&
-                   (pgl_exp_rom2 == 0xffffffff))
-                       break;
-               msleep(1);
-       } while (cnt-- > 0);
-
-       if (cnt <= 0) {
-               DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
-                         " are still"
-                         " outstanding read requests after 1s!\n");
-               DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
-                         " port_is_idle_0=0x%08x,"
-                         " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
-                         sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
-                         pgl_exp_rom2);
-               return -EAGAIN;
-       }
-
-       barrier();
-
-       /* Close gates #2, #3 and #4 */
-       bnx2x_set_234_gates(bp, true);
-
-       /* TBD: Indicate that "process kill" is in progress to MCP */
-
-       /* Clear "unprepared" bit */
-       REG_WR(bp, MISC_REG_UNPREPARED, 0);
-       barrier();
-
-       /* Make sure all is written to the chip before the reset */
-       mmiowb();
-
-       /* Wait for 1ms to empty GLUE and PCI-E core queues,
-        * PSWHST, GRC and PSWRD Tetris buffer.
-        */
-       msleep(1);
-
-       /* Prepare to chip reset: */
-       /* MCP */
-       bnx2x_reset_mcp_prep(bp, &val);
-
-       /* PXP */
-       bnx2x_pxp_prep(bp);
-       barrier();
-
-       /* reset the chip */
-       bnx2x_process_kill_chip_reset(bp);
-       barrier();
-
-       /* Recover after reset: */
-       /* MCP */
-       if (bnx2x_reset_mcp_comp(bp, val))
-               return -EAGAIN;
-
-       /* PXP */
-       bnx2x_pxp_prep(bp);
-
-       /* Open the gates #2, #3 and #4 */
-       bnx2x_set_234_gates(bp, false);
-
-       /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
-        * reset state, re-enable attentions. */
-
-       return 0;
-}
-
-static int bnx2x_leader_reset(struct bnx2x *bp)
-{
-       int rc = 0;
-       /* Try to recover after the failure */
-       if (bnx2x_process_kill(bp)) {
-               printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
-                      bp->dev->name);
-               rc = -EAGAIN;
-               goto exit_leader_reset;
-       }
-
-       /* Clear "reset is in progress" bit and update the driver state */
-       bnx2x_set_reset_done(bp);
-       bp->recovery_state = BNX2X_RECOVERY_DONE;
-
-exit_leader_reset:
-       bp->is_leader = 0;
-       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
-       smp_wmb();
-       return rc;
-}
-
-static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
-
-/* Assumption: runs under rtnl lock. This together with the fact
- * that it's called only from bnx2x_reset_task() ensure that it
- * will never be called when netif_running(bp->dev) is false.
- */
-static void bnx2x_parity_recover(struct bnx2x *bp)
-{
-       DP(NETIF_MSG_HW, "Handling parity\n");
-       while (1) {
-               switch (bp->recovery_state) {
-               case BNX2X_RECOVERY_INIT:
-                       DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
-                       /* Try to get a LEADER_LOCK HW lock */
-                       if (bnx2x_trylock_hw_lock(bp,
-                               HW_LOCK_RESOURCE_RESERVED_08))
-                               bp->is_leader = 1;
-
-                       /* Stop the driver */
-                       /* If interface has been removed - break */
-                       if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
-                               return;
-
-                       bp->recovery_state = BNX2X_RECOVERY_WAIT;
-                       /* Ensure "is_leader" and "recovery_state"
-                        *  update values are seen on other CPUs
-                        */
-                       smp_wmb();
-                       break;
-
-               case BNX2X_RECOVERY_WAIT:
-                       DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
-                       if (bp->is_leader) {
-                               u32 load_counter = bnx2x_get_load_cnt(bp);
-                               if (load_counter) {
-                                       /* Wait until all other functions get
-                                        * down.
-                                        */
-                                       schedule_delayed_work(&bp->reset_task,
-                                                               HZ/10);
-                                       return;
-                               } else {
-                                       /* If all other functions got down -
-                                        * try to bring the chip back to
-                                        * normal. In any case it's an exit
-                                        * point for a leader.
-                                        */
-                                       if (bnx2x_leader_reset(bp) ||
-                                       bnx2x_nic_load(bp, LOAD_NORMAL)) {
-                                               printk(KERN_ERR"%s: Recovery "
-                                               "has failed. Power cycle is "
-                                               "needed.\n", bp->dev->name);
-                                               /* Disconnect this device */
-                                               netif_device_detach(bp->dev);
-                                               /* Block ifup for all function
-                                                * of this ASIC until
-                                                * "process kill" or power
-                                                * cycle.
-                                                */
-                                               bnx2x_set_reset_in_progress(bp);
-                                               /* Shut down the power */
-                                               bnx2x_set_power_state(bp,
-                                                               PCI_D3hot);
-                                               return;
-                                       }
-
-                                       return;
-                               }
-                       } else { /* non-leader */
-                               if (!bnx2x_reset_is_done(bp)) {
-                                       /* Try to get a LEADER_LOCK HW lock as
-                                        * long as a former leader may have
-                                        * been unloaded by the user or
-                                        * released a leadership by another
-                                        * reason.
-                                        */
-                                       if (bnx2x_trylock_hw_lock(bp,
-                                           HW_LOCK_RESOURCE_RESERVED_08)) {
-                                               /* I'm a leader now! Restart a
-                                                * switch case.
-                                                */
-                                               bp->is_leader = 1;
-                                               break;
-                                       }
-
-                                       schedule_delayed_work(&bp->reset_task,
-                                                               HZ/10);
-                                       return;
-
-                               } else { /* A leader has completed
-                                         * the "process kill". It's an exit
-                                         * point for a non-leader.
-                                         */
-                                       bnx2x_nic_load(bp, LOAD_NORMAL);
-                                       bp->recovery_state =
-                                               BNX2X_RECOVERY_DONE;
-                                       smp_wmb();
-                                       return;
-                               }
-                       }
-               default:
-                       return;
-               }
-       }
-}
-
-/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
- * scheduled on a general queue in order to prevent a dead lock.
- */
-static void bnx2x_reset_task(struct work_struct *work)
-{
-       struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
-
-#ifdef BNX2X_STOP_ON_ERROR
-       BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
-                 " so reset not done to allow debug dump,\n"
-        KERN_ERR " you will need to reboot when done\n");
-       return;
-#endif
+       for (i = 0; i < mask_arr_len; i++)
+               REG_WR(bp, bnx2x_parity_mask[i].addr,
+                       bnx2x_parity_mask[i].mask);
+}
 
-       rtnl_lock();
 
-       if (!netif_running(bp->dev))
-               goto reset_task_exit;
+static void bnx2x_reset_common(struct bnx2x *bp)
+{
+       /* reset_common */
+       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+              0xd3ffff7f);
+       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
+}
 
-       if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
-               bnx2x_parity_recover(bp);
+static void bnx2x_init_pxp(struct bnx2x *bp)
+{
+       u16 devctl;
+       int r_order, w_order;
+
+       pci_read_config_word(bp->pdev,
+                            bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
+       DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
+       w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
+       if (bp->mrrs == -1)
+               r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
        else {
-               bnx2x_nic_unload(bp, UNLOAD_NORMAL);
-               bnx2x_nic_load(bp, LOAD_NORMAL);
+               DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
+               r_order = bp->mrrs;
        }
 
-reset_task_exit:
-       rtnl_unlock();
+       bnx2x_init_pxp_arb(bp, r_order, w_order);
 }
 
-/* end of nic load/unload */
-
-/* ethtool_ops */
+static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
+{
+       int is_required;
+       u32 val;
+       int port;
 
-/*
- * Init service functions
- */
+       if (BP_NOMCP(bp))
+               return;
 
-static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
-{
-       switch (func) {
-       case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
-       case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
-       case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
-       case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
-       case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
-       case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
-       case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
-       case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
-       default:
-               BNX2X_ERR("Unsupported function index: %d\n", func);
-               return (u32)(-1);
-       }
-}
+       is_required = 0;
+       val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
+             SHARED_HW_CFG_FAN_FAILURE_MASK;
 
-static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
-{
-       u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
+       if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
+               is_required = 1;
 
-       /* Flush all outstanding writes */
-       mmiowb();
+       /*
+        * The fan failure mechanism is usually related to the PHY type since
+        * the power consumption of the board is affected by the PHY. Currently,
+        * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
+        */
+       else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
+               for (port = PORT_0; port < PORT_MAX; port++) {
+                       u32 phy_type =
+                               SHMEM_RD(bp, dev_info.port_hw_config[port].
+                                        external_phy_config) &
+                               PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
+                       is_required |=
+                               ((phy_type ==
+                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
+                                (phy_type ==
+                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
+                                (phy_type ==
+                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
+               }
 
-       /* Pretend to be function 0 */
-       REG_WR(bp, reg, 0);
-       /* Flush the GRC transaction (in the chip) */
-       new_val = REG_RD(bp, reg);
-       if (new_val != 0) {
-               BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
-                         new_val);
-               BUG();
-       }
+       DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
 
-       /* From now we are in the "like-E1" mode */
-       bnx2x_int_disable(bp);
+       if (is_required == 0)
+               return;
 
-       /* Flush all outstanding writes */
-       mmiowb();
+       /* Fan failure is indicated by SPIO 5 */
+       bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
+                      MISC_REGISTERS_SPIO_INPUT_HI_Z);
 
-       /* Restore the original funtion settings */
-       REG_WR(bp, reg, orig_func);
-       new_val = REG_RD(bp, reg);
-       if (new_val != orig_func) {
-               BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
-                         orig_func, new_val);
-               BUG();
-       }
-}
+       /* set to active low mode */
+       val = REG_RD(bp, MISC_REG_SPIO_INT);
+       val |= ((1 << MISC_REGISTERS_SPIO_5) <<
+                                       MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
+       REG_WR(bp, MISC_REG_SPIO_INT, val);
 
-static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
-{
-       if (CHIP_IS_E1H(bp))
-               bnx2x_undi_int_disable_e1h(bp, func);
-       else
-               bnx2x_int_disable(bp);
+       /* enable interrupt to signal the IGU */
+       val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
+       val |= (1 << MISC_REGISTERS_SPIO_5);
+       REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
 }
 
-static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
+static int bnx2x_init_common(struct bnx2x *bp)
 {
-       u32 val;
-
-       /* Check if there is any driver already loaded */
-       val = REG_RD(bp, MISC_REG_UNPREPARED);
-       if (val == 0x1) {
-               /* Check if it is the UNDI driver
-                * UNDI driver initializes CID offset for normal bell to 0x7
-                */
-               bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
-               val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
-               if (val == 0x7) {
-                       u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
-                       /* save our func */
-                       int func = BP_FUNC(bp);
-                       u32 swap_en;
-                       u32 swap_val;
+       u32 val, i;
+#ifdef BCM_CNIC
+       u32 wb_write[2];
+#endif
 
-                       /* clear the UNDI indication */
-                       REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
+       DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
 
-                       BNX2X_DEV_INFO("UNDI is active! reset device\n");
+       bnx2x_reset_common(bp);
+       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
+       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
 
-                       /* try unload UNDI on port 0 */
-                       bp->func = 0;
-                       bp->fw_seq =
-                              (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
-                               DRV_MSG_SEQ_NUMBER_MASK);
-                       reset_code = bnx2x_fw_command(bp, reset_code);
+       bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
+       if (CHIP_IS_E1H(bp))
+               REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
 
-                       /* if UNDI is loaded on the other port */
-                       if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
+       REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
+       msleep(30);
+       REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
 
-                               /* send "DONE" for previous unload */
-                               bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+       bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
+       if (CHIP_IS_E1(bp)) {
+               /* enable HW interrupt from PXP on USDM overflow
+                  bit 16 on INT_MASK_0 */
+               REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
+       }
 
-                               /* unload UNDI on port 1 */
-                               bp->func = 1;
-                               bp->fw_seq =
-                              (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
-                                       DRV_MSG_SEQ_NUMBER_MASK);
-                               reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+       bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
+       bnx2x_init_pxp(bp);
 
-                               bnx2x_fw_command(bp, reset_code);
-                       }
+#ifdef __BIG_ENDIAN
+       REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
+       REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
+       REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
+       REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
+       REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
+       /* make sure this value is 0 */
+       REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
 
-                       /* now it's safe to release the lock */
-                       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+/*     REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
+       REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
+       REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
+       REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
+       REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
+#endif
 
-                       bnx2x_undi_int_disable(bp, func);
+       REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
+#ifdef BCM_CNIC
+       REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
+       REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
+       REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
+#endif
 
-                       /* close input traffic and wait for it */
-                       /* Do not rcv packets to BRB */
-                       REG_WR(bp,
-                             (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
-                                            NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
-                       /* Do not direct rcv packets that are not for MCP to
-                        * the BRB */
-                       REG_WR(bp,
-                              (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
-                                             NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
-                       /* clear AEU */
-                       REG_WR(bp,
-                            (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
-                                           MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
-                       msleep(10);
+       if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
+               REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
 
-                       /* save NIG port swap info */
-                       swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
-                       swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
-                       /* reset device */
-                       REG_WR(bp,
-                              GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
-                              0xd3ffffff);
-                       REG_WR(bp,
-                              GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
-                              0x1403);
-                       /* take the NIG out of reset and restore swap values */
-                       REG_WR(bp,
-                              GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
-                              MISC_REGISTERS_RESET_REG_1_RST_NIG);
-                       REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
-                       REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
+       /* let the HW do it's magic ... */
+       msleep(100);
+       /* finish PXP init */
+       val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
+       if (val != 1) {
+               BNX2X_ERR("PXP2 CFG failed\n");
+               return -EBUSY;
+       }
+       val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
+       if (val != 1) {
+               BNX2X_ERR("PXP2 RD_INIT failed\n");
+               return -EBUSY;
+       }
 
-                       /* send unload done to the MCP */
-                       bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+       REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
+       REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
 
-                       /* restore our func and fw_seq */
-                       bp->func = func;
-                       bp->fw_seq =
-                              (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
-                               DRV_MSG_SEQ_NUMBER_MASK);
+       bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
 
-               } else
-                       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
-       }
-}
+       /* clean the DMAE memory */
+       bp->dmae_ready = 1;
+       bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
 
-static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
-{
-       u32 val, val2, val3, val4, id;
-       u16 pmc;
+       bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
+       bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
+       bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
+       bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
 
-       /* Get the chip revision id and number. */
-       /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
-       val = REG_RD(bp, MISC_REG_CHIP_NUM);
-       id = ((val & 0xffff) << 16);
-       val = REG_RD(bp, MISC_REG_CHIP_REV);
-       id |= ((val & 0xf) << 12);
-       val = REG_RD(bp, MISC_REG_CHIP_METAL);
-       id |= ((val & 0xff) << 4);
-       val = REG_RD(bp, MISC_REG_BOND_ID);
-       id |= (val & 0xf);
-       bp->common.chip_id = id;
-       bp->link_params.chip_id = bp->common.chip_id;
-       BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
+       bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
+       bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
+       bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
+       bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
 
-       val = (REG_RD(bp, 0x2874) & 0x55);
-       if ((bp->common.chip_id & 0x1) ||
-           (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
-               bp->flags |= ONE_PORT_FLAG;
-               BNX2X_DEV_INFO("single port device\n");
-       }
+       bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
 
-       val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
-       bp->common.flash_size = (NVRAM_1MB_SIZE <<
-                                (val & MCPR_NVM_CFG4_FLASH_SIZE));
-       BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
-                      bp->common.flash_size, bp->common.flash_size);
+#ifdef BCM_CNIC
+       wb_write[0] = 0;
+       wb_write[1] = 0;
+       for (i = 0; i < 64; i++) {
+               REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
+               bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
 
-       bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
-       bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
-       bp->link_params.shmem_base = bp->common.shmem_base;
-       BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
-                      bp->common.shmem_base, bp->common.shmem2_base);
+               if (CHIP_IS_E1H(bp)) {
+                       REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
+                       bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
+                                         wb_write, 2);
+               }
+       }
+#endif
+       /* soft reset pulse */
+       REG_WR(bp, QM_REG_SOFT_RESET, 1);
+       REG_WR(bp, QM_REG_SOFT_RESET, 0);
 
-       if (!bp->common.shmem_base ||
-           (bp->common.shmem_base < 0xA0000) ||
-           (bp->common.shmem_base >= 0xC0000)) {
-               BNX2X_DEV_INFO("MCP not active\n");
-               bp->flags |= NO_MCP_FLAG;
-               return;
+#ifdef BCM_CNIC
+       bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
+#endif
+
+       bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
+       REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
+       if (!CHIP_REV_IS_SLOW(bp)) {
+               /* enable hw interrupt from doorbell Q */
+               REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
        }
 
-       val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
-       if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
-               != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
-               BNX2X_ERROR("BAD MCP validity signature\n");
+       bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
+       bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
+       REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
+#ifndef BCM_CNIC
+       /* set NIC mode */
+       REG_WR(bp, PRS_REG_NIC_MODE, 1);
+#endif
+       if (CHIP_IS_E1H(bp))
+               REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
 
-       bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
-       BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
+       bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
+       bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
+       bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
+       bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
 
-       bp->link_params.hw_led_mode = ((bp->common.hw_config &
-                                       SHARED_HW_CFG_LED_MODE_MASK) >>
-                                      SHARED_HW_CFG_LED_MODE_SHIFT);
+       bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
+       bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
+       bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
+       bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
 
-       bp->link_params.feature_config_flags = 0;
-       val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
-       if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
-               bp->link_params.feature_config_flags |=
-                               FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
-       else
-               bp->link_params.feature_config_flags &=
-                               ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
+       bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
+       bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
+       bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
+       bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
 
-       val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
-       bp->common.bc_ver = val;
-       BNX2X_DEV_INFO("bc_ver %X\n", val);
-       if (val < BNX2X_BC_VER) {
-               /* for now only warn
-                * later we might need to enforce this */
-               BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
-                           "please upgrade BC\n", BNX2X_BC_VER, val);
-       }
-       bp->link_params.feature_config_flags |=
-               (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
-               FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
+       /* sync semi rtc */
+       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+              0x80000000);
+       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
+              0x80000000);
 
-       if (BP_E1HVN(bp) == 0) {
-               pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
-               bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
-       } else {
-               /* no WOL capability for E1HVN != 0 */
-               bp->flags |= NO_WOL_FLAG;
-       }
-       BNX2X_DEV_INFO("%sWoL capable\n",
-                      (bp->flags & NO_WOL_FLAG) ? "not " : "");
+       bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
+       bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
+       bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
 
-       val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
-       val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
-       val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
-       val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
+       REG_WR(bp, SRC_REG_SOFT_RST, 1);
+       for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
+               REG_WR(bp, i, random32());
+       bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
+#ifdef BCM_CNIC
+       REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
+       REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
+       REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
+       REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
+       REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
+       REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
+       REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
+       REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
+       REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
+       REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
+#endif
+       REG_WR(bp, SRC_REG_SOFT_RST, 0);
 
-       dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
-                val, val2, val3, val4);
-}
+       if (sizeof(union cdu_context) != 1024)
+               /* we currently assume that a context is 1024 bytes */
+               dev_alert(&bp->pdev->dev, "please adjust the size "
+                                         "of cdu_context(%ld)\n",
+                        (long)sizeof(union cdu_context));
 
-static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
-                                                   u32 switch_cfg)
-{
-       int port = BP_PORT(bp);
-       u32 ext_phy_type;
+       bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
+       val = (4 << 24) + (0 << 12) + 1024;
+       REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
 
-       switch (switch_cfg) {
-       case SWITCH_CFG_1G:
-               BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
+       bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
+       REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
+       /* enable context validation interrupt from CFC */
+       REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
 
-               ext_phy_type =
-                       SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
-               switch (ext_phy_type) {
-               case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
-                       BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
-                                      ext_phy_type);
+       /* set the thresholds to prevent CFC/CDU race */
+       REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
 
-                       bp->port.supported |= (SUPPORTED_10baseT_Half |
-                                              SUPPORTED_10baseT_Full |
-                                              SUPPORTED_100baseT_Half |
-                                              SUPPORTED_100baseT_Full |
-                                              SUPPORTED_1000baseT_Full |
-                                              SUPPORTED_2500baseX_Full |
-                                              SUPPORTED_TP |
-                                              SUPPORTED_FIBRE |
-                                              SUPPORTED_Autoneg |
-                                              SUPPORTED_Pause |
-                                              SUPPORTED_Asym_Pause);
-                       break;
+       bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
+       bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
 
-               case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
-                       BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
-                                      ext_phy_type);
+       bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
+       /* Reset PCIE errors for debug */
+       REG_WR(bp, 0x2814, 0xffffffff);
+       REG_WR(bp, 0x3820, 0xffffffff);
 
-                       bp->port.supported |= (SUPPORTED_10baseT_Half |
-                                              SUPPORTED_10baseT_Full |
-                                              SUPPORTED_100baseT_Half |
-                                              SUPPORTED_100baseT_Full |
-                                              SUPPORTED_1000baseT_Full |
-                                              SUPPORTED_TP |
-                                              SUPPORTED_FIBRE |
-                                              SUPPORTED_Autoneg |
-                                              SUPPORTED_Pause |
-                                              SUPPORTED_Asym_Pause);
-                       break;
+       bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
+       bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
+       bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
+       bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
+
+       bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
+       if (CHIP_IS_E1H(bp)) {
+               REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
+               REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
+       }
 
-               default:
-                       BNX2X_ERR("NVRAM config error. "
-                                 "BAD SerDes ext_phy_config 0x%x\n",
-                                 bp->link_params.ext_phy_config);
-                       return;
-               }
+       if (CHIP_REV_IS_SLOW(bp))
+               msleep(200);
 
-               bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
-                                          port*0x10);
-               BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
-               break;
+       /* finish CFC init */
+       val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
+       if (val != 1) {
+               BNX2X_ERR("CFC LL_INIT failed\n");
+               return -EBUSY;
+       }
+       val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
+       if (val != 1) {
+               BNX2X_ERR("CFC AC_INIT failed\n");
+               return -EBUSY;
+       }
+       val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
+       if (val != 1) {
+               BNX2X_ERR("CFC CAM_INIT failed\n");
+               return -EBUSY;
+       }
+       REG_WR(bp, CFC_REG_DEBUG0, 0);
 
-       case SWITCH_CFG_10G:
-               BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
+       /* read NIG statistic
+          to see if this is our first up since powerup */
+       bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
+       val = *bnx2x_sp(bp, wb_data[0]);
 
-               ext_phy_type =
-                       XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
-               switch (ext_phy_type) {
-               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
-                       BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
-                                      ext_phy_type);
+       /* do internal memory self test */
+       if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
+               BNX2X_ERR("internal mem self test failed\n");
+               return -EBUSY;
+       }
 
-                       bp->port.supported |= (SUPPORTED_10baseT_Half |
-                                              SUPPORTED_10baseT_Full |
-                                              SUPPORTED_100baseT_Half |
-                                              SUPPORTED_100baseT_Full |
-                                              SUPPORTED_1000baseT_Full |
-                                              SUPPORTED_2500baseX_Full |
-                                              SUPPORTED_10000baseT_Full |
-                                              SUPPORTED_TP |
-                                              SUPPORTED_FIBRE |
-                                              SUPPORTED_Autoneg |
-                                              SUPPORTED_Pause |
-                                              SUPPORTED_Asym_Pause);
-                       break;
+       switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
+       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
+       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
+       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+               bp->port.need_hw_lock = 1;
+               break;
 
-               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
-                       BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
-                                      ext_phy_type);
+       default:
+               break;
+       }
 
-                       bp->port.supported |= (SUPPORTED_10000baseT_Full |
-                                              SUPPORTED_1000baseT_Full |
-                                              SUPPORTED_FIBRE |
-                                              SUPPORTED_Autoneg |
-                                              SUPPORTED_Pause |
-                                              SUPPORTED_Asym_Pause);
-                       break;
+       bnx2x_setup_fan_failure_detection(bp);
 
-               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
-                       BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
-                                      ext_phy_type);
+       /* clear PXP2 attentions */
+       REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
 
-                       bp->port.supported |= (SUPPORTED_10000baseT_Full |
-                                              SUPPORTED_2500baseX_Full |
-                                              SUPPORTED_1000baseT_Full |
-                                              SUPPORTED_FIBRE |
-                                              SUPPORTED_Autoneg |
-                                              SUPPORTED_Pause |
-                                              SUPPORTED_Asym_Pause);
-                       break;
+       enable_blocks_attention(bp);
+       if (CHIP_PARITY_SUPPORTED(bp))
+               enable_blocks_parity(bp);
 
-               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
-                       BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
-                                      ext_phy_type);
+       if (!BP_NOMCP(bp)) {
+               bnx2x_acquire_phy_lock(bp);
+               bnx2x_common_init_phy(bp, bp->common.shmem_base);
+               bnx2x_release_phy_lock(bp);
+       } else
+               BNX2X_ERR("Bootcode is missing - can not initialize link\n");
 
-                       bp->port.supported |= (SUPPORTED_10000baseT_Full |
-                                              SUPPORTED_FIBRE |
-                                              SUPPORTED_Pause |
-                                              SUPPORTED_Asym_Pause);
-                       break;
+       return 0;
+}
 
-               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
-                       BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
-                                      ext_phy_type);
+static int bnx2x_init_port(struct bnx2x *bp)
+{
+       int port = BP_PORT(bp);
+       int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
+       u32 low, high;
+       u32 val;
 
-                       bp->port.supported |= (SUPPORTED_10000baseT_Full |
-                                              SUPPORTED_1000baseT_Full |
-                                              SUPPORTED_FIBRE |
-                                              SUPPORTED_Pause |
-                                              SUPPORTED_Asym_Pause);
-                       break;
+       DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);
 
-               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
-                       BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
-                                      ext_phy_type);
+       REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
 
-                       bp->port.supported |= (SUPPORTED_10000baseT_Full |
-                                              SUPPORTED_1000baseT_Full |
-                                              SUPPORTED_Autoneg |
-                                              SUPPORTED_FIBRE |
-                                              SUPPORTED_Pause |
-                                              SUPPORTED_Asym_Pause);
-                       break;
+       bnx2x_init_block(bp, PXP_BLOCK, init_stage);
+       bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
 
-               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
-                       BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
-                                      ext_phy_type);
+       bnx2x_init_block(bp, TCM_BLOCK, init_stage);
+       bnx2x_init_block(bp, UCM_BLOCK, init_stage);
+       bnx2x_init_block(bp, CCM_BLOCK, init_stage);
+       bnx2x_init_block(bp, XCM_BLOCK, init_stage);
 
-                       bp->port.supported |= (SUPPORTED_10000baseT_Full |
-                                              SUPPORTED_1000baseT_Full |
-                                              SUPPORTED_Autoneg |
-                                              SUPPORTED_FIBRE |
-                                              SUPPORTED_Pause |
-                                              SUPPORTED_Asym_Pause);
-                       break;
+#ifdef BCM_CNIC
+       REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
 
-               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
-                       BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
-                                      ext_phy_type);
+       bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
+       REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
+       REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
+#endif
 
-                       bp->port.supported |= (SUPPORTED_10000baseT_Full |
-                                              SUPPORTED_TP |
-                                              SUPPORTED_Autoneg |
-                                              SUPPORTED_Pause |
-                                              SUPPORTED_Asym_Pause);
-                       break;
+       bnx2x_init_block(bp, DQ_BLOCK, init_stage);
 
-               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
-                       BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
-                                      ext_phy_type);
+       bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
+       if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
+               /* no pause for emulation and FPGA */
+               low = 0;
+               high = 513;
+       } else {
+               if (IS_E1HMF(bp))
+                       low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
+               else if (bp->dev->mtu > 4096) {
+                       if (bp->flags & ONE_PORT_FLAG)
+                               low = 160;
+                       else {
+                               val = bp->dev->mtu;
+                               /* (24*1024 + val*4)/256 */
+                               low = 96 + (val/64) + ((val % 64) ? 1 : 0);
+                       }
+               } else
+                       low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
+               high = low + 56;        /* 14*1024/256 */
+       }
+       REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
+       REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
 
-                       bp->port.supported |= (SUPPORTED_10baseT_Half |
-                                              SUPPORTED_10baseT_Full |
-                                              SUPPORTED_100baseT_Half |
-                                              SUPPORTED_100baseT_Full |
-                                              SUPPORTED_1000baseT_Full |
-                                              SUPPORTED_10000baseT_Full |
-                                              SUPPORTED_TP |
-                                              SUPPORTED_Autoneg |
-                                              SUPPORTED_Pause |
-                                              SUPPORTED_Asym_Pause);
-                       break;
 
-               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
-                       BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
-                                 bp->link_params.ext_phy_config);
-                       break;
+       bnx2x_init_block(bp, PRS_BLOCK, init_stage);
 
-               default:
-                       BNX2X_ERR("NVRAM config error. "
-                                 "BAD XGXS ext_phy_config 0x%x\n",
-                                 bp->link_params.ext_phy_config);
-                       return;
-               }
+       bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
+       bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
+       bnx2x_init_block(bp, USDM_BLOCK, init_stage);
+       bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
 
-               bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
-                                          port*0x18);
-               BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
+       bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
+       bnx2x_init_block(bp, USEM_BLOCK, init_stage);
+       bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
+       bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
 
-               break;
+       bnx2x_init_block(bp, UPB_BLOCK, init_stage);
+       bnx2x_init_block(bp, XPB_BLOCK, init_stage);
 
-       default:
-               BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
-                         bp->port.link_config);
-               return;
-       }
-       bp->link_params.phy_addr = bp->port.phy_addr;
+       bnx2x_init_block(bp, PBF_BLOCK, init_stage);
 
-       /* mask what we support according to speed_cap_mask */
-       if (!(bp->link_params.speed_cap_mask &
-                               PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
-               bp->port.supported &= ~SUPPORTED_10baseT_Half;
+       /* configure PBF to work without PAUSE mtu 9000 */
+       REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
 
-       if (!(bp->link_params.speed_cap_mask &
-                               PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
-               bp->port.supported &= ~SUPPORTED_10baseT_Full;
+       /* update threshold */
+       REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
+       /* update init credit */
+       REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
 
-       if (!(bp->link_params.speed_cap_mask &
-                               PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
-               bp->port.supported &= ~SUPPORTED_100baseT_Half;
+       /* probe changes */
+       REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
+       msleep(5);
+       REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
 
-       if (!(bp->link_params.speed_cap_mask &
-                               PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
-               bp->port.supported &= ~SUPPORTED_100baseT_Full;
+#ifdef BCM_CNIC
+       bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
+#endif
+       bnx2x_init_block(bp, CDU_BLOCK, init_stage);
+       bnx2x_init_block(bp, CFC_BLOCK, init_stage);
 
-       if (!(bp->link_params.speed_cap_mask &
-                                       PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
-               bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
-                                       SUPPORTED_1000baseT_Full);
+       if (CHIP_IS_E1(bp)) {
+               REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
+               REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
+       }
+       bnx2x_init_block(bp, HC_BLOCK, init_stage);
 
-       if (!(bp->link_params.speed_cap_mask &
-                                       PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
-               bp->port.supported &= ~SUPPORTED_2500baseX_Full;
+       bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
+       /* init aeu_mask_attn_func_0/1:
+        *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
+        *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
+        *             bits 4-7 are used for "per vn group attention" */
+       REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
+              (IS_E1HMF(bp) ? 0xF7 : 0x7));
 
-       if (!(bp->link_params.speed_cap_mask &
-                                       PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
-               bp->port.supported &= ~SUPPORTED_10000baseT_Full;
+       bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
+       bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
+       bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
+       bnx2x_init_block(bp, DBU_BLOCK, init_stage);
+       bnx2x_init_block(bp, DBG_BLOCK, init_stage);
 
-       BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
-}
+       bnx2x_init_block(bp, NIG_BLOCK, init_stage);
 
-static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
-{
-       bp->link_params.req_duplex = DUPLEX_FULL;
+       REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
 
-       switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
-       case PORT_FEATURE_LINK_SPEED_AUTO:
-               if (bp->port.supported & SUPPORTED_Autoneg) {
-                       bp->link_params.req_line_speed = SPEED_AUTO_NEG;
-                       bp->port.advertising = bp->port.supported;
-               } else {
-                       u32 ext_phy_type =
-                           XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
+       if (CHIP_IS_E1H(bp)) {
+               /* 0x2 disable e1hov, 0x1 enable */
+               REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
+                      (IS_E1HMF(bp) ? 0x1 : 0x2));
 
-                       if ((ext_phy_type ==
-                            PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
-                           (ext_phy_type ==
-                            PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
-                               /* force 10G, no AN */
-                               bp->link_params.req_line_speed = SPEED_10000;
-                               bp->port.advertising =
-                                               (ADVERTISED_10000baseT_Full |
-                                                ADVERTISED_FIBRE);
-                               break;
-                       }
-                       BNX2X_ERR("NVRAM config error. "
-                                 "Invalid link_config 0x%x"
-                                 "  Autoneg not supported\n",
-                                 bp->port.link_config);
-                       return;
+               {
+                       REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
+                       REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
+                       REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
                }
-               break;
+       }
 
-       case PORT_FEATURE_LINK_SPEED_10M_FULL:
-               if (bp->port.supported & SUPPORTED_10baseT_Full) {
-                       bp->link_params.req_line_speed = SPEED_10;
-                       bp->port.advertising = (ADVERTISED_10baseT_Full |
-                                               ADVERTISED_TP);
-               } else {
-                       BNX2X_ERROR("NVRAM config error. "
-                                   "Invalid link_config 0x%x"
-                                   "  speed_cap_mask 0x%x\n",
-                                   bp->port.link_config,
-                                   bp->link_params.speed_cap_mask);
-                       return;
-               }
-               break;
+       bnx2x_init_block(bp, MCP_BLOCK, init_stage);
+       bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
 
-       case PORT_FEATURE_LINK_SPEED_10M_HALF:
-               if (bp->port.supported & SUPPORTED_10baseT_Half) {
-                       bp->link_params.req_line_speed = SPEED_10;
-                       bp->link_params.req_duplex = DUPLEX_HALF;
-                       bp->port.advertising = (ADVERTISED_10baseT_Half |
-                                               ADVERTISED_TP);
-               } else {
-                       BNX2X_ERROR("NVRAM config error. "
-                                   "Invalid link_config 0x%x"
-                                   "  speed_cap_mask 0x%x\n",
-                                   bp->port.link_config,
-                                   bp->link_params.speed_cap_mask);
-                       return;
-               }
-               break;
+       switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
+       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+               {
+               u32 swap_val, swap_override, aeu_gpio_mask, offset;
 
-       case PORT_FEATURE_LINK_SPEED_100M_FULL:
-               if (bp->port.supported & SUPPORTED_100baseT_Full) {
-                       bp->link_params.req_line_speed = SPEED_100;
-                       bp->port.advertising = (ADVERTISED_100baseT_Full |
-                                               ADVERTISED_TP);
-               } else {
-                       BNX2X_ERROR("NVRAM config error. "
-                                   "Invalid link_config 0x%x"
-                                   "  speed_cap_mask 0x%x\n",
-                                   bp->port.link_config,
-                                   bp->link_params.speed_cap_mask);
-                       return;
-               }
-               break;
+               bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
+                              MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
 
-       case PORT_FEATURE_LINK_SPEED_100M_HALF:
-               if (bp->port.supported & SUPPORTED_100baseT_Half) {
-                       bp->link_params.req_line_speed = SPEED_100;
-                       bp->link_params.req_duplex = DUPLEX_HALF;
-                       bp->port.advertising = (ADVERTISED_100baseT_Half |
-                                               ADVERTISED_TP);
-               } else {
-                       BNX2X_ERROR("NVRAM config error. "
-                                   "Invalid link_config 0x%x"
-                                   "  speed_cap_mask 0x%x\n",
-                                   bp->port.link_config,
-                                   bp->link_params.speed_cap_mask);
-                       return;
-               }
-               break;
+               /* The GPIO should be swapped if the swap register is
+                  set and active */
+               swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+               swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
 
-       case PORT_FEATURE_LINK_SPEED_1G:
-               if (bp->port.supported & SUPPORTED_1000baseT_Full) {
-                       bp->link_params.req_line_speed = SPEED_1000;
-                       bp->port.advertising = (ADVERTISED_1000baseT_Full |
-                                               ADVERTISED_TP);
+               /* Select function upon port-swap configuration */
+               if (port == 0) {
+                       offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
+                       aeu_gpio_mask = (swap_val && swap_override) ?
+                               AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
+                               AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
                } else {
-                       BNX2X_ERROR("NVRAM config error. "
-                                   "Invalid link_config 0x%x"
-                                   "  speed_cap_mask 0x%x\n",
-                                   bp->port.link_config,
-                                   bp->link_params.speed_cap_mask);
-                       return;
+                       offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
+                       aeu_gpio_mask = (swap_val && swap_override) ?
+                               AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
+                               AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
                }
-               break;
-
-       case PORT_FEATURE_LINK_SPEED_2_5G:
-               if (bp->port.supported & SUPPORTED_2500baseX_Full) {
-                       bp->link_params.req_line_speed = SPEED_2500;
-                       bp->port.advertising = (ADVERTISED_2500baseX_Full |
-                                               ADVERTISED_TP);
-               } else {
-                       BNX2X_ERROR("NVRAM config error. "
-                                   "Invalid link_config 0x%x"
-                                   "  speed_cap_mask 0x%x\n",
-                                   bp->port.link_config,
-                                   bp->link_params.speed_cap_mask);
-                       return;
+               val = REG_RD(bp, offset);
+               /* add GPIO3 to group */
+               val |= aeu_gpio_mask;
+               REG_WR(bp, offset, val);
                }
                break;
 
-       case PORT_FEATURE_LINK_SPEED_10G_CX4:
-       case PORT_FEATURE_LINK_SPEED_10G_KX4:
-       case PORT_FEATURE_LINK_SPEED_10G_KR:
-               if (bp->port.supported & SUPPORTED_10000baseT_Full) {
-                       bp->link_params.req_line_speed = SPEED_10000;
-                       bp->port.advertising = (ADVERTISED_10000baseT_Full |
-                                               ADVERTISED_FIBRE);
-               } else {
-                       BNX2X_ERROR("NVRAM config error. "
-                                   "Invalid link_config 0x%x"
-                                   "  speed_cap_mask 0x%x\n",
-                                   bp->port.link_config,
-                                   bp->link_params.speed_cap_mask);
-                       return;
+       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+               /* add SPIO 5 to group 0 */
+               {
+               u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
+                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+               val = REG_RD(bp, reg_addr);
+               val |= AEU_INPUTS_ATTN_BITS_SPIO5;
+               REG_WR(bp, reg_addr, val);
                }
                break;
 
        default:
-               BNX2X_ERROR("NVRAM config error. "
-                           "BAD link speed link_config 0x%x\n",
-                           bp->port.link_config);
-               bp->link_params.req_line_speed = SPEED_AUTO_NEG;
-               bp->port.advertising = bp->port.supported;
                break;
        }
 
-       bp->link_params.req_flow_ctrl = (bp->port.link_config &
-                                        PORT_FEATURE_FLOW_CONTROL_MASK);
-       if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
-           !(bp->port.supported & SUPPORTED_Autoneg))
-               bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+       bnx2x__link_reset(bp);
 
-       BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
-                      "  advertising 0x%x\n",
-                      bp->link_params.req_line_speed,
-                      bp->link_params.req_duplex,
-                      bp->link_params.req_flow_ctrl, bp->port.advertising);
+       return 0;
 }
 
-static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
+#define ILT_PER_FUNC           (768/2)
+#define FUNC_ILT_BASE(func)    (func * ILT_PER_FUNC)
+/* the phys address is shifted right 12 bits and has an added
+   1=valid bit added to the 53rd bit
+   then since this is a wide register(TM)
+   we split it into two 32 bit writes
+ */
+#define ONCHIP_ADDR1(x)                ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
+#define ONCHIP_ADDR2(x)                ((u32)((1 << 20) | ((u64)x >> 44)))
+#define PXP_ONE_ILT(x)         (((x) << 10) | x)
+#define PXP_ILT_RANGE(f, l)    (((l) << 10) | f)
+
+#ifdef BCM_CNIC
+#define CNIC_ILT_LINES         127
+#define CNIC_CTX_PER_ILT       16
+#else
+#define CNIC_ILT_LINES         0
+#endif
+
+static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
 {
-       mac_hi = cpu_to_be16(mac_hi);
-       mac_lo = cpu_to_be32(mac_lo);
-       memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
-       memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
+       int reg;
+
+       if (CHIP_IS_E1H(bp))
+               reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
+       else /* E1 */
+               reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
+
+       bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
 }
 
-static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
+static int bnx2x_init_func(struct bnx2x *bp)
 {
        int port = BP_PORT(bp);
-       u32 val, val2;
-       u32 config;
-       u16 i;
-       u32 ext_phy_type;
+       int func = BP_FUNC(bp);
+       u32 addr, val;
+       int i;
 
-       bp->link_params.bp = bp;
-       bp->link_params.port = port;
+       DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
 
-       bp->link_params.lane_config =
-               SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
-       bp->link_params.ext_phy_config =
-               SHMEM_RD(bp,
-                        dev_info.port_hw_config[port].external_phy_config);
-       /* BCM8727_NOC => BCM8727 no over current */
-       if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
-           PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
-               bp->link_params.ext_phy_config &=
-                       ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
-               bp->link_params.ext_phy_config |=
-                       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
-               bp->link_params.feature_config_flags |=
-                       FEATURE_CONFIG_BCM8727_NOC;
-       }
+       /* set MSI reconfigure capability */
+       addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
+       val = REG_RD(bp, addr);
+       val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
+       REG_WR(bp, addr, val);
 
-       bp->link_params.speed_cap_mask =
-               SHMEM_RD(bp,
-                        dev_info.port_hw_config[port].speed_capability_mask);
+       i = FUNC_ILT_BASE(func);
 
-       bp->port.link_config =
-               SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
+       bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
+       if (CHIP_IS_E1H(bp)) {
+               REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
+               REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
+       } else /* E1 */
+               REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
+                      PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
 
-       /* Get the 4 lanes xgxs config rx and tx */
-       for (i = 0; i < 2; i++) {
-               val = SHMEM_RD(bp,
-                          dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
-               bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
-               bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
+#ifdef BCM_CNIC
+       i += 1 + CNIC_ILT_LINES;
+       bnx2x_ilt_wr(bp, i, bp->timers_mapping);
+       if (CHIP_IS_E1(bp))
+               REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
+       else {
+               REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
+               REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
+       }
 
-               val = SHMEM_RD(bp,
-                          dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
-               bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
-               bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
+       i++;
+       bnx2x_ilt_wr(bp, i, bp->qm_mapping);
+       if (CHIP_IS_E1(bp))
+               REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
+       else {
+               REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
+               REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
        }
 
-       /* If the device is capable of WoL, set the default state according
-        * to the HW
-        */
-       config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
-       bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
-                  (config & PORT_FEATURE_WOL_ENABLED));
+       i++;
+       bnx2x_ilt_wr(bp, i, bp->t1_mapping);
+       if (CHIP_IS_E1(bp))
+               REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
+       else {
+               REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
+               REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
+       }
 
-       BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
-                      "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
-                      bp->link_params.lane_config,
-                      bp->link_params.ext_phy_config,
-                      bp->link_params.speed_cap_mask, bp->port.link_config);
+       /* tell the searcher where the T2 table is */
+       REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
 
-       bp->link_params.switch_cfg |= (bp->port.link_config &
-                                      PORT_FEATURE_CONNECTED_SWITCH_MASK);
-       bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
+       bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
+                   U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
 
-       bnx2x_link_settings_requested(bp);
+       bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
+                   U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
+                   U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
 
-       /*
-        * If connected directly, work with the internal PHY, otherwise, work
-        * with the external PHY
-        */
-       ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
-       if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
-               bp->mdio.prtad = bp->link_params.phy_addr;
+       REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
+#endif
 
-       else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
-                (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
-               bp->mdio.prtad =
-                       XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
+       if (CHIP_IS_E1H(bp)) {
+               bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
+               bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
+               bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
+               bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
+               bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
+               bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
+               bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
+               bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
+               bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
 
-       val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
-       val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
-       bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
-       memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
-       memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
+               REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
+               REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
+       }
 
-#ifdef BCM_CNIC
-       val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
-       val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
-       bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
-#endif
+       /* HC init per function */
+       if (CHIP_IS_E1H(bp)) {
+               REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
+
+               REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
+               REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
+       }
+       bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
+
+       /* Reset PCIE errors for debug */
+       REG_WR(bp, 0x2114, 0xffffffff);
+       REG_WR(bp, 0x2120, 0xffffffff);
+
+       return 0;
 }
 
-static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
+int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
 {
-       int func = BP_FUNC(bp);
-       u32 val, val2;
-       int rc = 0;
+       int i, rc = 0;
 
-       bnx2x_get_common_hwinfo(bp);
+       DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
+          BP_FUNC(bp), load_code);
 
-       bp->e1hov = 0;
-       bp->e1hmf = 0;
-       if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
-               bp->mf_config =
-                       SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
+       bp->dmae_ready = 0;
+       mutex_init(&bp->dmae_mutex);
+       rc = bnx2x_gunzip_init(bp);
+       if (rc)
+               return rc;
 
-               val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
-                      FUNC_MF_CFG_E1HOV_TAG_MASK);
-               if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
-                       bp->e1hmf = 1;
-               BNX2X_DEV_INFO("%s function mode\n",
-                              IS_E1HMF(bp) ? "multi" : "single");
+       switch (load_code) {
+       case FW_MSG_CODE_DRV_LOAD_COMMON:
+               rc = bnx2x_init_common(bp);
+               if (rc)
+                       goto init_hw_err;
+               /* no break */
 
-               if (IS_E1HMF(bp)) {
-                       val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
-                                                               e1hov_tag) &
-                              FUNC_MF_CFG_E1HOV_TAG_MASK);
-                       if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
-                               bp->e1hov = val;
-                               BNX2X_DEV_INFO("E1HOV for func %d is %d "
-                                              "(0x%04x)\n",
-                                              func, bp->e1hov, bp->e1hov);
-                       } else {
-                               BNX2X_ERROR("No valid E1HOV for func %d,"
-                                           "  aborting\n", func);
-                               rc = -EPERM;
-                       }
-               } else {
-                       if (BP_E1HVN(bp)) {
-                               BNX2X_ERROR("VN %d in single function mode,"
-                                           "  aborting\n", BP_E1HVN(bp));
-                               rc = -EPERM;
-                       }
-               }
+       case FW_MSG_CODE_DRV_LOAD_PORT:
+               bp->dmae_ready = 1;
+               rc = bnx2x_init_port(bp);
+               if (rc)
+                       goto init_hw_err;
+               /* no break */
+
+       case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+               bp->dmae_ready = 1;
+               rc = bnx2x_init_func(bp);
+               if (rc)
+                       goto init_hw_err;
+               break;
+
+       default:
+               BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
+               break;
        }
 
-       if (!BP_NOMCP(bp)) {
-               bnx2x_get_port_hwinfo(bp);
+       if (!BP_NOMCP(bp)) {
+               int func = BP_FUNC(bp);
+
+               bp->fw_drv_pulse_wr_seq =
+                               (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
+                                DRV_PULSE_SEQ_MASK);
+               DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
+       }
+
+       /* this needs to be done before gunzip end */
+       bnx2x_zero_def_sb(bp);
+       for_each_queue(bp, i)
+               bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
+#ifdef BCM_CNIC
+       bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
+#endif
+
+init_hw_err:
+       bnx2x_gunzip_end(bp);
+
+       return rc;
+}
+
+void bnx2x_free_mem(struct bnx2x *bp)
+{
+
+#define BNX2X_PCI_FREE(x, y, size) \
+       do { \
+               if (x) { \
+                       dma_free_coherent(&bp->pdev->dev, size, x, y); \
+                       x = NULL; \
+                       y = 0; \
+               } \
+       } while (0)
+
+#define BNX2X_FREE(x) \
+       do { \
+               if (x) { \
+                       vfree(x); \
+                       x = NULL; \
+               } \
+       } while (0)
 
-               bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
-                             DRV_MSG_SEQ_NUMBER_MASK);
-               BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
-       }
+       int i;
 
-       if (IS_E1HMF(bp)) {
-               val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
-               val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
-               if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
-                   (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
-                       bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
-                       bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
-                       bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
-                       bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
-                       bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
-                       bp->dev->dev_addr[5] = (u8)(val & 0xff);
-                       memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
-                              ETH_ALEN);
-                       memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
-                              ETH_ALEN);
-               }
+       /* fastpath */
+       /* Common */
+       for_each_queue(bp, i) {
 
-               return rc;
+               /* status blocks */
+               BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
+                              bnx2x_fp(bp, i, status_blk_mapping),
+                              sizeof(struct host_status_block));
        }
+       /* Rx */
+       for_each_queue(bp, i) {
 
-       if (BP_NOMCP(bp)) {
-               /* only supposed to happen on emulation/FPGA */
-               BNX2X_ERROR("warning: random MAC workaround active\n");
-               random_ether_addr(bp->dev->dev_addr);
-               memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
-       }
+               /* fastpath rx rings: rx_buf rx_desc rx_comp */
+               BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
+               BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
+                              bnx2x_fp(bp, i, rx_desc_mapping),
+                              sizeof(struct eth_rx_bd) * NUM_RX_BD);
 
-       return rc;
-}
+               BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
+                              bnx2x_fp(bp, i, rx_comp_mapping),
+                              sizeof(struct eth_fast_path_rx_cqe) *
+                              NUM_RCQ_BD);
 
-static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
-{
-       int cnt, i, block_end, rodi;
-       char vpd_data[BNX2X_VPD_LEN+1];
-       char str_id_reg[VENDOR_ID_LEN+1];
-       char str_id_cap[VENDOR_ID_LEN+1];
-       u8 len;
+               /* SGE ring */
+               BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
+               BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
+                              bnx2x_fp(bp, i, rx_sge_mapping),
+                              BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+       }
+       /* Tx */
+       for_each_queue(bp, i) {
 
-       cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
-       memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
+               /* fastpath tx rings: tx_buf tx_desc */
+               BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
+               BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
+                              bnx2x_fp(bp, i, tx_desc_mapping),
+                              sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+       }
+       /* end of fastpath */
 
-       if (cnt < BNX2X_VPD_LEN)
-               goto out_not_found;
+       BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
+                      sizeof(struct host_def_status_block));
 
-       i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
-                            PCI_VPD_LRDT_RO_DATA);
-       if (i < 0)
-               goto out_not_found;
+       BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
+                      sizeof(struct bnx2x_slowpath));
 
+#ifdef BCM_CNIC
+       BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
+       BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
+       BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
+       BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
+       BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
+                      sizeof(struct host_status_block));
+#endif
+       BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
 
-       block_end = i + PCI_VPD_LRDT_TAG_SIZE +
-                   pci_vpd_lrdt_size(&vpd_data[i]);
+#undef BNX2X_PCI_FREE
+#undef BNX2X_KFREE
+}
 
-       i += PCI_VPD_LRDT_TAG_SIZE;
+int bnx2x_alloc_mem(struct bnx2x *bp)
+{
 
-       if (block_end > BNX2X_VPD_LEN)
-               goto out_not_found;
+#define BNX2X_PCI_ALLOC(x, y, size) \
+       do { \
+               x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+               if (x == NULL) \
+                       goto alloc_mem_err; \
+               memset(x, 0, size); \
+       } while (0)
 
-       rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
-                                  PCI_VPD_RO_KEYWORD_MFR_ID);
-       if (rodi < 0)
-               goto out_not_found;
+#define BNX2X_ALLOC(x, size) \
+       do { \
+               x = vmalloc(size); \
+               if (x == NULL) \
+                       goto alloc_mem_err; \
+               memset(x, 0, size); \
+       } while (0)
 
-       len = pci_vpd_info_field_size(&vpd_data[rodi]);
+       int i;
 
-       if (len != VENDOR_ID_LEN)
-               goto out_not_found;
+       /* fastpath */
+       /* Common */
+       for_each_queue(bp, i) {
+               bnx2x_fp(bp, i, bp) = bp;
 
-       rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
+               /* status blocks */
+               BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
+                               &bnx2x_fp(bp, i, status_blk_mapping),
+                               sizeof(struct host_status_block));
+       }
+       /* Rx */
+       for_each_queue(bp, i) {
 
-       /* vendor specific info */
-       snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
-       snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
-       if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
-           !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
+               /* fastpath rx rings: rx_buf rx_desc rx_comp */
+               BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
+                               sizeof(struct sw_rx_bd) * NUM_RX_BD);
+               BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
+                               &bnx2x_fp(bp, i, rx_desc_mapping),
+                               sizeof(struct eth_rx_bd) * NUM_RX_BD);
 
-               rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
-                                               PCI_VPD_RO_KEYWORD_VENDOR0);
-               if (rodi >= 0) {
-                       len = pci_vpd_info_field_size(&vpd_data[rodi]);
+               BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
+                               &bnx2x_fp(bp, i, rx_comp_mapping),
+                               sizeof(struct eth_fast_path_rx_cqe) *
+                               NUM_RCQ_BD);
 
-                       rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
+               /* SGE ring */
+               BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
+                               sizeof(struct sw_rx_page) * NUM_RX_SGE);
+               BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
+                               &bnx2x_fp(bp, i, rx_sge_mapping),
+                               BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+       }
+       /* Tx */
+       for_each_queue(bp, i) {
 
-                       if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
-                               memcpy(bp->fw_ver, &vpd_data[rodi], len);
-                               bp->fw_ver[len] = ' ';
-                       }
-               }
-               return;
+               /* fastpath tx rings: tx_buf tx_desc */
+               BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
+                               sizeof(struct sw_tx_bd) * NUM_TX_BD);
+               BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
+                               &bnx2x_fp(bp, i, tx_desc_mapping),
+                               sizeof(union eth_tx_bd_types) * NUM_TX_BD);
        }
-out_not_found:
-       return;
-}
+       /* end of fastpath */
 
-static int __devinit bnx2x_init_bp(struct bnx2x *bp)
-{
-       int func = BP_FUNC(bp);
-       int timer_interval;
-       int rc;
+       BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
+                       sizeof(struct host_def_status_block));
 
-       /* Disable interrupt handling until HW is initialized */
-       atomic_set(&bp->intr_sem, 1);
-       smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
+       BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
+                       sizeof(struct bnx2x_slowpath));
 
-       mutex_init(&bp->port.phy_mutex);
-       mutex_init(&bp->fw_mb_mutex);
 #ifdef BCM_CNIC
-       mutex_init(&bp->cnic_mutex);
-#endif
+       BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
 
-       INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
-       INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
+       /* allocate searcher T2 table
+        * we allocate 1/4 of alloc num for T2
+        * (which is not entered into the ILT)
+        */
+       BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
 
-       rc = bnx2x_get_hwinfo(bp);
+       /* Initialize T2 (for 1024 connections) */
+       for (i = 0; i < 16*1024; i += 64)
+               *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
 
-       bnx2x_read_fwinfo(bp);
-       /* need to reset chip if undi was active */
-       if (!BP_NOMCP(bp))
-               bnx2x_undi_unload(bp);
+       /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
+       BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
 
-       if (CHIP_REV_IS_FPGA(bp))
-               dev_err(&bp->pdev->dev, "FPGA detected\n");
+       /* QM queues (128*MAX_CONN) */
+       BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
 
-       if (BP_NOMCP(bp) && (func == 0))
-               dev_err(&bp->pdev->dev, "MCP disabled, "
-                                       "must load devices in order!\n");
+       BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
+                       sizeof(struct host_status_block));
+#endif
 
-       /* Set multi queue mode */
-       if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
-           ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
-               dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
-                                       "requested is not MSI-X\n");
-               multi_mode = ETH_RSS_MODE_DISABLED;
-       }
-       bp->multi_mode = multi_mode;
+       /* Slow path ring */
+       BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
 
+       return 0;
 
-       bp->dev->features |= NETIF_F_GRO;
+alloc_mem_err:
+       bnx2x_free_mem(bp);
+       return -ENOMEM;
 
-       /* Set TPA flags */
-       if (disable_tpa) {
-               bp->flags &= ~TPA_ENABLE_FLAG;
-               bp->dev->features &= ~NETIF_F_LRO;
-       } else {
-               bp->flags |= TPA_ENABLE_FLAG;
-               bp->dev->features |= NETIF_F_LRO;
-       }
+#undef BNX2X_PCI_ALLOC
+#undef BNX2X_ALLOC
+}
 
-       if (CHIP_IS_E1(bp))
-               bp->dropless_fc = 0;
-       else
-               bp->dropless_fc = dropless_fc;
 
-       bp->mrrs = mrrs;
+/*
+ * Init service functions
+ */
 
-       bp->tx_ring_size = MAX_TX_AVAIL;
-       bp->rx_ring_size = MAX_RX_AVAIL;
+/**
+ * Sets a MAC in a CAM for a few L2 Clients for E1 chip
+ *
+ * @param bp driver descriptor
+ * @param set set or clear an entry (1 or 0)
+ * @param mac pointer to a buffer containing a MAC
+ * @param cl_bit_vec bit vector of clients to register a MAC for
+ * @param cam_offset offset in a CAM to use
+ * @param with_bcast set broadcast MAC as well
+ */
+static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
+                                     u32 cl_bit_vec, u8 cam_offset,
+                                     u8 with_bcast)
+{
+       struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
+       int port = BP_PORT(bp);
 
-       bp->rx_csum = 1;
+       /* CAM allocation
+        * unicasts 0-31:port0 32-63:port1
+        * multicast 64-127:port0 128-191:port1
+        */
+       config->hdr.length = 1 + (with_bcast ? 1 : 0);
+       config->hdr.offset = cam_offset;
+       config->hdr.client_id = 0xff;
+       config->hdr.reserved1 = 0;
 
-       /* make sure that the numbers are in the right granularity */
-       bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
-       bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
+       /* primary MAC */
+       config->config_table[0].cam_entry.msb_mac_addr =
+                                       swab16(*(u16 *)&mac[0]);
+       config->config_table[0].cam_entry.middle_mac_addr =
+                                       swab16(*(u16 *)&mac[2]);
+       config->config_table[0].cam_entry.lsb_mac_addr =
+                                       swab16(*(u16 *)&mac[4]);
+       config->config_table[0].cam_entry.flags = cpu_to_le16(port);
+       if (set)
+               config->config_table[0].target_table_entry.flags = 0;
+       else
+               CAM_INVALIDATE(config->config_table[0]);
+       config->config_table[0].target_table_entry.clients_bit_vector =
+                                               cpu_to_le32(cl_bit_vec);
+       config->config_table[0].target_table_entry.vlan_id = 0;
 
-       timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
-       bp->current_interval = (poll ? poll : timer_interval);
+       DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
+          (set ? "setting" : "clearing"),
+          config->config_table[0].cam_entry.msb_mac_addr,
+          config->config_table[0].cam_entry.middle_mac_addr,
+          config->config_table[0].cam_entry.lsb_mac_addr);
 
-       init_timer(&bp->timer);
-       bp->timer.expires = jiffies + bp->current_interval;
-       bp->timer.data = (unsigned long) bp;
-       bp->timer.function = bnx2x_timer;
+       /* broadcast */
+       if (with_bcast) {
+               config->config_table[1].cam_entry.msb_mac_addr =
+                       cpu_to_le16(0xffff);
+               config->config_table[1].cam_entry.middle_mac_addr =
+                       cpu_to_le16(0xffff);
+               config->config_table[1].cam_entry.lsb_mac_addr =
+                       cpu_to_le16(0xffff);
+               config->config_table[1].cam_entry.flags = cpu_to_le16(port);
+               if (set)
+                       config->config_table[1].target_table_entry.flags =
+                                       TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
+               else
+                       CAM_INVALIDATE(config->config_table[1]);
+               config->config_table[1].target_table_entry.clients_bit_vector =
+                                                       cpu_to_le32(cl_bit_vec);
+               config->config_table[1].target_table_entry.vlan_id = 0;
+       }
 
-       return rc;
+       bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
+                     U64_HI(bnx2x_sp_mapping(bp, mac_config)),
+                     U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
 }
 
-/*
- * ethtool service functions
+/**
+ * Sets a MAC in a CAM for a few L2 Clients for E1H chip
+ *
+ * @param bp driver descriptor
+ * @param set set or clear an entry (1 or 0)
+ * @param mac pointer to a buffer containing a MAC
+ * @param cl_bit_vec bit vector of clients to register a MAC for
+ * @param cam_offset offset in a CAM to use
  */
-
-/* All ethtool functions called with rtnl_lock */
-
-static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
+                                      u32 cl_bit_vec, u8 cam_offset)
 {
-       struct bnx2x *bp = netdev_priv(dev);
+       struct mac_configuration_cmd_e1h *config =
+               (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
 
-       cmd->supported = bp->port.supported;
-       cmd->advertising = bp->port.advertising;
+       config->hdr.length = 1;
+       config->hdr.offset = cam_offset;
+       config->hdr.client_id = 0xff;
+       config->hdr.reserved1 = 0;
 
-       if ((bp->state == BNX2X_STATE_OPEN) &&
-           !(bp->flags & MF_FUNC_DIS) &&
-           (bp->link_vars.link_up)) {
-               cmd->speed = bp->link_vars.line_speed;
-               cmd->duplex = bp->link_vars.duplex;
-               if (IS_E1HMF(bp)) {
-                       u16 vn_max_rate;
+       /* primary MAC */
+       config->config_table[0].msb_mac_addr =
+                                       swab16(*(u16 *)&mac[0]);
+       config->config_table[0].middle_mac_addr =
+                                       swab16(*(u16 *)&mac[2]);
+       config->config_table[0].lsb_mac_addr =
+                                       swab16(*(u16 *)&mac[4]);
+       config->config_table[0].clients_bit_vector =
+                                       cpu_to_le32(cl_bit_vec);
+       config->config_table[0].vlan_id = 0;
+       config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
+       if (set)
+               config->config_table[0].flags = BP_PORT(bp);
+       else
+               config->config_table[0].flags =
+                               MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
 
-                       vn_max_rate =
-                               ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
-                               FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
-                       if (vn_max_rate < cmd->speed)
-                               cmd->speed = vn_max_rate;
-               }
-       } else {
-               cmd->speed = -1;
-               cmd->duplex = -1;
-       }
+       DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
+          (set ? "setting" : "clearing"),
+          config->config_table[0].msb_mac_addr,
+          config->config_table[0].middle_mac_addr,
+          config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
 
-       if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
-               u32 ext_phy_type =
-                       XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
+       bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
+                     U64_HI(bnx2x_sp_mapping(bp, mac_config)),
+                     U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
+}
 
-               switch (ext_phy_type) {
-               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
-               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
-               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
-               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
-               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
-               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
-               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
-                       cmd->port = PORT_FIBRE;
-                       break;
+static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
+                            int *state_p, int poll)
+{
+       /* can take a while if any port is running */
+       int cnt = 5000;
 
-               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
-               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
-                       cmd->port = PORT_TP;
-                       break;
+       DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
+          poll ? "polling" : "waiting", state, idx);
 
-               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
-                       BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
-                                 bp->link_params.ext_phy_config);
-                       break;
+       might_sleep();
+       while (cnt--) {
+               if (poll) {
+                       bnx2x_rx_int(bp->fp, 10);
+                       /* if index is different from 0
+                        * the reply for some commands will
+                        * be on the non default queue
+                        */
+                       if (idx)
+                               bnx2x_rx_int(&bp->fp[idx], 10);
+               }
 
-               default:
-                       DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
-                          bp->link_params.ext_phy_config);
-                       break;
+               mb(); /* state is changed by bnx2x_sp_event() */
+               if (*state_p == state) {
+#ifdef BNX2X_STOP_ON_ERROR
+                       DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
+#endif
+                       return 0;
                }
-       } else
-               cmd->port = PORT_TP;
 
-       cmd->phy_address = bp->mdio.prtad;
-       cmd->transceiver = XCVR_INTERNAL;
+               msleep(1);
+
+               if (bp->panic)
+                       return -EIO;
+       }
 
-       if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
-               cmd->autoneg = AUTONEG_ENABLE;
-       else
-               cmd->autoneg = AUTONEG_DISABLE;
+       /* timeout! */
+       BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
+                 poll ? "polling" : "waiting", state, idx);
+#ifdef BNX2X_STOP_ON_ERROR
+       bnx2x_panic();
+#endif
+
+       return -EBUSY;
+}
 
-       cmd->maxtxpkt = 0;
-       cmd->maxrxpkt = 0;
+void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
+{
+       bp->set_mac_pending++;
+       smp_wmb();
 
-       DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
-          DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
-          DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
-          DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
-          cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
-          cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
-          cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+       bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
+                                  (1 << bp->fp->cl_id), BP_FUNC(bp));
 
-       return 0;
+       /* Wait for a completion */
+       bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
 }
 
-static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
 {
-       struct bnx2x *bp = netdev_priv(dev);
-       u32 advertising;
+       bp->set_mac_pending++;
+       smp_wmb();
 
-       if (IS_E1HMF(bp))
-               return 0;
+       bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
+                                 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
+                                 1);
 
-       DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
-          DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
-          DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
-          DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
-          cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
-          cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
-          cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
-
-       if (cmd->autoneg == AUTONEG_ENABLE) {
-               if (!(bp->port.supported & SUPPORTED_Autoneg)) {
-                       DP(NETIF_MSG_LINK, "Autoneg not supported\n");
-                       return -EINVAL;
-               }
+       /* Wait for a completion */
+       bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
+}
 
-               /* advertise the requested speed and duplex if supported */
-               cmd->advertising &= bp->port.supported;
+#ifdef BCM_CNIC
+/**
+ * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
+ * MAC(s). This function will wait until the ramrod completion
+ * returns.
+ *
+ * @param bp driver handle
+ * @param set set or clear the CAM entry
+ *
+ * @return 0 if success, -ENODEV if ramrod doesn't return.
+ */
+int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
+{
+       u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
 
-               bp->link_params.req_line_speed = SPEED_AUTO_NEG;
-               bp->link_params.req_duplex = DUPLEX_FULL;
-               bp->port.advertising |= (ADVERTISED_Autoneg |
-                                        cmd->advertising);
-
-       } else { /* forced speed */
-               /* advertise the requested speed and duplex if supported */
-               switch (cmd->speed) {
-               case SPEED_10:
-                       if (cmd->duplex == DUPLEX_FULL) {
-                               if (!(bp->port.supported &
-                                     SUPPORTED_10baseT_Full)) {
-                                       DP(NETIF_MSG_LINK,
-                                          "10M full not supported\n");
-                                       return -EINVAL;
-                               }
+       bp->set_mac_pending++;
+       smp_wmb();
 
-                               advertising = (ADVERTISED_10baseT_Full |
-                                              ADVERTISED_TP);
-                       } else {
-                               if (!(bp->port.supported &
-                                     SUPPORTED_10baseT_Half)) {
-                                       DP(NETIF_MSG_LINK,
-                                          "10M half not supported\n");
-                                       return -EINVAL;
-                               }
+       /* Send a SET_MAC ramrod */
+       if (CHIP_IS_E1(bp))
+               bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
+                                 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
+                                 1);
+       else
+               /* CAM allocation for E1H
+                * unicasts: by func number
+                * multicast: 20+FUNC*20, 20 each
+                */
+               bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
+                                  cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
 
-                               advertising = (ADVERTISED_10baseT_Half |
-                                              ADVERTISED_TP);
-                       }
-                       break;
+       /* Wait for a completion when setting */
+       bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
 
-               case SPEED_100:
-                       if (cmd->duplex == DUPLEX_FULL) {
-                               if (!(bp->port.supported &
-                                               SUPPORTED_100baseT_Full)) {
-                                       DP(NETIF_MSG_LINK,
-                                          "100M full not supported\n");
-                                       return -EINVAL;
-                               }
+       return 0;
+}
+#endif
 
-                               advertising = (ADVERTISED_100baseT_Full |
-                                              ADVERTISED_TP);
-                       } else {
-                               if (!(bp->port.supported &
-                                               SUPPORTED_100baseT_Half)) {
-                                       DP(NETIF_MSG_LINK,
-                                          "100M half not supported\n");
-                                       return -EINVAL;
-                               }
+int bnx2x_setup_leading(struct bnx2x *bp)
+{
+       int rc;
 
-                               advertising = (ADVERTISED_100baseT_Half |
-                                              ADVERTISED_TP);
-                       }
-                       break;
+       /* reset IGU state */
+       bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
 
-               case SPEED_1000:
-                       if (cmd->duplex != DUPLEX_FULL) {
-                               DP(NETIF_MSG_LINK, "1G half not supported\n");
-                               return -EINVAL;
-                       }
+       /* SETUP ramrod */
+       bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
 
-                       if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
-                               DP(NETIF_MSG_LINK, "1G full not supported\n");
-                               return -EINVAL;
-                       }
+       /* Wait for completion */
+       rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
 
-                       advertising = (ADVERTISED_1000baseT_Full |
-                                      ADVERTISED_TP);
-                       break;
+       return rc;
+}
 
-               case SPEED_2500:
-                       if (cmd->duplex != DUPLEX_FULL) {
-                               DP(NETIF_MSG_LINK,
-                                  "2.5G half not supported\n");
-                               return -EINVAL;
-                       }
+int bnx2x_setup_multi(struct bnx2x *bp, int index)
+{
+       struct bnx2x_fastpath *fp = &bp->fp[index];
 
-                       if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
-                               DP(NETIF_MSG_LINK,
-                                  "2.5G full not supported\n");
-                               return -EINVAL;
-                       }
+       /* reset IGU state */
+       bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
 
-                       advertising = (ADVERTISED_2500baseX_Full |
-                                      ADVERTISED_TP);
-                       break;
+       /* SETUP ramrod */
+       fp->state = BNX2X_FP_STATE_OPENING;
+       bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
+                     fp->cl_id, 0);
 
-               case SPEED_10000:
-                       if (cmd->duplex != DUPLEX_FULL) {
-                               DP(NETIF_MSG_LINK, "10G half not supported\n");
-                               return -EINVAL;
-                       }
+       /* Wait for completion */
+       return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
+                                &(fp->state), 0);
+}
 
-                       if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
-                               DP(NETIF_MSG_LINK, "10G full not supported\n");
-                               return -EINVAL;
-                       }
 
-                       advertising = (ADVERTISED_10000baseT_Full |
-                                      ADVERTISED_FIBRE);
-                       break;
+void bnx2x_set_num_queues_msix(struct bnx2x *bp)
+{
 
-               default:
-                       DP(NETIF_MSG_LINK, "Unsupported speed\n");
-                       return -EINVAL;
-               }
+       switch (bp->multi_mode) {
+       case ETH_RSS_MODE_DISABLED:
+               bp->num_queues = 1;
+               break;
 
-               bp->link_params.req_line_speed = cmd->speed;
-               bp->link_params.req_duplex = cmd->duplex;
-               bp->port.advertising = advertising;
-       }
+       case ETH_RSS_MODE_REGULAR:
+               if (num_queues)
+                       bp->num_queues = min_t(u32, num_queues,
+                                                 BNX2X_MAX_QUEUES(bp));
+               else
+                       bp->num_queues = min_t(u32, num_online_cpus(),
+                                                 BNX2X_MAX_QUEUES(bp));
+               break;
 
-       DP(NETIF_MSG_LINK, "req_line_speed %d\n"
-          DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
-          bp->link_params.req_line_speed, bp->link_params.req_duplex,
-          bp->port.advertising);
 
-       if (netif_running(dev)) {
-               bnx2x_stats_handle(bp, STATS_EVENT_STOP);
-               bnx2x_link_set(bp);
+       default:
+               bp->num_queues = 1;
+               break;
        }
-
-       return 0;
 }
 
-#define IS_E1_ONLINE(info)     (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
-#define IS_E1H_ONLINE(info)    (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
 
-static int bnx2x_get_regs_len(struct net_device *dev)
-{
-       struct bnx2x *bp = netdev_priv(dev);
-       int regdump_len = 0;
-       int i;
 
-       if (CHIP_IS_E1(bp)) {
-               for (i = 0; i < REGS_COUNT; i++)
-                       if (IS_E1_ONLINE(reg_addrs[i].info))
-                               regdump_len += reg_addrs[i].size;
+static int bnx2x_stop_multi(struct bnx2x *bp, int index)
+{
+       struct bnx2x_fastpath *fp = &bp->fp[index];
+       int rc;
 
-               for (i = 0; i < WREGS_COUNT_E1; i++)
-                       if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
-                               regdump_len += wreg_addrs_e1[i].size *
-                                       (1 + wreg_addrs_e1[i].read_regs_count);
+       /* halt the connection */
+       fp->state = BNX2X_FP_STATE_HALTING;
+       bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
 
-       } else { /* E1H */
-               for (i = 0; i < REGS_COUNT; i++)
-                       if (IS_E1H_ONLINE(reg_addrs[i].info))
-                               regdump_len += reg_addrs[i].size;
+       /* Wait for completion */
+       rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
+                              &(fp->state), 1);
+       if (rc) /* timeout */
+               return rc;
 
-               for (i = 0; i < WREGS_COUNT_E1H; i++)
-                       if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
-                               regdump_len += wreg_addrs_e1h[i].size *
-                                       (1 + wreg_addrs_e1h[i].read_regs_count);
-       }
-       regdump_len *= 4;
-       regdump_len += sizeof(struct dump_hdr);
+       /* delete cfc entry */
+       bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
 
-       return regdump_len;
+       /* Wait for completion */
+       rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
+                              &(fp->state), 1);
+       return rc;
 }
 
-static void bnx2x_get_regs(struct net_device *dev,
-                          struct ethtool_regs *regs, void *_p)
+static int bnx2x_stop_leading(struct bnx2x *bp)
 {
-       u32 *p = _p, i, j;
-       struct bnx2x *bp = netdev_priv(dev);
-       struct dump_hdr dump_hdr = {0};
+       __le16 dsb_sp_prod_idx;
+       /* if the other port is handling traffic,
+          this can take a lot of time */
+       int cnt = 500;
+       int rc;
 
-       regs->version = 0;
-       memset(p, 0, regs->len);
+       might_sleep();
 
-       if (!netif_running(bp->dev))
-               return;
+       /* Send HALT ramrod */
+       bp->fp[0].state = BNX2X_FP_STATE_HALTING;
+       bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
 
-       dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
-       dump_hdr.dump_sign = dump_sign_all;
-       dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
-       dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
-       dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
-       dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
-       dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
+       /* Wait for completion */
+       rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
+                              &(bp->fp[0].state), 1);
+       if (rc) /* timeout */
+               return rc;
 
-       memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
-       p += dump_hdr.hdr_size + 1;
+       dsb_sp_prod_idx = *bp->dsb_sp_prod;
 
-       if (CHIP_IS_E1(bp)) {
-               for (i = 0; i < REGS_COUNT; i++)
-                       if (IS_E1_ONLINE(reg_addrs[i].info))
-                               for (j = 0; j < reg_addrs[i].size; j++)
-                                       *p++ = REG_RD(bp,
-                                                     reg_addrs[i].addr + j*4);
+       /* Send PORT_DELETE ramrod */
+       bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
 
-       } else { /* E1H */
-               for (i = 0; i < REGS_COUNT; i++)
-                       if (IS_E1H_ONLINE(reg_addrs[i].info))
-                               for (j = 0; j < reg_addrs[i].size; j++)
-                                       *p++ = REG_RD(bp,
-                                                     reg_addrs[i].addr + j*4);
+       /* Wait for completion to arrive on default status block
+          we are going to reset the chip anyway
+          so there is not much to do if this times out
+        */
+       while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
+               if (!cnt) {
+                       DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
+                          "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
+                          *bp->dsb_sp_prod, dsb_sp_prod_idx);
+#ifdef BNX2X_STOP_ON_ERROR
+                       bnx2x_panic();
+#endif
+                       rc = -EBUSY;
+                       break;
+               }
+               cnt--;
+               msleep(1);
+               rmb(); /* Refresh the dsb_sp_prod */
        }
-}
+       bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
+       bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
 
-#define PHY_FW_VER_LEN                 10
+       return rc;
+}
 
-static void bnx2x_get_drvinfo(struct net_device *dev,
-                             struct ethtool_drvinfo *info)
+static void bnx2x_reset_func(struct bnx2x *bp)
 {
-       struct bnx2x *bp = netdev_priv(dev);
-       u8 phy_fw_ver[PHY_FW_VER_LEN];
+       int port = BP_PORT(bp);
+       int func = BP_FUNC(bp);
+       int base, i;
 
-       strcpy(info->driver, DRV_MODULE_NAME);
-       strcpy(info->version, DRV_MODULE_VERSION);
+       /* Configure IGU */
+       REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
+       REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
 
-       phy_fw_ver[0] = '\0';
-       if (bp->port.pmf) {
-               bnx2x_acquire_phy_lock(bp);
-               bnx2x_get_ext_phy_fw_version(&bp->link_params,
-                                            (bp->state != BNX2X_STATE_CLOSED),
-                                            phy_fw_ver, PHY_FW_VER_LEN);
-               bnx2x_release_phy_lock(bp);
+#ifdef BCM_CNIC
+       /* Disable Timer scan */
+       REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
+       /*
+        * Wait for at least 10ms and up to 2 second for the timers scan to
+        * complete
+        */
+       for (i = 0; i < 200; i++) {
+               msleep(10);
+               if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
+                       break;
        }
-
-       strncpy(info->fw_version, bp->fw_ver, 32);
-       snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
-                "bc %d.%d.%d%s%s",
-                (bp->common.bc_ver & 0xff0000) >> 16,
-                (bp->common.bc_ver & 0xff00) >> 8,
-                (bp->common.bc_ver & 0xff),
-                ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
-       strcpy(info->bus_info, pci_name(bp->pdev));
-       info->n_stats = BNX2X_NUM_STATS;
-       info->testinfo_len = BNX2X_NUM_TESTS;
-       info->eedump_len = bp->common.flash_size;
-       info->regdump_len = bnx2x_get_regs_len(dev);
+#endif
+       /* Clear ILT */
+       base = FUNC_ILT_BASE(func);
+       for (i = base; i < base + ILT_PER_FUNC; i++)
+               bnx2x_ilt_wr(bp, i, 0);
 }
 
-static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+static void bnx2x_reset_port(struct bnx2x *bp)
 {
-       struct bnx2x *bp = netdev_priv(dev);
-
-       if (bp->flags & NO_WOL_FLAG) {
-               wol->supported = 0;
-               wol->wolopts = 0;
-       } else {
-               wol->supported = WAKE_MAGIC;
-               if (bp->wol)
-                       wol->wolopts = WAKE_MAGIC;
-               else
-                       wol->wolopts = 0;
-       }
-       memset(&wol->sopass, 0, sizeof(wol->sopass));
-}
+       int port = BP_PORT(bp);
+       u32 val;
 
-static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
-       struct bnx2x *bp = netdev_priv(dev);
+       REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
 
-       if (wol->wolopts & ~WAKE_MAGIC)
-               return -EINVAL;
+       /* Do not rcv packets to BRB */
+       REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
+       /* Do not direct rcv packets that are not for MCP to the BRB */
+       REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
+                          NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
 
-       if (wol->wolopts & WAKE_MAGIC) {
-               if (bp->flags & NO_WOL_FLAG)
-                       return -EINVAL;
+       /* Configure AEU */
+       REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
 
-               bp->wol = 1;
-       } else
-               bp->wol = 0;
+       msleep(100);
+       /* Check for BRB port occupancy */
+       val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
+       if (val)
+               DP(NETIF_MSG_IFDOWN,
+                  "BRB1 is not empty  %d blocks are occupied\n", val);
 
-       return 0;
+       /* TODO: Close Doorbell port? */
 }
 
-static u32 bnx2x_get_msglevel(struct net_device *dev)
+static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
 {
-       struct bnx2x *bp = netdev_priv(dev);
+       DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
+          BP_FUNC(bp), reset_code);
 
-       return bp->msg_enable;
-}
+       switch (reset_code) {
+       case FW_MSG_CODE_DRV_UNLOAD_COMMON:
+               bnx2x_reset_port(bp);
+               bnx2x_reset_func(bp);
+               bnx2x_reset_common(bp);
+               break;
 
-static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
-{
-       struct bnx2x *bp = netdev_priv(dev);
+       case FW_MSG_CODE_DRV_UNLOAD_PORT:
+               bnx2x_reset_port(bp);
+               bnx2x_reset_func(bp);
+               break;
+
+       case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
+               bnx2x_reset_func(bp);
+               break;
 
-       if (capable(CAP_NET_ADMIN))
-               bp->msg_enable = level;
+       default:
+               BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
+               break;
+       }
 }
 
-static int bnx2x_nway_reset(struct net_device *dev)
+void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
 {
-       struct bnx2x *bp = netdev_priv(dev);
+       int port = BP_PORT(bp);
+       u32 reset_code = 0;
+       int i, cnt, rc;
 
-       if (!bp->port.pmf)
-               return 0;
+       /* Wait until tx fastpath tasks complete */
+       for_each_queue(bp, i) {
+               struct bnx2x_fastpath *fp = &bp->fp[i];
 
-       if (netif_running(dev)) {
-               bnx2x_stats_handle(bp, STATS_EVENT_STOP);
-               bnx2x_link_set(bp);
-       }
+               cnt = 1000;
+               while (bnx2x_has_tx_work_unload(fp)) {
 
-       return 0;
-}
+                       bnx2x_tx_int(fp);
+                       if (!cnt) {
+                               BNX2X_ERR("timeout waiting for queue[%d]\n",
+                                         i);
+#ifdef BNX2X_STOP_ON_ERROR
+                               bnx2x_panic();
+                               return -EBUSY;
+#else
+                               break;
+#endif
+                       }
+                       cnt--;
+                       msleep(1);
+               }
+       }
+       /* Give HW time to discard old tx messages */
+       msleep(1);
 
-static u32 bnx2x_get_link(struct net_device *dev)
-{
-       struct bnx2x *bp = netdev_priv(dev);
+       if (CHIP_IS_E1(bp)) {
+               struct mac_configuration_cmd *config =
+                                               bnx2x_sp(bp, mcast_config);
 
-       if (bp->flags & MF_FUNC_DIS)
-               return 0;
+               bnx2x_set_eth_mac_addr_e1(bp, 0);
 
-       return bp->link_vars.link_up;
-}
+               for (i = 0; i < config->hdr.length; i++)
+                       CAM_INVALIDATE(config->config_table[i]);
 
-static int bnx2x_get_eeprom_len(struct net_device *dev)
-{
-       struct bnx2x *bp = netdev_priv(dev);
+               config->hdr.length = i;
+               if (CHIP_REV_IS_SLOW(bp))
+                       config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
+               else
+                       config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
+               config->hdr.client_id = bp->fp->cl_id;
+               config->hdr.reserved1 = 0;
 
-       return bp->common.flash_size;
-}
+               bp->set_mac_pending++;
+               smp_wmb();
 
-static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
-{
-       int port = BP_PORT(bp);
-       int count, i;
-       u32 val = 0;
+               bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
+                             U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
+                             U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
 
-       /* adjust timeout for emulation/FPGA */
-       count = NVRAM_TIMEOUT_COUNT;
-       if (CHIP_REV_IS_SLOW(bp))
-               count *= 100;
+       } else { /* E1H */
+               REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
 
-       /* request access to nvram interface */
-       REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
-              (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
+               bnx2x_set_eth_mac_addr_e1h(bp, 0);
 
-       for (i = 0; i < count*10; i++) {
-               val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
-               if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
-                       break;
+               for (i = 0; i < MC_HASH_SIZE; i++)
+                       REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
 
-               udelay(5);
+               REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
        }
-
-       if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
-               DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
-               return -EBUSY;
+#ifdef BCM_CNIC
+       /* Clear iSCSI L2 MAC */
+       mutex_lock(&bp->cnic_mutex);
+       if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
+               bnx2x_set_iscsi_eth_mac_addr(bp, 0);
+               bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
        }
+       mutex_unlock(&bp->cnic_mutex);
+#endif
 
-       return 0;
-}
+       if (unload_mode == UNLOAD_NORMAL)
+               reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 
-static int bnx2x_release_nvram_lock(struct bnx2x *bp)
-{
-       int port = BP_PORT(bp);
-       int count, i;
-       u32 val = 0;
+       else if (bp->flags & NO_WOL_FLAG)
+               reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
 
-       /* adjust timeout for emulation/FPGA */
-       count = NVRAM_TIMEOUT_COUNT;
-       if (CHIP_REV_IS_SLOW(bp))
-               count *= 100;
+       else if (bp->wol) {
+               u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+               u8 *mac_addr = bp->dev->dev_addr;
+               u32 val;
+       /* The MAC address is written to entries 1-4 to
+          preserve entry 0 which is used by the PMF */
+               u8 entry = (BP_E1HVN(bp) + 1)*8;
 
-       /* relinquish nvram interface */
-       REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
-              (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
+               val = (mac_addr[0] << 8) | mac_addr[1];
+               EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
 
-       for (i = 0; i < count*10; i++) {
-               val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
-               if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
-                       break;
+               val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+                     (mac_addr[4] << 8) | mac_addr[5];
+               EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
 
-               udelay(5);
-       }
+               reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
 
-       if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
-               DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
-               return -EBUSY;
-       }
+       } else
+               reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 
-       return 0;
-}
+       /* Close multi and leading connections
+          Completions for ramrods are collected in a synchronous way */
+       for_each_nondefault_queue(bp, i)
+               if (bnx2x_stop_multi(bp, i))
+                       goto unload_error;
 
-static void bnx2x_enable_nvram_access(struct bnx2x *bp)
-{
-       u32 val;
+       rc = bnx2x_stop_leading(bp);
+       if (rc) {
+               BNX2X_ERR("Stop leading failed!\n");
+#ifdef BNX2X_STOP_ON_ERROR
+               return -EBUSY;
+#else
+               goto unload_error;
+#endif
+       }
 
-       val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
+unload_error:
+       if (!BP_NOMCP(bp))
+               reset_code = bnx2x_fw_command(bp, reset_code);
+       else {
+               DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
+                  load_count[0], load_count[1], load_count[2]);
+               load_count[0]--;
+               load_count[1 + port]--;
+               DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
+                  load_count[0], load_count[1], load_count[2]);
+               if (load_count[0] == 0)
+                       reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
+               else if (load_count[1 + port] == 0)
+                       reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
+               else
+                       reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
+       }
 
-       /* enable both bits, even on read */
-       REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
-              (val | MCPR_NVM_ACCESS_ENABLE_EN |
-                     MCPR_NVM_ACCESS_ENABLE_WR_EN));
-}
+       if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
+           (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
+               bnx2x__link_reset(bp);
 
-static void bnx2x_disable_nvram_access(struct bnx2x *bp)
-{
-       u32 val;
+       /* Reset the chip */
+       bnx2x_reset_chip(bp, reset_code);
 
-       val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
+       /* Report UNLOAD_DONE to MCP */
+       if (!BP_NOMCP(bp))
+               bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
 
-       /* disable both bits, even after read */
-       REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
-              (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
-                       MCPR_NVM_ACCESS_ENABLE_WR_EN)));
 }
 
-static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
-                                 u32 cmd_flags)
+void bnx2x_disable_close_the_gate(struct bnx2x *bp)
 {
-       int count, i, rc;
        u32 val;
 
-       /* build the command word */
-       cmd_flags |= MCPR_NVM_COMMAND_DOIT;
-
-       /* need to clear DONE bit separately */
-       REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
-
-       /* address of the NVRAM to read from */
-       REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
-              (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
-
-       /* issue a read command */
-       REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
+       DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
 
-       /* adjust timeout for emulation/FPGA */
-       count = NVRAM_TIMEOUT_COUNT;
-       if (CHIP_REV_IS_SLOW(bp))
-               count *= 100;
+       if (CHIP_IS_E1(bp)) {
+               int port = BP_PORT(bp);
+               u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+                       MISC_REG_AEU_MASK_ATTN_FUNC_0;
 
-       /* wait for completion */
-       *ret_val = 0;
-       rc = -EBUSY;
-       for (i = 0; i < count; i++) {
-               udelay(5);
-               val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
-
-               if (val & MCPR_NVM_COMMAND_DONE) {
-                       val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
-                       /* we read nvram data in cpu order
-                        * but ethtool sees it as an array of bytes
-                        * converting to big-endian will do the work */
-                       *ret_val = cpu_to_be32(val);
-                       rc = 0;
-                       break;
-               }
+               val = REG_RD(bp, addr);
+               val &= ~(0x300);
+               REG_WR(bp, addr, val);
+       } else if (CHIP_IS_E1H(bp)) {
+               val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
+               val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
+                        MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
+               REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
        }
-
-       return rc;
 }
 
-static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
-                           int buf_size)
-{
-       int rc;
-       u32 cmd_flags;
-       __be32 val;
 
-       if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
-               DP(BNX2X_MSG_NVM,
-                  "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
-                  offset, buf_size);
-               return -EINVAL;
-       }
+/* Close gates #2, #3 and #4: */
+static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
+{
+       u32 val, addr;
 
-       if (offset + buf_size > bp->common.flash_size) {
-               DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
-                                 " buf_size (0x%x) > flash_size (0x%x)\n",
-                  offset, buf_size, bp->common.flash_size);
-               return -EINVAL;
+       /* Gates #2 and #4a are closed/opened for "not E1" only */
+       if (!CHIP_IS_E1(bp)) {
+               /* #4 */
+               val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
+               REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
+                      close ? (val | 0x1) : (val & (~(u32)1)));
+               /* #2 */
+               val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
+               REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
+                      close ? (val | 0x1) : (val & (~(u32)1)));
        }
 
-       /* request access to nvram interface */
-       rc = bnx2x_acquire_nvram_lock(bp);
-       if (rc)
-               return rc;
-
-       /* enable access to nvram interface */
-       bnx2x_enable_nvram_access(bp);
-
-       /* read the first word(s) */
-       cmd_flags = MCPR_NVM_COMMAND_FIRST;
-       while ((buf_size > sizeof(u32)) && (rc == 0)) {
-               rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
-               memcpy(ret_buf, &val, 4);
-
-               /* advance to the next dword */
-               offset += sizeof(u32);
-               ret_buf += sizeof(u32);
-               buf_size -= sizeof(u32);
-               cmd_flags = 0;
-       }
+       /* #3 */
+       addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
+       val = REG_RD(bp, addr);
+       REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
 
-       if (rc == 0) {
-               cmd_flags |= MCPR_NVM_COMMAND_LAST;
-               rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
-               memcpy(ret_buf, &val, 4);
-       }
+       DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
+               close ? "closing" : "opening");
+       mmiowb();
+}
 
-       /* disable access to nvram interface */
-       bnx2x_disable_nvram_access(bp);
-       bnx2x_release_nvram_lock(bp);
+#define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
 
-       return rc;
+static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
+{
+       /* Do some magic... */
+       u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
+       *magic_val = val & SHARED_MF_CLP_MAGIC;
+       MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
 }
 
-static int bnx2x_get_eeprom(struct net_device *dev,
-                           struct ethtool_eeprom *eeprom, u8 *eebuf)
+/* Restore the value of the `magic' bit.
+ *
+ * @param pdev Device handle.
+ * @param magic_val Old value of the `magic' bit.
+ */
+static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
 {
-       struct bnx2x *bp = netdev_priv(dev);
-       int rc;
-
-       if (!netif_running(dev))
-               return -EAGAIN;
-
-       DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
-          DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
-          eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
-          eeprom->len, eeprom->len);
-
-       /* parameters already validated in ethtool_get_eeprom */
-
-       rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
-
-       return rc;
+       /* Restore the `magic' bit value... */
+       /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
+       SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
+               (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
+       u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
+       MF_CFG_WR(bp, shared_mf_config.clp_mb,
+               (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
 }
 
-static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
-                                  u32 cmd_flags)
+/* Prepares for MCP reset: takes care of CLP configurations.
+ *
+ * @param bp
+ * @param magic_val Old value of 'magic' bit.
+ */
+static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
 {
-       int count, i, rc;
+       u32 shmem;
+       u32 validity_offset;
 
-       /* build the command word */
-       cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
+       DP(NETIF_MSG_HW, "Starting\n");
 
-       /* need to clear DONE bit separately */
-       REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
+       /* Set `magic' bit in order to save MF config */
+       if (!CHIP_IS_E1(bp))
+               bnx2x_clp_reset_prep(bp, magic_val);
 
-       /* write the data */
-       REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
+       /* Get shmem offset */
+       shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+       validity_offset = offsetof(struct shmem_region, validity_map[0]);
 
-       /* address of the NVRAM to write to */
-       REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
-              (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
+       /* Clear validity map flags */
+       if (shmem > 0)
+               REG_WR(bp, shmem + validity_offset, 0);
+}
 
-       /* issue the write command */
-       REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
+#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
+#define MCP_ONE_TIMEOUT  100    /* 100 ms */
 
-       /* adjust timeout for emulation/FPGA */
-       count = NVRAM_TIMEOUT_COUNT;
+/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
+ * depending on the HW type.
+ *
+ * @param bp
+ */
+static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
+{
+       /* special handling for emulation and FPGA,
+          wait 10 times longer */
        if (CHIP_REV_IS_SLOW(bp))
-               count *= 100;
-
-       /* wait for completion */
-       rc = -EBUSY;
-       for (i = 0; i < count; i++) {
-               udelay(5);
-               val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
-               if (val & MCPR_NVM_COMMAND_DONE) {
-                       rc = 0;
-                       break;
-               }
-       }
-
-       return rc;
+               msleep(MCP_ONE_TIMEOUT*10);
+       else
+               msleep(MCP_ONE_TIMEOUT);
 }
 
-#define BYTE_OFFSET(offset)            (8 * (offset & 0x03))
-
-static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
-                             int buf_size)
+static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
 {
-       int rc;
-       u32 cmd_flags;
-       u32 align_offset;
-       __be32 val;
-
-       if (offset + buf_size > bp->common.flash_size) {
-               DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
-                                 " buf_size (0x%x) > flash_size (0x%x)\n",
-                  offset, buf_size, bp->common.flash_size);
-               return -EINVAL;
-       }
+       u32 shmem, cnt, validity_offset, val;
+       int rc = 0;
 
-       /* request access to nvram interface */
-       rc = bnx2x_acquire_nvram_lock(bp);
-       if (rc)
-               return rc;
+       msleep(100);
+
+       /* Get shmem offset */
+       shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+       if (shmem == 0) {
+               BNX2X_ERR("Shmem 0 return failure\n");
+               rc = -ENOTTY;
+               goto exit_lbl;
+       }
 
-       /* enable access to nvram interface */
-       bnx2x_enable_nvram_access(bp);
+       validity_offset = offsetof(struct shmem_region, validity_map[0]);
+
+       /* Wait for MCP to come up */
+       for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
+               /* TBD: it's best to check the validity map of the last port;
+                * currently this checks only port 0.
+                */
+               val = REG_RD(bp, shmem + validity_offset);
+               DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
+                  shmem + validity_offset, val);
 
-       cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
-       align_offset = (offset & ~0x03);
-       rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
+               /* check that shared memory is valid. */
+               if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+                   == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+                       break;
 
-       if (rc == 0) {
-               val &= ~(0xff << BYTE_OFFSET(offset));
-               val |= (*data_buf << BYTE_OFFSET(offset));
+               bnx2x_mcp_wait_one(bp);
+       }
 
-               /* nvram data is returned as an array of bytes
-                * convert it back to cpu order */
-               val = be32_to_cpu(val);
+       DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
 
-               rc = bnx2x_nvram_write_dword(bp, align_offset, val,
-                                            cmd_flags);
+       /* Check that shared memory is valid. This indicates that MCP is up. */
+       if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
+           (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
+               BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
+               rc = -ENOTTY;
+               goto exit_lbl;
        }
 
-       /* disable access to nvram interface */
-       bnx2x_disable_nvram_access(bp);
-       bnx2x_release_nvram_lock(bp);
+exit_lbl:
+       /* Restore the `magic' bit value */
+       if (!CHIP_IS_E1(bp))
+               bnx2x_clp_reset_done(bp, magic_val);
 
        return rc;
 }
 
-static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
-                            int buf_size)
+static void bnx2x_pxp_prep(struct bnx2x *bp)
 {
-       int rc;
-       u32 cmd_flags;
-       u32 val;
-       u32 written_so_far;
-
-       if (buf_size == 1)      /* ethtool */
-               return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
-
-       if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
-               DP(BNX2X_MSG_NVM,
-                  "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
-                  offset, buf_size);
-               return -EINVAL;
-       }
-
-       if (offset + buf_size > bp->common.flash_size) {
-               DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
-                                 " buf_size (0x%x) > flash_size (0x%x)\n",
-                  offset, buf_size, bp->common.flash_size);
-               return -EINVAL;
+       if (!CHIP_IS_E1(bp)) {
+               REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
+               REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
+               REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
+               mmiowb();
        }
+}
 
-       /* request access to nvram interface */
-       rc = bnx2x_acquire_nvram_lock(bp);
-       if (rc)
-               return rc;
+/*
+ * Reset the whole chip except for:
+ *      - PCIE core
+ *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
+ *              one reset bit)
+ *      - IGU
+ *      - MISC (including AEU)
+ *      - GRC
+ *      - RBCN, RBCP
+ */
+static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
+{
+       u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
 
-       /* enable access to nvram interface */
-       bnx2x_enable_nvram_access(bp);
+       not_reset_mask1 =
+               MISC_REGISTERS_RESET_REG_1_RST_HC |
+               MISC_REGISTERS_RESET_REG_1_RST_PXPV |
+               MISC_REGISTERS_RESET_REG_1_RST_PXP;
 
-       written_so_far = 0;
-       cmd_flags = MCPR_NVM_COMMAND_FIRST;
-       while ((written_so_far < buf_size) && (rc == 0)) {
-               if (written_so_far == (buf_size - sizeof(u32)))
-                       cmd_flags |= MCPR_NVM_COMMAND_LAST;
-               else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
-                       cmd_flags |= MCPR_NVM_COMMAND_LAST;
-               else if ((offset % NVRAM_PAGE_SIZE) == 0)
-                       cmd_flags |= MCPR_NVM_COMMAND_FIRST;
+       not_reset_mask2 =
+               MISC_REGISTERS_RESET_REG_2_RST_MDIO |
+               MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
+               MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
+               MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
+               MISC_REGISTERS_RESET_REG_2_RST_RBCN |
+               MISC_REGISTERS_RESET_REG_2_RST_GRC  |
+               MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
+               MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
 
-               memcpy(&val, data_buf, 4);
+       reset_mask1 = 0xffffffff;
 
-               rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
+       if (CHIP_IS_E1(bp))
+               reset_mask2 = 0xffff;
+       else
+               reset_mask2 = 0x1ffff;
 
-               /* advance to the next dword */
-               offset += sizeof(u32);
-               data_buf += sizeof(u32);
-               written_so_far += sizeof(u32);
-               cmd_flags = 0;
-       }
+       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+              reset_mask1 & (~not_reset_mask1));
+       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+              reset_mask2 & (~not_reset_mask2));
 
-       /* disable access to nvram interface */
-       bnx2x_disable_nvram_access(bp);
-       bnx2x_release_nvram_lock(bp);
+       barrier();
+       mmiowb();
 
-       return rc;
+       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
+       REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
+       mmiowb();
 }
 
-static int bnx2x_set_eeprom(struct net_device *dev,
-                           struct ethtool_eeprom *eeprom, u8 *eebuf)
+static int bnx2x_process_kill(struct bnx2x *bp)
 {
-       struct bnx2x *bp = netdev_priv(dev);
-       int port = BP_PORT(bp);
-       int rc = 0;
+       int cnt = 1000;
+       u32 val = 0;
+       u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
 
-       if (!netif_running(dev))
-               return -EAGAIN;
 
-       DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
-          DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
-          eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
-          eeprom->len, eeprom->len);
+       /* Empty the Tetris buffer, wait for 1s */
+       do {
+               sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
+               blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
+               port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
+               port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
+               pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
+               if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
+                   ((port_is_idle_0 & 0x1) == 0x1) &&
+                   ((port_is_idle_1 & 0x1) == 0x1) &&
+                   (pgl_exp_rom2 == 0xffffffff))
+                       break;
+               msleep(1);
+       } while (cnt-- > 0);
 
-       /* parameters already validated in ethtool_set_eeprom */
+       if (cnt <= 0) {
+               DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
+                         " are still"
+                         " outstanding read requests after 1s!\n");
+               DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
+                         " port_is_idle_0=0x%08x,"
+                         " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
+                         sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
+                         pgl_exp_rom2);
+               return -EAGAIN;
+       }
 
-       /* PHY eeprom can be accessed only by the PMF */
-       if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
-           !bp->port.pmf)
-               return -EINVAL;
+       barrier();
 
-       if (eeprom->magic == 0x50485950) {
-               /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
-               bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+       /* Close gates #2, #3 and #4 */
+       bnx2x_set_234_gates(bp, true);
 
-               bnx2x_acquire_phy_lock(bp);
-               rc |= bnx2x_link_reset(&bp->link_params,
-                                      &bp->link_vars, 0);
-               if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
-                                       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
-                       bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
-                                      MISC_REGISTERS_GPIO_HIGH, port);
-               bnx2x_release_phy_lock(bp);
-               bnx2x_link_report(bp);
+       /* TBD: Indicate that "process kill" is in progress to MCP */
 
-       } else if (eeprom->magic == 0x50485952) {
-               /* 'PHYR' (0x50485952): re-init link after FW upgrade */
-               if (bp->state == BNX2X_STATE_OPEN) {
-                       bnx2x_acquire_phy_lock(bp);
-                       rc |= bnx2x_link_reset(&bp->link_params,
-                                              &bp->link_vars, 1);
+       /* Clear "unprepared" bit */
+       REG_WR(bp, MISC_REG_UNPREPARED, 0);
+       barrier();
 
-                       rc |= bnx2x_phy_init(&bp->link_params,
-                                            &bp->link_vars);
-                       bnx2x_release_phy_lock(bp);
-                       bnx2x_calc_fc_adv(bp);
-               }
-       } else if (eeprom->magic == 0x53985943) {
-               /* 'PHYC' (0x53985943): PHY FW upgrade completed */
-               if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
-                                      PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
-                       u8 ext_phy_addr =
-                            XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
+       /* Make sure all is written to the chip before the reset */
+       mmiowb();
 
-                       /* DSP Remove Download Mode */
-                       bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
-                                      MISC_REGISTERS_GPIO_LOW, port);
+       /* Wait for 1ms to empty GLUE and PCI-E core queues,
+        * PSWHST, GRC and PSWRD Tetris buffer.
+        */
+       msleep(1);
 
-                       bnx2x_acquire_phy_lock(bp);
+       /* Prepare to chip reset: */
+       /* MCP */
+       bnx2x_reset_mcp_prep(bp, &val);
 
-                       bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
+       /* PXP */
+       bnx2x_pxp_prep(bp);
+       barrier();
 
-                       /* wait 0.5 sec to allow it to run */
-                       msleep(500);
-                       bnx2x_ext_phy_hw_reset(bp, port);
-                       msleep(500);
-                       bnx2x_release_phy_lock(bp);
-               }
-       } else
-               rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
+       /* reset the chip */
+       bnx2x_process_kill_chip_reset(bp);
+       barrier();
 
-       return rc;
-}
+       /* Recover after reset: */
+       /* MCP */
+       if (bnx2x_reset_mcp_comp(bp, val))
+               return -EAGAIN;
 
-static int bnx2x_get_coalesce(struct net_device *dev,
-                             struct ethtool_coalesce *coal)
-{
-       struct bnx2x *bp = netdev_priv(dev);
+       /* PXP */
+       bnx2x_pxp_prep(bp);
 
-       memset(coal, 0, sizeof(struct ethtool_coalesce));
+       /* Open the gates #2, #3 and #4 */
+       bnx2x_set_234_gates(bp, false);
 
-       coal->rx_coalesce_usecs = bp->rx_ticks;
-       coal->tx_coalesce_usecs = bp->tx_ticks;
+       /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
+        * reset state, re-enable attentions. */
 
        return 0;
 }
 
-static int bnx2x_set_coalesce(struct net_device *dev,
-                             struct ethtool_coalesce *coal)
+static int bnx2x_leader_reset(struct bnx2x *bp)
 {
-       struct bnx2x *bp = netdev_priv(dev);
-
-       bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
-       if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
-               bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
-
-       bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
-       if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
-               bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
+       int rc = 0;
+       /* Try to recover after the failure */
+       if (bnx2x_process_kill(bp)) {
+               printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
+                      bp->dev->name);
+               rc = -EAGAIN;
+               goto exit_leader_reset;
+       }
 
-       if (netif_running(dev))
-               bnx2x_update_coalesce(bp);
+       /* Clear "reset is in progress" bit and update the driver state */
+       bnx2x_set_reset_done(bp);
+       bp->recovery_state = BNX2X_RECOVERY_DONE;
 
-       return 0;
+exit_leader_reset:
+       bp->is_leader = 0;
+       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
+       smp_wmb();
+       return rc;
 }
 
-static void bnx2x_get_ringparam(struct net_device *dev,
-                               struct ethtool_ringparam *ering)
+/* Assumption: runs under rtnl lock. This together with the fact
+ * that it's called only from bnx2x_reset_task() ensures that it
+ * will never be called when netif_running(bp->dev) is false.
+ */
+static void bnx2x_parity_recover(struct bnx2x *bp)
 {
-       struct bnx2x *bp = netdev_priv(dev);
+       DP(NETIF_MSG_HW, "Handling parity\n");
+       while (1) {
+               switch (bp->recovery_state) {
+               case BNX2X_RECOVERY_INIT:
+                       DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
+                       /* Try to get a LEADER_LOCK HW lock */
+                       if (bnx2x_trylock_hw_lock(bp,
+                               HW_LOCK_RESOURCE_RESERVED_08))
+                               bp->is_leader = 1;
+
+                       /* Stop the driver */
+                       /* If interface has been removed - break */
+                       if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
+                               return;
+
+                       bp->recovery_state = BNX2X_RECOVERY_WAIT;
+                       /* Ensure "is_leader" and "recovery_state"
+                        *  update values are seen on other CPUs
+                        */
+                       smp_wmb();
+                       break;
+
+               case BNX2X_RECOVERY_WAIT:
+                       DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
+                       if (bp->is_leader) {
+                               u32 load_counter = bnx2x_get_load_cnt(bp);
+                               if (load_counter) {
+                                       /* Wait until all other functions get
+                                        * down.
+                                        */
+                                       schedule_delayed_work(&bp->reset_task,
+                                                               HZ/10);
+                                       return;
+                               } else {
+                                       /* If all other functions got down -
+                                        * try to bring the chip back to
+                                        * normal. In any case it's an exit
+                                        * point for a leader.
+                                        */
+                                       if (bnx2x_leader_reset(bp) ||
+                                       bnx2x_nic_load(bp, LOAD_NORMAL)) {
+                                               printk(KERN_ERR"%s: Recovery "
+                                               "has failed. Power cycle is "
+                                               "needed.\n", bp->dev->name);
+                                               /* Disconnect this device */
+                                               netif_device_detach(bp->dev);
+                                               /* Block ifup for all function
+                                                * of this ASIC until
+                                                * "process kill" or power
+                                                * cycle.
+                                                */
+                                               bnx2x_set_reset_in_progress(bp);
+                                               /* Shut down the power */
+                                               bnx2x_set_power_state(bp,
+                                                               PCI_D3hot);
+                                               return;
+                                       }
 
-       ering->rx_max_pending = MAX_RX_AVAIL;
-       ering->rx_mini_max_pending = 0;
-       ering->rx_jumbo_max_pending = 0;
+                                       return;
+                               }
+                       } else { /* non-leader */
+                               if (!bnx2x_reset_is_done(bp)) {
+                                       /* Try to get a LEADER_LOCK HW lock as
+                                        * long as a former leader may have
+                                        * been unloaded by the user or
+                                        * released leadership for another
+                                        * reason.
+                                        */
+                                       if (bnx2x_trylock_hw_lock(bp,
+                                           HW_LOCK_RESOURCE_RESERVED_08)) {
+                                               /* I'm a leader now! Restart a
+                                                * switch case.
+                                                */
+                                               bp->is_leader = 1;
+                                               break;
+                                       }
 
-       ering->rx_pending = bp->rx_ring_size;
-       ering->rx_mini_pending = 0;
-       ering->rx_jumbo_pending = 0;
+                                       schedule_delayed_work(&bp->reset_task,
+                                                               HZ/10);
+                                       return;
 
-       ering->tx_max_pending = MAX_TX_AVAIL;
-       ering->tx_pending = bp->tx_ring_size;
+                               } else { /* A leader has completed
+                                         * the "process kill". It's an exit
+                                         * point for a non-leader.
+                                         */
+                                       bnx2x_nic_load(bp, LOAD_NORMAL);
+                                       bp->recovery_state =
+                                               BNX2X_RECOVERY_DONE;
+                                       smp_wmb();
+                                       return;
+                               }
+                       }
+               default:
+                       return;
+               }
+       }
 }
 
-static int bnx2x_set_ringparam(struct net_device *dev,
-                              struct ethtool_ringparam *ering)
+/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
+ * scheduled on a general queue in order to prevent a deadlock.
+ */
+static void bnx2x_reset_task(struct work_struct *work)
 {
-       struct bnx2x *bp = netdev_priv(dev);
-       int rc = 0;
+       struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
 
-       if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
-               printk(KERN_ERR "Handling parity error recovery. Try again later\n");
-               return -EAGAIN;
-       }
+#ifdef BNX2X_STOP_ON_ERROR
+       BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
+                 " so reset not done to allow debug dump,\n"
+        KERN_ERR " you will need to reboot when done\n");
+       return;
+#endif
 
-       if ((ering->rx_pending > MAX_RX_AVAIL) ||
-           (ering->tx_pending > MAX_TX_AVAIL) ||
-           (ering->tx_pending <= MAX_SKB_FRAGS + 4))
-               return -EINVAL;
+       rtnl_lock();
 
-       bp->rx_ring_size = ering->rx_pending;
-       bp->tx_ring_size = ering->tx_pending;
+       if (!netif_running(bp->dev))
+               goto reset_task_exit;
 
-       if (netif_running(dev)) {
+       if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
+               bnx2x_parity_recover(bp);
+       else {
                bnx2x_nic_unload(bp, UNLOAD_NORMAL);
-               rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+               bnx2x_nic_load(bp, LOAD_NORMAL);
        }
 
-       return rc;
+reset_task_exit:
+       rtnl_unlock();
 }
 
-static void bnx2x_get_pauseparam(struct net_device *dev,
-                                struct ethtool_pauseparam *epause)
-{
-       struct bnx2x *bp = netdev_priv(dev);
-
-       epause->autoneg = (bp->link_params.req_flow_ctrl ==
-                          BNX2X_FLOW_CTRL_AUTO) &&
-                         (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
-
-       epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
-                           BNX2X_FLOW_CTRL_RX);
-       epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
-                           BNX2X_FLOW_CTRL_TX);
+/* end of nic load/unload */
 
-       DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
-          DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
-          epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
-}
+/*
+ * Init service functions
+ */
 
-static int bnx2x_set_pauseparam(struct net_device *dev,
-                               struct ethtool_pauseparam *epause)
+static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
 {
-       struct bnx2x *bp = netdev_priv(dev);
-
-       if (IS_E1HMF(bp))
-               return 0;
-
-       DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
-          DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
-          epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
-
-       bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
-
-       if (epause->rx_pause)
-               bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
-
-       if (epause->tx_pause)
-               bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
-
-       if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
-               bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
-
-       if (epause->autoneg) {
-               if (!(bp->port.supported & SUPPORTED_Autoneg)) {
-                       DP(NETIF_MSG_LINK, "autoneg not supported\n");
-                       return -EINVAL;
-               }
-
-               if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
-                       bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
-       }
-
-       DP(NETIF_MSG_LINK,
-          "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
-
-       if (netif_running(dev)) {
-               bnx2x_stats_handle(bp, STATS_EVENT_STOP);
-               bnx2x_link_set(bp);
+       switch (func) {
+       case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
+       case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
+       case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
+       case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
+       case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
+       case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
+       case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
+       case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
+       default:
+               BNX2X_ERR("Unsupported function index: %d\n", func);
+               return (u32)(-1);
        }
-
-       return 0;
 }
 
-static int bnx2x_set_flags(struct net_device *dev, u32 data)
+static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
 {
-       struct bnx2x *bp = netdev_priv(dev);
-       int changed = 0;
-       int rc = 0;
+       u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
 
-       if (data & ~(ETH_FLAG_LRO | ETH_FLAG_RXHASH))
-               return -EINVAL;
+       /* Flush all outstanding writes */
+       mmiowb();
 
-       if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
-               printk(KERN_ERR "Handling parity error recovery. Try again later\n");
-               return -EAGAIN;
+       /* Pretend to be function 0 */
+       REG_WR(bp, reg, 0);
+       /* Flush the GRC transaction (in the chip) */
+       new_val = REG_RD(bp, reg);
+       if (new_val != 0) {
+               BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
+                         new_val);
+               BUG();
        }
 
-       /* TPA requires Rx CSUM offloading */
-       if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
-               if (!disable_tpa) {
-                       if (!(dev->features & NETIF_F_LRO)) {
-                               dev->features |= NETIF_F_LRO;
-                               bp->flags |= TPA_ENABLE_FLAG;
-                               changed = 1;
-                       }
-               } else
-                       rc = -EINVAL;
-       } else if (dev->features & NETIF_F_LRO) {
-               dev->features &= ~NETIF_F_LRO;
-               bp->flags &= ~TPA_ENABLE_FLAG;
-               changed = 1;
-       }
+       /* From now we are in the "like-E1" mode */
+       bnx2x_int_disable(bp);
 
-       if (data & ETH_FLAG_RXHASH)
-               dev->features |= NETIF_F_RXHASH;
-       else
-               dev->features &= ~NETIF_F_RXHASH;
+       /* Flush all outstanding writes */
+       mmiowb();
 
-       if (changed && netif_running(dev)) {
-               bnx2x_nic_unload(bp, UNLOAD_NORMAL);
-               rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+       /* Restore the original function settings */
+       REG_WR(bp, reg, orig_func);
+       new_val = REG_RD(bp, reg);
+       if (new_val != orig_func) {
+               BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
+                         orig_func, new_val);
+               BUG();
        }
-
-       return rc;
 }
 
-static u32 bnx2x_get_rx_csum(struct net_device *dev)
+static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
 {
-       struct bnx2x *bp = netdev_priv(dev);
-
-       return bp->rx_csum;
+       if (CHIP_IS_E1H(bp))
+               bnx2x_undi_int_disable_e1h(bp, func);
+       else
+               bnx2x_int_disable(bp);
 }
 
-static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
+static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 {
-       struct bnx2x *bp = netdev_priv(dev);
-       int rc = 0;
-
-       if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
-               printk(KERN_ERR "Handling parity error recovery. Try again later\n");
-               return -EAGAIN;
-       }
-
-       bp->rx_csum = data;
-
-       /* Disable TPA, when Rx CSUM is disabled. Otherwise all
-          TPA'ed packets will be discarded due to wrong TCP CSUM */
-       if (!data) {
-               u32 flags = ethtool_op_get_flags(dev);
+       u32 val;
 
-               rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
-       }
+       /* Check if there is any driver already loaded */
+       val = REG_RD(bp, MISC_REG_UNPREPARED);
+       if (val == 0x1) {
+               /* Check if it is the UNDI driver
+                * UNDI driver initializes CID offset for normal bell to 0x7
+                */
+               bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+               val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
+               if (val == 0x7) {
+                       u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+                       /* save our func */
+                       int func = BP_FUNC(bp);
+                       u32 swap_en;
+                       u32 swap_val;
 
-       return rc;
-}
+                       /* clear the UNDI indication */
+                       REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
 
-static int bnx2x_set_tso(struct net_device *dev, u32 data)
-{
-       if (data) {
-               dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
-               dev->features |= NETIF_F_TSO6;
-       } else {
-               dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
-               dev->features &= ~NETIF_F_TSO6;
-       }
+                       BNX2X_DEV_INFO("UNDI is active! reset device\n");
 
-       return 0;
-}
+                       /* try unload UNDI on port 0 */
+                       bp->func = 0;
+                       bp->fw_seq =
+                              (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
+                               DRV_MSG_SEQ_NUMBER_MASK);
+                       reset_code = bnx2x_fw_command(bp, reset_code);
 
-static const struct {
-       char string[ETH_GSTRING_LEN];
-} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
-       { "register_test (offline)" },
-       { "memory_test (offline)" },
-       { "loopback_test (offline)" },
-       { "nvram_test (online)" },
-       { "interrupt_test (online)" },
-       { "link_test (online)" },
-       { "idle check (online)" }
-};
+                       /* if UNDI is loaded on the other port */
+                       if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
 
-static int bnx2x_test_registers(struct bnx2x *bp)
-{
-       int idx, i, rc = -ENODEV;
-       u32 wr_val = 0;
-       int port = BP_PORT(bp);
-       static const struct {
-               u32 offset0;
-               u32 offset1;
-               u32 mask;
-       } reg_tbl[] = {
-/* 0 */                { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
-               { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
-               { HC_REG_AGG_INT_0,                    4, 0x000003ff },
-               { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
-               { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
-               { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
-               { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
-               { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
-               { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
-               { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
-/* 10 */       { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
-               { QM_REG_CONNNUM_0,                    4, 0x000fffff },
-               { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
-               { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
-               { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
-               { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
-               { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
-               { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
-               { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
-               { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
-/* 20 */       { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
-               { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
-               { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
-               { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
-               { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
-               { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
-               { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
-               { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
-               { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
-               { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
-/* 30 */       { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
-               { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
-               { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
-               { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
-               { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
-               { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
-               { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
-
-               { 0xffffffff, 0, 0x00000000 }
-       };
+                               /* send "DONE" for previous unload */
+                               bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
 
-       if (!netif_running(bp->dev))
-               return rc;
+                               /* unload UNDI on port 1 */
+                               bp->func = 1;
+                               bp->fw_seq =
+                              (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
+                                       DRV_MSG_SEQ_NUMBER_MASK);
+                               reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 
-       /* Repeat the test twice:
-          First by writing 0x00000000, second by writing 0xffffffff */
-       for (idx = 0; idx < 2; idx++) {
+                               bnx2x_fw_command(bp, reset_code);
+                       }
 
-               switch (idx) {
-               case 0:
-                       wr_val = 0;
-                       break;
-               case 1:
-                       wr_val = 0xffffffff;
-                       break;
-               }
+                       /* now it's safe to release the lock */
+                       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
 
-               for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
-                       u32 offset, mask, save_val, val;
+                       bnx2x_undi_int_disable(bp, func);
 
-                       offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
-                       mask = reg_tbl[i].mask;
+                       /* close input traffic and wait for it */
+                       /* Do not rcv packets to BRB */
+                       REG_WR(bp,
+                             (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
+                                            NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
+                       /* Do not direct rcv packets that are not for MCP to
+                        * the BRB */
+                       REG_WR(bp,
+                              (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
+                                             NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
+                       /* clear AEU */
+                       REG_WR(bp,
+                            (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+                                           MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
+                       msleep(10);
 
-                       save_val = REG_RD(bp, offset);
+                       /* save NIG port swap info */
+                       swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+                       swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+                       /* reset device */
+                       REG_WR(bp,
+                              GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+                              0xd3ffffff);
+                       REG_WR(bp,
+                              GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+                              0x1403);
+                       /* take the NIG out of reset and restore swap values */
+                       REG_WR(bp,
+                              GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
+                              MISC_REGISTERS_RESET_REG_1_RST_NIG);
+                       REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
+                       REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
 
-                       REG_WR(bp, offset, (wr_val & mask));
-                       val = REG_RD(bp, offset);
+                       /* send unload done to the MCP */
+                       bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
 
-                       /* Restore the original register's value */
-                       REG_WR(bp, offset, save_val);
+                       /* restore our func and fw_seq */
+                       bp->func = func;
+                       bp->fw_seq =
+                              (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
+                               DRV_MSG_SEQ_NUMBER_MASK);
 
-                       /* verify value is as expected */
-                       if ((val & mask) != (wr_val & mask)) {
-                               DP(NETIF_MSG_PROBE,
-                                  "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
-                                  offset, val, wr_val, mask);
-                               goto test_reg_exit;
-                       }
-               }
+               } else
+                       bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
        }
-
-       rc = 0;
-
-test_reg_exit:
-       return rc;
 }
 
-static int bnx2x_test_memory(struct bnx2x *bp)
+static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
 {
-       int i, j, rc = -ENODEV;
-       u32 val;
-       static const struct {
-               u32 offset;
-               int size;
-       } mem_tbl[] = {
-               { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
-               { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
-               { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
-               { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
-               { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
-               { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
-               { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
-
-               { 0xffffffff, 0 }
-       };
-       static const struct {
-               char *name;
-               u32 offset;
-               u32 e1_mask;
-               u32 e1h_mask;
-       } prty_tbl[] = {
-               { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
-               { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
-               { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
-               { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
-               { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
-               { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
-
-               { NULL, 0xffffffff, 0, 0 }
-       };
+       u32 val, val2, val3, val4, id;
+       u16 pmc;
 
-       if (!netif_running(bp->dev))
-               return rc;
+       /* Get the chip revision id and number. */
+       /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
+       val = REG_RD(bp, MISC_REG_CHIP_NUM);
+       id = ((val & 0xffff) << 16);
+       val = REG_RD(bp, MISC_REG_CHIP_REV);
+       id |= ((val & 0xf) << 12);
+       val = REG_RD(bp, MISC_REG_CHIP_METAL);
+       id |= ((val & 0xff) << 4);
+       val = REG_RD(bp, MISC_REG_BOND_ID);
+       id |= (val & 0xf);
+       bp->common.chip_id = id;
+       bp->link_params.chip_id = bp->common.chip_id;
+       BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
 
-       /* Go through all the memories */
-       for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
-               for (j = 0; j < mem_tbl[i].size; j++)
-                       REG_RD(bp, mem_tbl[i].offset + j*4);
-
-       /* Check the parity status */
-       for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
-               val = REG_RD(bp, prty_tbl[i].offset);
-               if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
-                   (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
-                       DP(NETIF_MSG_HW,
-                          "%s is 0x%x\n", prty_tbl[i].name, val);
-                       goto test_mem_exit;
-               }
+       val = (REG_RD(bp, 0x2874) & 0x55);
+       if ((bp->common.chip_id & 0x1) ||
+           (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
+               bp->flags |= ONE_PORT_FLAG;
+               BNX2X_DEV_INFO("single port device\n");
        }
 
-       rc = 0;
-
-test_mem_exit:
-       return rc;
-}
-
-static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
-{
-       int cnt = 1000;
+       val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
+       bp->common.flash_size = (NVRAM_1MB_SIZE <<
+                                (val & MCPR_NVM_CFG4_FLASH_SIZE));
+       BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
+                      bp->common.flash_size, bp->common.flash_size);
 
-       if (link_up)
-               while (bnx2x_link_test(bp) && cnt--)
-                       msleep(10);
-}
+       bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+       bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
+       bp->link_params.shmem_base = bp->common.shmem_base;
+       BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
+                      bp->common.shmem_base, bp->common.shmem2_base);
 
-static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
-{
-       unsigned int pkt_size, num_pkts, i;
-       struct sk_buff *skb;
-       unsigned char *packet;
-       struct bnx2x_fastpath *fp_rx = &bp->fp[0];
-       struct bnx2x_fastpath *fp_tx = &bp->fp[0];
-       u16 tx_start_idx, tx_idx;
-       u16 rx_start_idx, rx_idx;
-       u16 pkt_prod, bd_prod;
-       struct sw_tx_bd *tx_buf;
-       struct eth_tx_start_bd *tx_start_bd;
-       struct eth_tx_parse_bd *pbd = NULL;
-       dma_addr_t mapping;
-       union eth_rx_cqe *cqe;
-       u8 cqe_fp_flags;
-       struct sw_rx_bd *rx_buf;
-       u16 len;
-       int rc = -ENODEV;
-
-       /* check the loopback mode */
-       switch (loopback_mode) {
-       case BNX2X_PHY_LOOPBACK:
-               if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
-                       return -EINVAL;
-               break;
-       case BNX2X_MAC_LOOPBACK:
-               bp->link_params.loopback_mode = LOOPBACK_BMAC;
-               bnx2x_phy_init(&bp->link_params, &bp->link_vars);
-               break;
-       default:
-               return -EINVAL;
+       if (!bp->common.shmem_base ||
+           (bp->common.shmem_base < 0xA0000) ||
+           (bp->common.shmem_base >= 0xC0000)) {
+               BNX2X_DEV_INFO("MCP not active\n");
+               bp->flags |= NO_MCP_FLAG;
+               return;
        }
 
-       /* prepare the loopback packet */
-       pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
-                    bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
-       skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
-       if (!skb) {
-               rc = -ENOMEM;
-               goto test_loopback_exit;
-       }
-       packet = skb_put(skb, pkt_size);
-       memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
-       memset(packet + ETH_ALEN, 0, ETH_ALEN);
-       memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
-       for (i = ETH_HLEN; i < pkt_size; i++)
-               packet[i] = (unsigned char) (i & 0xff);
-
-       /* send the loopback packet */
-       num_pkts = 0;
-       tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
-       rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
-
-       pkt_prod = fp_tx->tx_pkt_prod++;
-       tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
-       tx_buf->first_bd = fp_tx->tx_bd_prod;
-       tx_buf->skb = skb;
-       tx_buf->flags = 0;
-
-       bd_prod = TX_BD(fp_tx->tx_bd_prod);
-       tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
-       mapping = dma_map_single(&bp->pdev->dev, skb->data,
-                                skb_headlen(skb), DMA_TO_DEVICE);
-       tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
-       tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
-       tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
-       tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
-       tx_start_bd->vlan = cpu_to_le16(pkt_prod);
-       tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
-       tx_start_bd->general_data = ((UNICAST_ADDRESS <<
-                               ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
-
-       /* turn on parsing and get a BD */
-       bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
-       pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
-
-       memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
+       val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
+       if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+               != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+               BNX2X_ERROR("BAD MCP validity signature\n");
 
-       wmb();
+       bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
+       BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
 
-       fp_tx->tx_db.data.prod += 2;
-       barrier();
-       DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
+       bp->link_params.hw_led_mode = ((bp->common.hw_config &
+                                       SHARED_HW_CFG_LED_MODE_MASK) >>
+                                      SHARED_HW_CFG_LED_MODE_SHIFT);
 
-       mmiowb();
+       bp->link_params.feature_config_flags = 0;
+       val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
+       if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
+               bp->link_params.feature_config_flags |=
+                               FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
+       else
+               bp->link_params.feature_config_flags &=
+                               ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
 
-       num_pkts++;
-       fp_tx->tx_bd_prod += 2; /* start + pbd */
+       val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
+       bp->common.bc_ver = val;
+       BNX2X_DEV_INFO("bc_ver %X\n", val);
+       if (val < BNX2X_BC_VER) {
+               /* for now only warn
+                * later we might need to enforce this */
+               BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
+                           "please upgrade BC\n", BNX2X_BC_VER, val);
+       }
+       bp->link_params.feature_config_flags |=
+               (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
+               FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
 
-       udelay(100);
+       if (BP_E1HVN(bp) == 0) {
+               pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
+               bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
+       } else {
+               /* no WOL capability for E1HVN != 0 */
+               bp->flags |= NO_WOL_FLAG;
+       }
+       BNX2X_DEV_INFO("%sWoL capable\n",
+                      (bp->flags & NO_WOL_FLAG) ? "not " : "");
 
-       tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
-       if (tx_idx != tx_start_idx + num_pkts)
-               goto test_loopback_exit;
+       val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
+       val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
+       val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
+       val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
 
-       rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
-       if (rx_idx != rx_start_idx + num_pkts)
-               goto test_loopback_exit;
+       dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
+                val, val2, val3, val4);
+}
 
-       cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
-       cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
-       if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
-               goto test_loopback_rx_exit;
+static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
+                                                   u32 switch_cfg)
+{
+       int port = BP_PORT(bp);
+       u32 ext_phy_type;
 
-       len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
-       if (len != pkt_size)
-               goto test_loopback_rx_exit;
+       switch (switch_cfg) {
+       case SWITCH_CFG_1G:
+               BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
 
-       rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
-       skb = rx_buf->skb;
-       skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
-       for (i = ETH_HLEN; i < pkt_size; i++)
-               if (*(skb->data + i) != (unsigned char) (i & 0xff))
-                       goto test_loopback_rx_exit;
+               ext_phy_type =
+                       SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
+               switch (ext_phy_type) {
+               case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
+                       BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
+                                      ext_phy_type);
 
-       rc = 0;
+                       bp->port.supported |= (SUPPORTED_10baseT_Half |
+                                              SUPPORTED_10baseT_Full |
+                                              SUPPORTED_100baseT_Half |
+                                              SUPPORTED_100baseT_Full |
+                                              SUPPORTED_1000baseT_Full |
+                                              SUPPORTED_2500baseX_Full |
+                                              SUPPORTED_TP |
+                                              SUPPORTED_FIBRE |
+                                              SUPPORTED_Autoneg |
+                                              SUPPORTED_Pause |
+                                              SUPPORTED_Asym_Pause);
+                       break;
 
-test_loopback_rx_exit:
+               case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
+                       BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
+                                      ext_phy_type);
 
-       fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
-       fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
-       fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
-       fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
+                       bp->port.supported |= (SUPPORTED_10baseT_Half |
+                                              SUPPORTED_10baseT_Full |
+                                              SUPPORTED_100baseT_Half |
+                                              SUPPORTED_100baseT_Full |
+                                              SUPPORTED_1000baseT_Full |
+                                              SUPPORTED_TP |
+                                              SUPPORTED_FIBRE |
+                                              SUPPORTED_Autoneg |
+                                              SUPPORTED_Pause |
+                                              SUPPORTED_Asym_Pause);
+                       break;
 
-       /* Update producers */
-       bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
-                            fp_rx->rx_sge_prod);
+               default:
+                       BNX2X_ERR("NVRAM config error. "
+                                 "BAD SerDes ext_phy_config 0x%x\n",
+                                 bp->link_params.ext_phy_config);
+                       return;
+               }
 
-test_loopback_exit:
-       bp->link_params.loopback_mode = LOOPBACK_NONE;
+               bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
+                                          port*0x10);
+               BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
+               break;
 
-       return rc;
-}
+       case SWITCH_CFG_10G:
+               BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
 
-static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
-{
-       int rc = 0, res;
+               ext_phy_type =
+                       XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
+               switch (ext_phy_type) {
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+                       BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
+                                      ext_phy_type);
 
-       if (BP_NOMCP(bp))
-               return rc;
+                       bp->port.supported |= (SUPPORTED_10baseT_Half |
+                                              SUPPORTED_10baseT_Full |
+                                              SUPPORTED_100baseT_Half |
+                                              SUPPORTED_100baseT_Full |
+                                              SUPPORTED_1000baseT_Full |
+                                              SUPPORTED_2500baseX_Full |
+                                              SUPPORTED_10000baseT_Full |
+                                              SUPPORTED_TP |
+                                              SUPPORTED_FIBRE |
+                                              SUPPORTED_Autoneg |
+                                              SUPPORTED_Pause |
+                                              SUPPORTED_Asym_Pause);
+                       break;
 
-       if (!netif_running(bp->dev))
-               return BNX2X_LOOPBACK_FAILED;
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
+                       BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
+                                      ext_phy_type);
 
-       bnx2x_netif_stop(bp, 1);
-       bnx2x_acquire_phy_lock(bp);
+                       bp->port.supported |= (SUPPORTED_10000baseT_Full |
+                                              SUPPORTED_1000baseT_Full |
+                                              SUPPORTED_FIBRE |
+                                              SUPPORTED_Autoneg |
+                                              SUPPORTED_Pause |
+                                              SUPPORTED_Asym_Pause);
+                       break;
 
-       res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
-       if (res) {
-               DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
-               rc |= BNX2X_PHY_LOOPBACK_FAILED;
-       }
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
+                       BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
+                                      ext_phy_type);
 
-       res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
-       if (res) {
-               DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
-               rc |= BNX2X_MAC_LOOPBACK_FAILED;
-       }
+                       bp->port.supported |= (SUPPORTED_10000baseT_Full |
+                                              SUPPORTED_2500baseX_Full |
+                                              SUPPORTED_1000baseT_Full |
+                                              SUPPORTED_FIBRE |
+                                              SUPPORTED_Autoneg |
+                                              SUPPORTED_Pause |
+                                              SUPPORTED_Asym_Pause);
+                       break;
 
-       bnx2x_release_phy_lock(bp);
-       bnx2x_netif_start(bp);
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
+                       BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
+                                      ext_phy_type);
 
-       return rc;
-}
+                       bp->port.supported |= (SUPPORTED_10000baseT_Full |
+                                              SUPPORTED_FIBRE |
+                                              SUPPORTED_Pause |
+                                              SUPPORTED_Asym_Pause);
+                       break;
 
-#define CRC32_RESIDUAL                 0xdebb20e3
-
-static int bnx2x_test_nvram(struct bnx2x *bp)
-{
-       static const struct {
-               int offset;
-               int size;
-       } nvram_tbl[] = {
-               {     0,  0x14 }, /* bootstrap */
-               {  0x14,  0xec }, /* dir */
-               { 0x100, 0x350 }, /* manuf_info */
-               { 0x450,  0xf0 }, /* feature_info */
-               { 0x640,  0x64 }, /* upgrade_key_info */
-               { 0x6a4,  0x64 },
-               { 0x708,  0x70 }, /* manuf_key_info */
-               { 0x778,  0x70 },
-               {     0,     0 }
-       };
-       __be32 buf[0x350 / 4];
-       u8 *data = (u8 *)buf;
-       int i, rc;
-       u32 magic, crc;
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
+                       BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
+                                      ext_phy_type);
 
-       if (BP_NOMCP(bp))
-               return 0;
+                       bp->port.supported |= (SUPPORTED_10000baseT_Full |
+                                              SUPPORTED_1000baseT_Full |
+                                              SUPPORTED_FIBRE |
+                                              SUPPORTED_Pause |
+                                              SUPPORTED_Asym_Pause);
+                       break;
 
-       rc = bnx2x_nvram_read(bp, 0, data, 4);
-       if (rc) {
-               DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
-               goto test_nvram_exit;
-       }
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+                       BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
+                                      ext_phy_type);
 
-       magic = be32_to_cpu(buf[0]);
-       if (magic != 0x669955aa) {
-               DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
-               rc = -ENODEV;
-               goto test_nvram_exit;
-       }
+                       bp->port.supported |= (SUPPORTED_10000baseT_Full |
+                                              SUPPORTED_1000baseT_Full |
+                                              SUPPORTED_Autoneg |
+                                              SUPPORTED_FIBRE |
+                                              SUPPORTED_Pause |
+                                              SUPPORTED_Asym_Pause);
+                       break;
 
-       for (i = 0; nvram_tbl[i].size; i++) {
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+                       BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
+                                      ext_phy_type);
 
-               rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
-                                     nvram_tbl[i].size);
-               if (rc) {
-                       DP(NETIF_MSG_PROBE,
-                          "nvram_tbl[%d] read data (rc %d)\n", i, rc);
-                       goto test_nvram_exit;
-               }
+                       bp->port.supported |= (SUPPORTED_10000baseT_Full |
+                                              SUPPORTED_1000baseT_Full |
+                                              SUPPORTED_Autoneg |
+                                              SUPPORTED_FIBRE |
+                                              SUPPORTED_Pause |
+                                              SUPPORTED_Asym_Pause);
+                       break;
 
-               crc = ether_crc_le(nvram_tbl[i].size, data);
-               if (crc != CRC32_RESIDUAL) {
-                       DP(NETIF_MSG_PROBE,
-                          "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
-                       rc = -ENODEV;
-                       goto test_nvram_exit;
-               }
-       }
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+                       BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
+                                      ext_phy_type);
 
-test_nvram_exit:
-       return rc;
-}
+                       bp->port.supported |= (SUPPORTED_10000baseT_Full |
+                                              SUPPORTED_TP |
+                                              SUPPORTED_Autoneg |
+                                              SUPPORTED_Pause |
+                                              SUPPORTED_Asym_Pause);
+                       break;
 
-static int bnx2x_test_intr(struct bnx2x *bp)
-{
-       struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
-       int i, rc;
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
+                       BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
+                                      ext_phy_type);
 
-       if (!netif_running(bp->dev))
-               return -ENODEV;
+                       bp->port.supported |= (SUPPORTED_10baseT_Half |
+                                              SUPPORTED_10baseT_Full |
+                                              SUPPORTED_100baseT_Half |
+                                              SUPPORTED_100baseT_Full |
+                                              SUPPORTED_1000baseT_Full |
+                                              SUPPORTED_10000baseT_Full |
+                                              SUPPORTED_TP |
+                                              SUPPORTED_Autoneg |
+                                              SUPPORTED_Pause |
+                                              SUPPORTED_Asym_Pause);
+                       break;
 
-       config->hdr.length = 0;
-       if (CHIP_IS_E1(bp))
-               /* use last unicast entries */
-               config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
-       else
-               config->hdr.offset = BP_FUNC(bp);
-       config->hdr.client_id = bp->fp->cl_id;
-       config->hdr.reserved1 = 0;
+               case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
+                       BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
+                                 bp->link_params.ext_phy_config);
+                       break;
 
-       bp->set_mac_pending++;
-       smp_wmb();
-       rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
-                          U64_HI(bnx2x_sp_mapping(bp, mac_config)),
-                          U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
-       if (rc == 0) {
-               for (i = 0; i < 10; i++) {
-                       if (!bp->set_mac_pending)
-                               break;
-                       smp_rmb();
-                       msleep_interruptible(10);
+               default:
+                       BNX2X_ERR("NVRAM config error. "
+                                 "BAD XGXS ext_phy_config 0x%x\n",
+                                 bp->link_params.ext_phy_config);
+                       return;
                }
-               if (i == 10)
-                       rc = -ENODEV;
-       }
 
-       return rc;
-}
+               bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
+                                          port*0x18);
+               BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
 
-static void bnx2x_self_test(struct net_device *dev,
-                           struct ethtool_test *etest, u64 *buf)
-{
-       struct bnx2x *bp = netdev_priv(dev);
+               break;
 
-       if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
-               printk(KERN_ERR "Handling parity error recovery. Try again later\n");
-               etest->flags |= ETH_TEST_FL_FAILED;
+       default:
+               BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
+                         bp->port.link_config);
                return;
        }
+       bp->link_params.phy_addr = bp->port.phy_addr;
 
-       memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
-
-       if (!netif_running(dev))
-               return;
-
-       /* offline tests are not supported in MF mode */
-       if (IS_E1HMF(bp))
-               etest->flags &= ~ETH_TEST_FL_OFFLINE;
-
-       if (etest->flags & ETH_TEST_FL_OFFLINE) {
-               int port = BP_PORT(bp);
-               u32 val;
-               u8 link_up;
+       /* mask what we support according to speed_cap_mask */
+       if (!(bp->link_params.speed_cap_mask &
+                               PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
+               bp->port.supported &= ~SUPPORTED_10baseT_Half;
 
-               /* save current value of input enable for TX port IF */
-               val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
-               /* disable input for TX port IF */
-               REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
+       if (!(bp->link_params.speed_cap_mask &
+                               PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
+               bp->port.supported &= ~SUPPORTED_10baseT_Full;
 
-               link_up = (bnx2x_link_test(bp) == 0);
-               bnx2x_nic_unload(bp, UNLOAD_NORMAL);
-               bnx2x_nic_load(bp, LOAD_DIAG);
-               /* wait until link state is restored */
-               bnx2x_wait_for_link(bp, link_up);
+       if (!(bp->link_params.speed_cap_mask &
+                               PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
+               bp->port.supported &= ~SUPPORTED_100baseT_Half;
 
-               if (bnx2x_test_registers(bp) != 0) {
-                       buf[0] = 1;
-                       etest->flags |= ETH_TEST_FL_FAILED;
-               }
-               if (bnx2x_test_memory(bp) != 0) {
-                       buf[1] = 1;
-                       etest->flags |= ETH_TEST_FL_FAILED;
-               }
-               buf[2] = bnx2x_test_loopback(bp, link_up);
-               if (buf[2] != 0)
-                       etest->flags |= ETH_TEST_FL_FAILED;
+       if (!(bp->link_params.speed_cap_mask &
+                               PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
+               bp->port.supported &= ~SUPPORTED_100baseT_Full;
 
-               bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+       if (!(bp->link_params.speed_cap_mask &
+                                       PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
+               bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
+                                       SUPPORTED_1000baseT_Full);
 
-               /* restore input for TX port IF */
-               REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
+       if (!(bp->link_params.speed_cap_mask &
+                                       PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
+               bp->port.supported &= ~SUPPORTED_2500baseX_Full;
 
-               bnx2x_nic_load(bp, LOAD_NORMAL);
-               /* wait until link state is restored */
-               bnx2x_wait_for_link(bp, link_up);
-       }
-       if (bnx2x_test_nvram(bp) != 0) {
-               buf[3] = 1;
-               etest->flags |= ETH_TEST_FL_FAILED;
-       }
-       if (bnx2x_test_intr(bp) != 0) {
-               buf[4] = 1;
-               etest->flags |= ETH_TEST_FL_FAILED;
-       }
-       if (bp->port.pmf)
-               if (bnx2x_link_test(bp) != 0) {
-                       buf[5] = 1;
-                       etest->flags |= ETH_TEST_FL_FAILED;
-               }
+       if (!(bp->link_params.speed_cap_mask &
+                                       PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
+               bp->port.supported &= ~SUPPORTED_10000baseT_Full;
 
-#ifdef BNX2X_EXTRA_DEBUG
-       bnx2x_panic_dump(bp);
-#endif
+       BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
 }
 
-static const struct {
-       long offset;
-       int size;
-       u8 string[ETH_GSTRING_LEN];
-} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
-/* 1 */        { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
-       { Q_STATS_OFFSET32(error_bytes_received_hi),
-                                               8, "[%d]: rx_error_bytes" },
-       { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
-                                               8, "[%d]: rx_ucast_packets" },
-       { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
-                                               8, "[%d]: rx_mcast_packets" },
-       { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
-                                               8, "[%d]: rx_bcast_packets" },
-       { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
-       { Q_STATS_OFFSET32(rx_err_discard_pkt),
-                                        4, "[%d]: rx_phy_ip_err_discards"},
-       { Q_STATS_OFFSET32(rx_skb_alloc_failed),
-                                        4, "[%d]: rx_skb_alloc_discard" },
-       { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
-
-/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),        8, "[%d]: tx_bytes" },
-       { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
-                                               8, "[%d]: tx_ucast_packets" },
-       { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
-                                               8, "[%d]: tx_mcast_packets" },
-       { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
-                                               8, "[%d]: tx_bcast_packets" }
-};
-
-static const struct {
-       long offset;
-       int size;
-       u32 flags;
-#define STATS_FLAGS_PORT               1
-#define STATS_FLAGS_FUNC               2
-#define STATS_FLAGS_BOTH               (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
-       u8 string[ETH_GSTRING_LEN];
-} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
-/* 1 */        { STATS_OFFSET32(total_bytes_received_hi),
-                               8, STATS_FLAGS_BOTH, "rx_bytes" },
-       { STATS_OFFSET32(error_bytes_received_hi),
-                               8, STATS_FLAGS_BOTH, "rx_error_bytes" },
-       { STATS_OFFSET32(total_unicast_packets_received_hi),
-                               8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
-       { STATS_OFFSET32(total_multicast_packets_received_hi),
-                               8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
-       { STATS_OFFSET32(total_broadcast_packets_received_hi),
-                               8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
-       { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
-                               8, STATS_FLAGS_PORT, "rx_crc_errors" },
-       { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
-                               8, STATS_FLAGS_PORT, "rx_align_errors" },
-       { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
-                               8, STATS_FLAGS_PORT, "rx_undersize_packets" },
-       { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
-                               8, STATS_FLAGS_PORT, "rx_oversize_packets" },
-/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
-                               8, STATS_FLAGS_PORT, "rx_fragments" },
-       { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
-                               8, STATS_FLAGS_PORT, "rx_jabbers" },
-       { STATS_OFFSET32(no_buff_discard_hi),
-                               8, STATS_FLAGS_BOTH, "rx_discards" },
-       { STATS_OFFSET32(mac_filter_discard),
-                               4, STATS_FLAGS_PORT, "rx_filtered_packets" },
-       { STATS_OFFSET32(xxoverflow_discard),
-                               4, STATS_FLAGS_PORT, "rx_fw_discards" },
-       { STATS_OFFSET32(brb_drop_hi),
-                               8, STATS_FLAGS_PORT, "rx_brb_discard" },
-       { STATS_OFFSET32(brb_truncate_hi),
-                               8, STATS_FLAGS_PORT, "rx_brb_truncate" },
-       { STATS_OFFSET32(pause_frames_received_hi),
-                               8, STATS_FLAGS_PORT, "rx_pause_frames" },
-       { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
-                               8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
-       { STATS_OFFSET32(nig_timer_max),
-                       4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
-/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
-                               4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
-       { STATS_OFFSET32(rx_skb_alloc_failed),
-                               4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
-       { STATS_OFFSET32(hw_csum_err),
-                               4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
-
-       { STATS_OFFSET32(total_bytes_transmitted_hi),
-                               8, STATS_FLAGS_BOTH, "tx_bytes" },
-       { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
-                               8, STATS_FLAGS_PORT, "tx_error_bytes" },
-       { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
-                               8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
-       { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
-                               8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
-       { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
-                               8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
-       { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
-                               8, STATS_FLAGS_PORT, "tx_mac_errors" },
-       { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
-                               8, STATS_FLAGS_PORT, "tx_carrier_errors" },
-/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
-                               8, STATS_FLAGS_PORT, "tx_single_collisions" },
-       { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
-                               8, STATS_FLAGS_PORT, "tx_multi_collisions" },
-       { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
-                               8, STATS_FLAGS_PORT, "tx_deferred" },
-       { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
-                               8, STATS_FLAGS_PORT, "tx_excess_collisions" },
-       { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
-                               8, STATS_FLAGS_PORT, "tx_late_collisions" },
-       { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
-                               8, STATS_FLAGS_PORT, "tx_total_collisions" },
-       { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
-                               8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
-       { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
-                       8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
-       { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
-                       8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
-       { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
-                       8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
-/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
-                       8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
-       { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
-                       8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
-       { STATS_OFFSET32(etherstatspktsover1522octets_hi),
-                       8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
-       { STATS_OFFSET32(pause_frames_sent_hi),
-                               8, STATS_FLAGS_PORT, "tx_pause_frames" }
-};
-
-#define IS_PORT_STAT(i) \
-       ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
-#define IS_FUNC_STAT(i)                (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
-#define IS_E1HMF_MODE_STAT(bp) \
-                       (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
-
-static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
+static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
 {
-       struct bnx2x *bp = netdev_priv(dev);
-       int i, num_stats;
-
-       switch (stringset) {
-       case ETH_SS_STATS:
-               if (is_multi(bp)) {
-                       num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
-                       if (!IS_E1HMF_MODE_STAT(bp))
-                               num_stats += BNX2X_NUM_STATS;
-               } else {
-                       if (IS_E1HMF_MODE_STAT(bp)) {
-                               num_stats = 0;
-                               for (i = 0; i < BNX2X_NUM_STATS; i++)
-                                       if (IS_FUNC_STAT(i))
-                                               num_stats++;
-                       } else
-                               num_stats = BNX2X_NUM_STATS;
-               }
-               return num_stats;
-
-       case ETH_SS_TEST:
-               return BNX2X_NUM_TESTS;
+       bp->link_params.req_duplex = DUPLEX_FULL;
 
-       default:
-               return -EINVAL;
-       }
-}
+       switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
+       case PORT_FEATURE_LINK_SPEED_AUTO:
+               if (bp->port.supported & SUPPORTED_Autoneg) {
+                       bp->link_params.req_line_speed = SPEED_AUTO_NEG;
+                       bp->port.advertising = bp->port.supported;
+               } else {
+                       u32 ext_phy_type =
+                           XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
 
-static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
-{
-       struct bnx2x *bp = netdev_priv(dev);
-       int i, j, k;
-
-       switch (stringset) {
-       case ETH_SS_STATS:
-               if (is_multi(bp)) {
-                       k = 0;
-                       for_each_queue(bp, i) {
-                               for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
-                                       sprintf(buf + (k + j)*ETH_GSTRING_LEN,
-                                               bnx2x_q_stats_arr[j].string, i);
-                               k += BNX2X_NUM_Q_STATS;
-                       }
-                       if (IS_E1HMF_MODE_STAT(bp))
+                       if ((ext_phy_type ==
+                            PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
+                           (ext_phy_type ==
+                            PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
+                               /* force 10G, no AN */
+                               bp->link_params.req_line_speed = SPEED_10000;
+                               bp->port.advertising =
+                                               (ADVERTISED_10000baseT_Full |
+                                                ADVERTISED_FIBRE);
                                break;
-                       for (j = 0; j < BNX2X_NUM_STATS; j++)
-                               strcpy(buf + (k + j)*ETH_GSTRING_LEN,
-                                      bnx2x_stats_arr[j].string);
-               } else {
-                       for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
-                               if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
-                                       continue;
-                               strcpy(buf + j*ETH_GSTRING_LEN,
-                                      bnx2x_stats_arr[i].string);
-                               j++;
                        }
+                       BNX2X_ERR("NVRAM config error. "
+                                 "Invalid link_config 0x%x"
+                                 "  Autoneg not supported\n",
+                                 bp->port.link_config);
+                       return;
                }
                break;
 
-       case ETH_SS_TEST:
-               memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
+       case PORT_FEATURE_LINK_SPEED_10M_FULL:
+               if (bp->port.supported & SUPPORTED_10baseT_Full) {
+                       bp->link_params.req_line_speed = SPEED_10;
+                       bp->port.advertising = (ADVERTISED_10baseT_Full |
+                                               ADVERTISED_TP);
+               } else {
+                       BNX2X_ERROR("NVRAM config error. "
+                                   "Invalid link_config 0x%x"
+                                   "  speed_cap_mask 0x%x\n",
+                                   bp->port.link_config,
+                                   bp->link_params.speed_cap_mask);
+                       return;
+               }
                break;
-       }
-}
-
-static void bnx2x_get_ethtool_stats(struct net_device *dev,
-                                   struct ethtool_stats *stats, u64 *buf)
-{
-       struct bnx2x *bp = netdev_priv(dev);
-       u32 *hw_stats, *offset;
-       int i, j, k;
 
-       if (is_multi(bp)) {
-               k = 0;
-               for_each_queue(bp, i) {
-                       hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
-                       for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
-                               if (bnx2x_q_stats_arr[j].size == 0) {
-                                       /* skip this counter */
-                                       buf[k + j] = 0;
-                                       continue;
-                               }
-                               offset = (hw_stats +
-                                         bnx2x_q_stats_arr[j].offset);
-                               if (bnx2x_q_stats_arr[j].size == 4) {
-                                       /* 4-byte counter */
-                                       buf[k + j] = (u64) *offset;
-                                       continue;
-                               }
-                               /* 8-byte counter */
-                               buf[k + j] = HILO_U64(*offset, *(offset + 1));
-                       }
-                       k += BNX2X_NUM_Q_STATS;
-               }
-               if (IS_E1HMF_MODE_STAT(bp))
+       case PORT_FEATURE_LINK_SPEED_10M_HALF:
+               if (bp->port.supported & SUPPORTED_10baseT_Half) {
+                       bp->link_params.req_line_speed = SPEED_10;
+                       bp->link_params.req_duplex = DUPLEX_HALF;
+                       bp->port.advertising = (ADVERTISED_10baseT_Half |
+                                               ADVERTISED_TP);
+               } else {
+                       BNX2X_ERROR("NVRAM config error. "
+                                   "Invalid link_config 0x%x"
+                                   "  speed_cap_mask 0x%x\n",
+                                   bp->port.link_config,
+                                   bp->link_params.speed_cap_mask);
                        return;
-               hw_stats = (u32 *)&bp->eth_stats;
-               for (j = 0; j < BNX2X_NUM_STATS; j++) {
-                       if (bnx2x_stats_arr[j].size == 0) {
-                               /* skip this counter */
-                               buf[k + j] = 0;
-                               continue;
-                       }
-                       offset = (hw_stats + bnx2x_stats_arr[j].offset);
-                       if (bnx2x_stats_arr[j].size == 4) {
-                               /* 4-byte counter */
-                               buf[k + j] = (u64) *offset;
-                               continue;
-                       }
-                       /* 8-byte counter */
-                       buf[k + j] = HILO_U64(*offset, *(offset + 1));
-               }
-       } else {
-               hw_stats = (u32 *)&bp->eth_stats;
-               for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
-                       if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
-                               continue;
-                       if (bnx2x_stats_arr[i].size == 0) {
-                               /* skip this counter */
-                               buf[j] = 0;
-                               j++;
-                               continue;
-                       }
-                       offset = (hw_stats + bnx2x_stats_arr[i].offset);
-                       if (bnx2x_stats_arr[i].size == 4) {
-                               /* 4-byte counter */
-                               buf[j] = (u64) *offset;
-                               j++;
-                               continue;
-                       }
-                       /* 8-byte counter */
-                       buf[j] = HILO_U64(*offset, *(offset + 1));
-                       j++;
                }
-       }
-}
-
-static int bnx2x_phys_id(struct net_device *dev, u32 data)
-{
-       struct bnx2x *bp = netdev_priv(dev);
-       int i;
-
-       if (!netif_running(dev))
-               return 0;
-
-       if (!bp->port.pmf)
-               return 0;
-
-       if (data == 0)
-               data = 2;
-
-       for (i = 0; i < (data * 2); i++) {
-               if ((i % 2) == 0)
-                       bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
-                                     SPEED_1000);
-               else
-                       bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
-
-               msleep_interruptible(500);
-               if (signal_pending(current))
-                       break;
-       }
-
-       if (bp->link_vars.link_up)
-               bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
-                             bp->link_vars.line_speed);
-
-       return 0;
-}
-
-static const struct ethtool_ops bnx2x_ethtool_ops = {
-       .get_settings           = bnx2x_get_settings,
-       .set_settings           = bnx2x_set_settings,
-       .get_drvinfo            = bnx2x_get_drvinfo,
-       .get_regs_len           = bnx2x_get_regs_len,
-       .get_regs               = bnx2x_get_regs,
-       .get_wol                = bnx2x_get_wol,
-       .set_wol                = bnx2x_set_wol,
-       .get_msglevel           = bnx2x_get_msglevel,
-       .set_msglevel           = bnx2x_set_msglevel,
-       .nway_reset             = bnx2x_nway_reset,
-       .get_link               = bnx2x_get_link,
-       .get_eeprom_len         = bnx2x_get_eeprom_len,
-       .get_eeprom             = bnx2x_get_eeprom,
-       .set_eeprom             = bnx2x_set_eeprom,
-       .get_coalesce           = bnx2x_get_coalesce,
-       .set_coalesce           = bnx2x_set_coalesce,
-       .get_ringparam          = bnx2x_get_ringparam,
-       .set_ringparam          = bnx2x_set_ringparam,
-       .get_pauseparam         = bnx2x_get_pauseparam,
-       .set_pauseparam         = bnx2x_set_pauseparam,
-       .get_rx_csum            = bnx2x_get_rx_csum,
-       .set_rx_csum            = bnx2x_set_rx_csum,
-       .get_tx_csum            = ethtool_op_get_tx_csum,
-       .set_tx_csum            = ethtool_op_set_tx_hw_csum,
-       .set_flags              = bnx2x_set_flags,
-       .get_flags              = ethtool_op_get_flags,
-       .get_sg                 = ethtool_op_get_sg,
-       .set_sg                 = ethtool_op_set_sg,
-       .get_tso                = ethtool_op_get_tso,
-       .set_tso                = bnx2x_set_tso,
-       .self_test              = bnx2x_self_test,
-       .get_sset_count         = bnx2x_get_sset_count,
-       .get_strings            = bnx2x_get_strings,
-       .phys_id                = bnx2x_phys_id,
-       .get_ethtool_stats      = bnx2x_get_ethtool_stats,
-};
-
-/* end of ethtool_ops */
-
-/****************************************************************************
-* General service functions
-****************************************************************************/
-
-static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
-{
-       u16 pmcsr;
+               break;
 
-       pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
+       case PORT_FEATURE_LINK_SPEED_100M_FULL:
+               if (bp->port.supported & SUPPORTED_100baseT_Full) {
+                       bp->link_params.req_line_speed = SPEED_100;
+                       bp->port.advertising = (ADVERTISED_100baseT_Full |
+                                               ADVERTISED_TP);
+               } else {
+                       BNX2X_ERROR("NVRAM config error. "
+                                   "Invalid link_config 0x%x"
+                                   "  speed_cap_mask 0x%x\n",
+                                   bp->port.link_config,
+                                   bp->link_params.speed_cap_mask);
+                       return;
+               }
+               break;
 
-       switch (state) {
-       case PCI_D0:
-               pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
-                                     ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
-                                      PCI_PM_CTRL_PME_STATUS));
+       case PORT_FEATURE_LINK_SPEED_100M_HALF:
+               if (bp->port.supported & SUPPORTED_100baseT_Half) {
+                       bp->link_params.req_line_speed = SPEED_100;
+                       bp->link_params.req_duplex = DUPLEX_HALF;
+                       bp->port.advertising = (ADVERTISED_100baseT_Half |
+                                               ADVERTISED_TP);
+               } else {
+                       BNX2X_ERROR("NVRAM config error. "
+                                   "Invalid link_config 0x%x"
+                                   "  speed_cap_mask 0x%x\n",
+                                   bp->port.link_config,
+                                   bp->link_params.speed_cap_mask);
+                       return;
+               }
+               break;
 
-               if (pmcsr & PCI_PM_CTRL_STATE_MASK)
-                       /* delay required during transition out of D3hot */
-                       msleep(20);
+       case PORT_FEATURE_LINK_SPEED_1G:
+               if (bp->port.supported & SUPPORTED_1000baseT_Full) {
+                       bp->link_params.req_line_speed = SPEED_1000;
+                       bp->port.advertising = (ADVERTISED_1000baseT_Full |
+                                               ADVERTISED_TP);
+               } else {
+                       BNX2X_ERROR("NVRAM config error. "
+                                   "Invalid link_config 0x%x"
+                                   "  speed_cap_mask 0x%x\n",
+                                   bp->port.link_config,
+                                   bp->link_params.speed_cap_mask);
+                       return;
+               }
                break;
 
-       case PCI_D3hot:
-               /* If there are other clients above don't
-                  shut down the power */
-               if (atomic_read(&bp->pdev->enable_cnt) != 1)
-                       return 0;
-               /* Don't shut down the power for emulation and FPGA */
-               if (CHIP_REV_IS_SLOW(bp))
-                       return 0;
-
-               pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
-               pmcsr |= 3;
-
-               if (bp->wol)
-                       pmcsr |= PCI_PM_CTRL_PME_ENABLE;
-
-               pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
-                                     pmcsr);
+       case PORT_FEATURE_LINK_SPEED_2_5G:
+               if (bp->port.supported & SUPPORTED_2500baseX_Full) {
+                       bp->link_params.req_line_speed = SPEED_2500;
+                       bp->port.advertising = (ADVERTISED_2500baseX_Full |
+                                               ADVERTISED_TP);
+               } else {
+                       BNX2X_ERROR("NVRAM config error. "
+                                   "Invalid link_config 0x%x"
+                                   "  speed_cap_mask 0x%x\n",
+                                   bp->port.link_config,
+                                   bp->link_params.speed_cap_mask);
+                       return;
+               }
+               break;
 
-               /* No more memory access after this point until
-               * device is brought back to D0.
-               */
+       case PORT_FEATURE_LINK_SPEED_10G_CX4:
+       case PORT_FEATURE_LINK_SPEED_10G_KX4:
+       case PORT_FEATURE_LINK_SPEED_10G_KR:
+               if (bp->port.supported & SUPPORTED_10000baseT_Full) {
+                       bp->link_params.req_line_speed = SPEED_10000;
+                       bp->port.advertising = (ADVERTISED_10000baseT_Full |
+                                               ADVERTISED_FIBRE);
+               } else {
+                       BNX2X_ERROR("NVRAM config error. "
+                                   "Invalid link_config 0x%x"
+                                   "  speed_cap_mask 0x%x\n",
+                                   bp->port.link_config,
+                                   bp->link_params.speed_cap_mask);
+                       return;
+               }
                break;
 
        default:
-               return -EINVAL;
+               BNX2X_ERROR("NVRAM config error. "
+                           "BAD link speed link_config 0x%x\n",
+                           bp->port.link_config);
+               bp->link_params.req_line_speed = SPEED_AUTO_NEG;
+               bp->port.advertising = bp->port.supported;
+               break;
        }
-       return 0;
-}
 
-static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
-{
-       u16 rx_cons_sb;
+       bp->link_params.req_flow_ctrl = (bp->port.link_config &
+                                        PORT_FEATURE_FLOW_CONTROL_MASK);
+       if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
+           !(bp->port.supported & SUPPORTED_Autoneg))
+               bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
 
-       /* Tell compiler that status block fields can change */
-       barrier();
-       rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
-       if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
-               rx_cons_sb++;
-       return (fp->rx_comp_cons != rx_cons_sb);
+       BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
+                      "  advertising 0x%x\n",
+                      bp->link_params.req_line_speed,
+                      bp->link_params.req_duplex,
+                      bp->link_params.req_flow_ctrl, bp->port.advertising);
 }
 
-/*
- * net_device service functions
- */
-
-static int bnx2x_poll(struct napi_struct *napi, int budget)
+static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
 {
-       int work_done = 0;
-       struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
-                                                napi);
-       struct bnx2x *bp = fp->bp;
-
-       while (1) {
-#ifdef BNX2X_STOP_ON_ERROR
-               if (unlikely(bp->panic)) {
-                       napi_complete(napi);
-                       return 0;
-               }
-#endif
-
-               if (bnx2x_has_tx_work(fp))
-                       bnx2x_tx_int(fp);
+       mac_hi = cpu_to_be16(mac_hi);
+       mac_lo = cpu_to_be32(mac_lo);
+       memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
+       memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
+}
 
-               if (bnx2x_has_rx_work(fp)) {
-                       work_done += bnx2x_rx_int(fp, budget - work_done);
+static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
+{
+       int port = BP_PORT(bp);
+       u32 val, val2;
+       u32 config;
+       u16 i;
+       u32 ext_phy_type;
 
-                       /* must not complete if we consumed full budget */
-                       if (work_done >= budget)
-                               break;
-               }
+       bp->link_params.bp = bp;
+       bp->link_params.port = port;
 
-               /* Fall out from the NAPI loop if needed */
-               if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
-                       bnx2x_update_fpsb_idx(fp);
-               /* bnx2x_has_rx_work() reads the status block, thus we need
-                * to ensure that status block indices have been actually read
-                * (bnx2x_update_fpsb_idx) prior to this check
-                * (bnx2x_has_rx_work) so that we won't write the "newer"
-                * value of the status block to IGU (if there was a DMA right
-                * after bnx2x_has_rx_work and if there is no rmb, the memory
-                * reading (bnx2x_update_fpsb_idx) may be postponed to right
-                * before bnx2x_ack_sb). In this case there will never be
-                * another interrupt until there is another update of the
-                * status block, while there is still unhandled work.
-                */
-                       rmb();
-
-                       if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
-                               napi_complete(napi);
-                               /* Re-enable interrupts */
-                               bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
-                                            le16_to_cpu(fp->fp_c_idx),
-                                            IGU_INT_NOP, 1);
-                               bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
-                                            le16_to_cpu(fp->fp_u_idx),
-                                            IGU_INT_ENABLE, 1);
-                               break;
-                       }
-               }
+       bp->link_params.lane_config =
+               SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
+       bp->link_params.ext_phy_config =
+               SHMEM_RD(bp,
+                        dev_info.port_hw_config[port].external_phy_config);
+       /* BCM8727_NOC => BCM8727 no over current */
+       if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
+           PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
+               bp->link_params.ext_phy_config &=
+                       ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
+               bp->link_params.ext_phy_config |=
+                       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
+               bp->link_params.feature_config_flags |=
+                       FEATURE_CONFIG_BCM8727_NOC;
        }
 
-       return work_done;
-}
-
-
-/* we split the first BD into headers and data BDs
- * to ease the pain of our fellow microcode engineers
- * we use one mapping for both BDs
- * So far this has only been observed to happen
- * in Other Operating Systems(TM)
- */
-static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
-                                  struct bnx2x_fastpath *fp,
-                                  struct sw_tx_bd *tx_buf,
-                                  struct eth_tx_start_bd **tx_bd, u16 hlen,
-                                  u16 bd_prod, int nbd)
-{
-       struct eth_tx_start_bd *h_tx_bd = *tx_bd;
-       struct eth_tx_bd *d_tx_bd;
-       dma_addr_t mapping;
-       int old_len = le16_to_cpu(h_tx_bd->nbytes);
-
-       /* first fix first BD */
-       h_tx_bd->nbd = cpu_to_le16(nbd);
-       h_tx_bd->nbytes = cpu_to_le16(hlen);
+       bp->link_params.speed_cap_mask =
+               SHMEM_RD(bp,
+                        dev_info.port_hw_config[port].speed_capability_mask);
 
-       DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
-          "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
-          h_tx_bd->addr_lo, h_tx_bd->nbd);
+       bp->port.link_config =
+               SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
 
-       /* now get a new data BD
-        * (after the pbd) and fill it */
-       bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
-       d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
+       /* Get the 4 lanes xgxs config rx and tx */
+       for (i = 0; i < 2; i++) {
+               val = SHMEM_RD(bp,
+                          dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
+               bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
+               bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
 
-       mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
-                          le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
+               val = SHMEM_RD(bp,
+                          dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
+               bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
+               bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
+       }
 
-       d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
-       d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
-       d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
+       /* If the device is capable of WoL, set the default state according
+        * to the HW
+        */
+       config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
+       bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
+                  (config & PORT_FEATURE_WOL_ENABLED));
 
-       /* this marks the BD as one that has no individual mapping */
-       tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
+       BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
+                      "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
+                      bp->link_params.lane_config,
+                      bp->link_params.ext_phy_config,
+                      bp->link_params.speed_cap_mask, bp->port.link_config);
 
-       DP(NETIF_MSG_TX_QUEUED,
-          "TSO split data size is %d (%x:%x)\n",
-          d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
+       bp->link_params.switch_cfg |= (bp->port.link_config &
+                                      PORT_FEATURE_CONNECTED_SWITCH_MASK);
+       bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
 
-       /* update tx_bd */
-       *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
+       bnx2x_link_settings_requested(bp);
 
-       return bd_prod;
-}
+       /*
+        * If connected directly, work with the internal PHY, otherwise, work
+        * with the external PHY
+        */
+       ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
+       if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+               bp->mdio.prtad = bp->link_params.phy_addr;
 
-static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
-{
-       if (fix > 0)
-               csum = (u16) ~csum_fold(csum_sub(csum,
-                               csum_partial(t_header - fix, fix, 0)));
+       else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
+                (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
+               bp->mdio.prtad =
+                       XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
 
-       else if (fix < 0)
-               csum = (u16) ~csum_fold(csum_add(csum,
-                               csum_partial(t_header, -fix, 0)));
+       val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
+       val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
+       bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+       memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
+       memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
 
-       return swab16(csum);
+#ifdef BCM_CNIC
+       val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
+       val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
+       bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
+#endif
 }
 
-static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
+static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
 {
-       u32 rc;
-
-       if (skb->ip_summed != CHECKSUM_PARTIAL)
-               rc = XMIT_PLAIN;
-
-       else {
-               if (skb->protocol == htons(ETH_P_IPV6)) {
-                       rc = XMIT_CSUM_V6;
-                       if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
-                               rc |= XMIT_CSUM_TCP;
-
-               } else {
-                       rc = XMIT_CSUM_V4;
-                       if (ip_hdr(skb)->protocol == IPPROTO_TCP)
-                               rc |= XMIT_CSUM_TCP;
-               }
-       }
-
-       if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
-               rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
-
-       else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
-               rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
-
-       return rc;
-}
+       int func = BP_FUNC(bp);
+       u32 val, val2;
+       int rc = 0;
 
-#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
-/* check if packet requires linearization (packet is too fragmented)
-   no need to check fragmentation if page size > 8K (there will be no
-   violation to FW restrictions) */
-static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
-                            u32 xmit_type)
-{
-       int to_copy = 0;
-       int hlen = 0;
-       int first_bd_sz = 0;
-
-       /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
-       if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
-
-               if (xmit_type & XMIT_GSO) {
-                       unsigned short lso_mss = skb_shinfo(skb)->gso_size;
-                       /* Check if LSO packet needs to be copied:
-                          3 = 1 (for headers BD) + 2 (for PBD and last BD) */
-                       int wnd_size = MAX_FETCH_BD - 3;
-                       /* Number of windows to check */
-                       int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
-                       int wnd_idx = 0;
-                       int frag_idx = 0;
-                       u32 wnd_sum = 0;
-
-                       /* Headers length */
-                       hlen = (int)(skb_transport_header(skb) - skb->data) +
-                               tcp_hdrlen(skb);
-
-                       /* Amount of data (w/o headers) on linear part of SKB*/
-                       first_bd_sz = skb_headlen(skb) - hlen;
-
-                       wnd_sum  = first_bd_sz;
-
-                       /* Calculate the first sum - it's special */
-                       for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
-                               wnd_sum +=
-                                       skb_shinfo(skb)->frags[frag_idx].size;
-
-                       /* If there was data on linear skb data - check it */
-                       if (first_bd_sz > 0) {
-                               if (unlikely(wnd_sum < lso_mss)) {
-                                       to_copy = 1;
-                                       goto exit_lbl;
-                               }
+       bnx2x_get_common_hwinfo(bp);
 
-                               wnd_sum -= first_bd_sz;
-                       }
+       bp->e1hov = 0;
+       bp->e1hmf = 0;
+       if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
+               bp->mf_config =
+                       SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
 
-                       /* Others are easier: run through the frag list and
-                          check all windows */
-                       for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
-                               wnd_sum +=
-                         skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
+               val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
+                      FUNC_MF_CFG_E1HOV_TAG_MASK);
+               if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
+                       bp->e1hmf = 1;
+               BNX2X_DEV_INFO("%s function mode\n",
+                              IS_E1HMF(bp) ? "multi" : "single");
 
-                               if (unlikely(wnd_sum < lso_mss)) {
-                                       to_copy = 1;
-                                       break;
-                               }
-                               wnd_sum -=
-                                       skb_shinfo(skb)->frags[wnd_idx].size;
+               if (IS_E1HMF(bp)) {
+                       val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
+                                                               e1hov_tag) &
+                              FUNC_MF_CFG_E1HOV_TAG_MASK);
+                       if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
+                               bp->e1hov = val;
+                               BNX2X_DEV_INFO("E1HOV for func %d is %d "
+                                              "(0x%04x)\n",
+                                              func, bp->e1hov, bp->e1hov);
+                       } else {
+                               BNX2X_ERROR("No valid E1HOV for func %d,"
+                                           "  aborting\n", func);
+                               rc = -EPERM;
                        }
                } else {
-                       /* in non-LSO too fragmented packet should always
-                          be linearized */
-                       to_copy = 1;
+                       if (BP_E1HVN(bp)) {
+                               BNX2X_ERROR("VN %d in single function mode,"
+                                           "  aborting\n", BP_E1HVN(bp));
+                               rc = -EPERM;
+                       }
                }
        }
 
-exit_lbl:
-       if (unlikely(to_copy))
-               DP(NETIF_MSG_TX_QUEUED,
-                  "Linearization IS REQUIRED for %s packet. "
-                  "num_frags %d  hlen %d  first_bd_sz %d\n",
-                  (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
-                  skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
-
-       return to_copy;
-}
-#endif
-
-/* called with netif_tx_lock
- * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
- * netif_wake_queue()
- */
-static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-       struct bnx2x *bp = netdev_priv(dev);
-       struct bnx2x_fastpath *fp;
-       struct netdev_queue *txq;
-       struct sw_tx_bd *tx_buf;
-       struct eth_tx_start_bd *tx_start_bd;
-       struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
-       struct eth_tx_parse_bd *pbd = NULL;
-       u16 pkt_prod, bd_prod;
-       int nbd, fp_index;
-       dma_addr_t mapping;
-       u32 xmit_type = bnx2x_xmit_type(bp, skb);
-       int i;
-       u8 hlen = 0;
-       __le16 pkt_size = 0;
-       struct ethhdr *eth;
-       u8 mac_type = UNICAST_ADDRESS;
-
-#ifdef BNX2X_STOP_ON_ERROR
-       if (unlikely(bp->panic))
-               return NETDEV_TX_BUSY;
-#endif
-
-       fp_index = skb_get_queue_mapping(skb);
-       txq = netdev_get_tx_queue(dev, fp_index);
-
-       fp = &bp->fp[fp_index];
+       if (!BP_NOMCP(bp)) {
+               bnx2x_get_port_hwinfo(bp);
 
-       if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
-               fp->eth_q_stats.driver_xoff++;
-               netif_tx_stop_queue(txq);
-               BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
-               return NETDEV_TX_BUSY;
+               bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
+                             DRV_MSG_SEQ_NUMBER_MASK);
+               BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
        }
 
-       DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
-          "  gso type %x  xmit_type %x\n",
-          skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
-          ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
+       if (IS_E1HMF(bp)) {
+               val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
+               val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
+               if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
+                   (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
+                       bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
+                       bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
+                       bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
+                       bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
+                       bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
+                       bp->dev->dev_addr[5] = (u8)(val & 0xff);
+                       memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
+                              ETH_ALEN);
+                       memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
+                              ETH_ALEN);
+               }
 
-       eth = (struct ethhdr *)skb->data;
+               return rc;
+       }
 
-       /* set flag according to packet type (UNICAST_ADDRESS is default)*/
-       if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
-               if (is_broadcast_ether_addr(eth->h_dest))
-                       mac_type = BROADCAST_ADDRESS;
-               else
-                       mac_type = MULTICAST_ADDRESS;
-       }
-
-#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
-       /* First, check if we need to linearize the skb (due to FW
-          restrictions). No need to check fragmentation if page size > 8K
-          (there will be no violation to FW restrictions) */
-       if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
-               /* Statistics of linearization */
-               bp->lin_cnt++;
-               if (skb_linearize(skb) != 0) {
-                       DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
-                          "silently dropping this SKB\n");
-                       dev_kfree_skb_any(skb);
-                       return NETDEV_TX_OK;
-               }
+       if (BP_NOMCP(bp)) {
+               /* only supposed to happen on emulation/FPGA */
+               BNX2X_ERROR("warning: random MAC workaround active\n");
+               random_ether_addr(bp->dev->dev_addr);
+               memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
        }
-#endif
 
-       /*
-       Please read carefully. First we use one BD which we mark as start,
-       then we have a parsing info BD (used for TSO or xsum),
-       and only then we have the rest of the TSO BDs.
-       (don't forget to mark the last one as last,
-       and to unmap only AFTER you write to the BD ...)
-       And above all, all pdb sizes are in words - NOT DWORDS!
-       */
-
-       pkt_prod = fp->tx_pkt_prod++;
-       bd_prod = TX_BD(fp->tx_bd_prod);
-
-       /* get a tx_buf and first BD */
-       tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
-       tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
-
-       tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
-       tx_start_bd->general_data =  (mac_type <<
-                                       ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
-       /* header nbd */
-       tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
-
-       /* remember the first BD of the packet */
-       tx_buf->first_bd = fp->tx_bd_prod;
-       tx_buf->skb = skb;
-       tx_buf->flags = 0;
-
-       DP(NETIF_MSG_TX_QUEUED,
-          "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
-          pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
+       return rc;
+}
 
-#ifdef BCM_VLAN
-       if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
-           (bp->flags & HW_VLAN_TX_FLAG)) {
-               tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
-               tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
-       } else
-#endif
-               tx_start_bd->vlan = cpu_to_le16(pkt_prod);
+static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
+{
+       int cnt, i, block_end, rodi;
+       char vpd_data[BNX2X_VPD_LEN+1];
+       char str_id_reg[VENDOR_ID_LEN+1];
+       char str_id_cap[VENDOR_ID_LEN+1];
+       u8 len;
 
-       /* turn on parsing and get a BD */
-       bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
-       pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
+       cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
+       memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
 
-       memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
+       if (cnt < BNX2X_VPD_LEN)
+               goto out_not_found;
 
-       if (xmit_type & XMIT_CSUM) {
-               hlen = (skb_network_header(skb) - skb->data) / 2;
+       i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
+                            PCI_VPD_LRDT_RO_DATA);
+       if (i < 0)
+               goto out_not_found;
 
-               /* for now NS flag is not used in Linux */
-               pbd->global_data =
-                       (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
-                                ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
 
-               pbd->ip_hlen = (skb_transport_header(skb) -
-                               skb_network_header(skb)) / 2;
+       block_end = i + PCI_VPD_LRDT_TAG_SIZE +
+                   pci_vpd_lrdt_size(&vpd_data[i]);
 
-               hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
+       i += PCI_VPD_LRDT_TAG_SIZE;
 
-               pbd->total_hlen = cpu_to_le16(hlen);
-               hlen = hlen*2;
+       if (block_end > BNX2X_VPD_LEN)
+               goto out_not_found;
 
-               tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
+       rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
+                                  PCI_VPD_RO_KEYWORD_MFR_ID);
+       if (rodi < 0)
+               goto out_not_found;
 
-               if (xmit_type & XMIT_CSUM_V4)
-                       tx_start_bd->bd_flags.as_bitfield |=
-                                               ETH_TX_BD_FLAGS_IP_CSUM;
-               else
-                       tx_start_bd->bd_flags.as_bitfield |=
-                                               ETH_TX_BD_FLAGS_IPV6;
+       len = pci_vpd_info_field_size(&vpd_data[rodi]);
 
-               if (xmit_type & XMIT_CSUM_TCP) {
-                       pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
+       if (len != VENDOR_ID_LEN)
+               goto out_not_found;
 
-               } else {
-                       s8 fix = SKB_CS_OFF(skb); /* signed! */
+       rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
 
-                       pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
+       /* vendor specific info */
+       snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
+       snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
+       if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
+           !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
 
-                       DP(NETIF_MSG_TX_QUEUED,
-                          "hlen %d  fix %d  csum before fix %x\n",
-                          le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
+               rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
+                                               PCI_VPD_RO_KEYWORD_VENDOR0);
+               if (rodi >= 0) {
+                       len = pci_vpd_info_field_size(&vpd_data[rodi]);
 
-                       /* HW bug: fixup the CSUM */
-                       pbd->tcp_pseudo_csum =
-                               bnx2x_csum_fix(skb_transport_header(skb),
-                                              SKB_CS(skb), fix);
+                       rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
 
-                       DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
-                          pbd->tcp_pseudo_csum);
+                       if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
+                               memcpy(bp->fw_ver, &vpd_data[rodi], len);
+                               bp->fw_ver[len] = ' ';
+                       }
                }
+               return;
        }
+out_not_found:
+       return;
+}
 
-       mapping = dma_map_single(&bp->pdev->dev, skb->data,
-                                skb_headlen(skb), DMA_TO_DEVICE);
-
-       tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
-       tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
-       nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
-       tx_start_bd->nbd = cpu_to_le16(nbd);
-       tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
-       pkt_size = tx_start_bd->nbytes;
-
-       DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
-          "  nbytes %d  flags %x  vlan %x\n",
-          tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
-          le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
-          tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
+static int __devinit bnx2x_init_bp(struct bnx2x *bp)
+{
+       int func = BP_FUNC(bp);
+       int timer_interval;
+       int rc;
 
-       if (xmit_type & XMIT_GSO) {
+       /* Disable interrupt handling until HW is initialized */
+       atomic_set(&bp->intr_sem, 1);
+       smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
 
-               DP(NETIF_MSG_TX_QUEUED,
-                  "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
-                  skb->len, hlen, skb_headlen(skb),
-                  skb_shinfo(skb)->gso_size);
+       mutex_init(&bp->port.phy_mutex);
+       mutex_init(&bp->fw_mb_mutex);
+       spin_lock_init(&bp->stats_lock);
+#ifdef BCM_CNIC
+       mutex_init(&bp->cnic_mutex);
+#endif
 
-               tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
+       INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
+       INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
 
-               if (unlikely(skb_headlen(skb) > hlen))
-                       bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
-                                                hlen, bd_prod, ++nbd);
+       rc = bnx2x_get_hwinfo(bp);
 
-               pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
-               pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
-               pbd->tcp_flags = pbd_tcp_flags(skb);
+       bnx2x_read_fwinfo(bp);
+       /* need to reset chip if undi was active */
+       if (!BP_NOMCP(bp))
+               bnx2x_undi_unload(bp);
 
-               if (xmit_type & XMIT_GSO_V4) {
-                       pbd->ip_id = swab16(ip_hdr(skb)->id);
-                       pbd->tcp_pseudo_csum =
-                               swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
-                                                         ip_hdr(skb)->daddr,
-                                                         0, IPPROTO_TCP, 0));
+       if (CHIP_REV_IS_FPGA(bp))
+               dev_err(&bp->pdev->dev, "FPGA detected\n");
 
-               } else
-                       pbd->tcp_pseudo_csum =
-                               swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                                       &ipv6_hdr(skb)->daddr,
-                                                       0, IPPROTO_TCP, 0));
+       if (BP_NOMCP(bp) && (func == 0))
+               dev_err(&bp->pdev->dev, "MCP disabled, "
+                                       "must load devices in order!\n");
 
-               pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
+       /* Set multi queue mode */
+       if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
+           ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
+               dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
+                                       "requested is not MSI-X\n");
+               multi_mode = ETH_RSS_MODE_DISABLED;
        }
-       tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
-
-       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
-               bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
-               tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
-               if (total_pkt_bd == NULL)
-                       total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
-
-               mapping = dma_map_page(&bp->pdev->dev, frag->page,
-                                      frag->page_offset,
-                                      frag->size, DMA_TO_DEVICE);
+       bp->multi_mode = multi_mode;
+       bp->int_mode = int_mode;
 
-               tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
-               tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
-               tx_data_bd->nbytes = cpu_to_le16(frag->size);
-               le16_add_cpu(&pkt_size, frag->size);
+       bp->dev->features |= NETIF_F_GRO;
 
-               DP(NETIF_MSG_TX_QUEUED,
-                  "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
-                  i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
-                  le16_to_cpu(tx_data_bd->nbytes));
+       /* Set TPA flags */
+       if (disable_tpa) {
+               bp->flags &= ~TPA_ENABLE_FLAG;
+               bp->dev->features &= ~NETIF_F_LRO;
+       } else {
+               bp->flags |= TPA_ENABLE_FLAG;
+               bp->dev->features |= NETIF_F_LRO;
        }
+       bp->disable_tpa = disable_tpa;
 
-       DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
-
-       bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
-
-       /* now send a tx doorbell, counting the next BD
-        * if the packet contains or ends with it
-        */
-       if (TX_BD_POFF(bd_prod) < nbd)
-               nbd++;
-
-       if (total_pkt_bd != NULL)
-               total_pkt_bd->total_pkt_bytes = pkt_size;
-
-       if (pbd)
-               DP(NETIF_MSG_TX_QUEUED,
-                  "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
-                  "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
-                  pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
-                  pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
-                  pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
+       if (CHIP_IS_E1(bp))
+               bp->dropless_fc = 0;
+       else
+               bp->dropless_fc = dropless_fc;
 
-       DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
+       bp->mrrs = mrrs;
 
-       /*
-        * Make sure that the BD data is updated before updating the producer
-        * since FW might read the BD right after the producer is updated.
-        * This is only applicable for weak-ordered memory model archs such
-        * as IA-64. The following barrier is also mandatory since FW will
-        * assumes packets must have BDs.
-        */
-       wmb();
+       bp->tx_ring_size = MAX_TX_AVAIL;
+       bp->rx_ring_size = MAX_RX_AVAIL;
 
-       fp->tx_db.data.prod += nbd;
-       barrier();
-       DOORBELL(bp, fp->index, fp->tx_db.raw);
+       bp->rx_csum = 1;
 
-       mmiowb();
+       /* make sure that the numbers are in the right granularity */
+       bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
+       bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
 
-       fp->tx_bd_prod += nbd;
+       timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
+       bp->current_interval = (poll ? poll : timer_interval);
 
-       if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
-               netif_tx_stop_queue(txq);
+       init_timer(&bp->timer);
+       bp->timer.expires = jiffies + bp->current_interval;
+       bp->timer.data = (unsigned long) bp;
+       bp->timer.function = bnx2x_timer;
 
-               /* paired memory barrier is in bnx2x_tx_int(), we have to keep
-                * ordering of set_bit() in netif_tx_stop_queue() and read of
-                * fp->bd_tx_cons */
-               smp_mb();
+       return rc;
+}
 
-               fp->eth_q_stats.driver_xoff++;
-               if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
-                       netif_tx_wake_queue(txq);
-       }
-       fp->tx_pkt++;
 
-       return NETDEV_TX_OK;
-}
+/****************************************************************************
+* General service functions
+****************************************************************************/
 
 /* called with rtnl_lock */
 static int bnx2x_open(struct net_device *dev)
@@ -12591,7 +6849,7 @@ static int bnx2x_close(struct net_device *dev)
 }
 
 /* called with netif_tx_lock from dev_mcast.c */
-static void bnx2x_set_rx_mode(struct net_device *dev)
+void bnx2x_set_rx_mode(struct net_device *dev)
 {
        struct bnx2x *bp = netdev_priv(dev);
        u32 rx_mode = BNX2X_RX_MODE_NORMAL;
@@ -12711,25 +6969,6 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
        bnx2x_set_storm_rx_mode(bp);
 }
 
-/* called with rtnl_lock */
-static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
-{
-       struct sockaddr *addr = p;
-       struct bnx2x *bp = netdev_priv(dev);
-
-       if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
-               return -EINVAL;
-
-       memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
-       if (netif_running(dev)) {
-               if (CHIP_IS_E1(bp))
-                       bnx2x_set_eth_mac_addr_e1(bp, 1);
-               else
-                       bnx2x_set_eth_mac_addr_e1h(bp, 1);
-       }
-
-       return 0;
-}
 
 /* called with rtnl_lock */
 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
@@ -12805,71 +7044,6 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
 }
 
-/* called with rtnl_lock */
-static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
-{
-       struct bnx2x *bp = netdev_priv(dev);
-       int rc = 0;
-
-       if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
-               printk(KERN_ERR "Handling parity error recovery. Try again later\n");
-               return -EAGAIN;
-       }
-
-       if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
-           ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
-               return -EINVAL;
-
-       /* This does not race with packet allocation
-        * because the actual alloc size is
-        * only updated as part of load
-        */
-       dev->mtu = new_mtu;
-
-       if (netif_running(dev)) {
-               bnx2x_nic_unload(bp, UNLOAD_NORMAL);
-               rc = bnx2x_nic_load(bp, LOAD_NORMAL);
-       }
-
-       return rc;
-}
-
-static void bnx2x_tx_timeout(struct net_device *dev)
-{
-       struct bnx2x *bp = netdev_priv(dev);
-
-#ifdef BNX2X_STOP_ON_ERROR
-       if (!bp->panic)
-               bnx2x_panic();
-#endif
-       /* This allows the netif to be shutdown gracefully before resetting */
-       schedule_delayed_work(&bp->reset_task, 0);
-}
-
-#ifdef BCM_VLAN
-/* called with rtnl_lock */
-static void bnx2x_vlan_rx_register(struct net_device *dev,
-                                  struct vlan_group *vlgrp)
-{
-       struct bnx2x *bp = netdev_priv(dev);
-
-       bp->vlgrp = vlgrp;
-
-       /* Set flags according to the required capabilities */
-       bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
-
-       if (dev->features & NETIF_F_HW_VLAN_TX)
-               bp->flags |= HW_VLAN_TX_FLAG;
-
-       if (dev->features & NETIF_F_HW_VLAN_RX)
-               bp->flags |= HW_VLAN_RX_FLAG;
-
-       if (netif_running(dev))
-               bnx2x_set_client_config(bp);
-}
-
-#endif
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void poll_bnx2x(struct net_device *dev)
 {
@@ -13018,7 +7192,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
        dev->watchdog_timeo = TX_TIMEOUT;
 
        dev->netdev_ops = &bnx2x_netdev_ops;
-       dev->ethtool_ops = &bnx2x_ethtool_ops;
+       bnx2x_set_ethtool_ops(dev);
        dev->features |= NETIF_F_SG;
        dev->features |= NETIF_F_HW_CSUM;
        if (bp->flags & USING_DAC_FLAG)
@@ -13371,73 +7545,6 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
        pci_set_drvdata(pdev, NULL);
 }
 
-static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
-{
-       struct net_device *dev = pci_get_drvdata(pdev);
-       struct bnx2x *bp;
-
-       if (!dev) {
-               dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
-               return -ENODEV;
-       }
-       bp = netdev_priv(dev);
-
-       rtnl_lock();
-
-       pci_save_state(pdev);
-
-       if (!netif_running(dev)) {
-               rtnl_unlock();
-               return 0;
-       }
-
-       netif_device_detach(dev);
-
-       bnx2x_nic_unload(bp, UNLOAD_CLOSE);
-
-       bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
-
-       rtnl_unlock();
-
-       return 0;
-}
-
-static int bnx2x_resume(struct pci_dev *pdev)
-{
-       struct net_device *dev = pci_get_drvdata(pdev);
-       struct bnx2x *bp;
-       int rc;
-
-       if (!dev) {
-               dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
-               return -ENODEV;
-       }
-       bp = netdev_priv(dev);
-
-       if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
-               printk(KERN_ERR "Handling parity error recovery. Try again later\n");
-               return -EAGAIN;
-       }
-
-       rtnl_lock();
-
-       pci_restore_state(pdev);
-
-       if (!netif_running(dev)) {
-               rtnl_unlock();
-               return 0;
-       }
-
-       bnx2x_set_power_state(bp, PCI_D0);
-       netif_device_attach(dev);
-
-       rc = bnx2x_nic_load(bp, LOAD_OPEN);
-
-       rtnl_unlock();
-
-       return rc;
-}
-
 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
 {
        int i;
@@ -13759,7 +7866,7 @@ static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
 /*
  * for commands that have no data
  */
-static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
+int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
 {
        struct cnic_ctl_info ctl = {0};
 
@@ -13827,7 +7934,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
        return rc;
 }
 
-static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
+void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
 {
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
new file mode 100644 (file)
index 0000000..c747244
--- /dev/null
@@ -0,0 +1,1411 @@
+/* bnx2x_stats.c: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+#include "bnx2x_cmn.h"
+#include "bnx2x_stats.h"
+
+/* Statistics */
+
+/****************************************************************************
+* Macros
+****************************************************************************/
+
+/* sum[hi:lo] += add[hi:lo] */
+#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
+       do { \
+               s_lo += a_lo; \
+               s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
+       } while (0)
+
+/* difference = minuend - subtrahend */
+#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
+       do { \
+               if (m_lo < s_lo) { \
+                       /* underflow */ \
+                       d_hi = m_hi - s_hi; \
+                       if (d_hi > 0) { \
+                               /* we can 'loan' 1 */ \
+                               d_hi--; \
+                               d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
+                       } else { \
+                               /* m_hi <= s_hi */ \
+                               d_hi = 0; \
+                               d_lo = 0; \
+                       } \
+               } else { \
+                       /* m_lo >= s_lo */ \
+                       if (m_hi < s_hi) { \
+                               d_hi = 0; \
+                               d_lo = 0; \
+                       } else { \
+                               /* m_hi >= s_hi */ \
+                               d_hi = m_hi - s_hi; \
+                               d_lo = m_lo - s_lo; \
+                       } \
+               } \
+       } while (0)
+
+#define UPDATE_STAT64(s, t) \
+       do { \
+               DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
+                       diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
+               pstats->mac_stx[0].t##_hi = new->s##_hi; \
+               pstats->mac_stx[0].t##_lo = new->s##_lo; \
+               ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
+                      pstats->mac_stx[1].t##_lo, diff.lo); \
+       } while (0)
+
+#define UPDATE_STAT64_NIG(s, t) \
+       do { \
+               DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
+                       diff.lo, new->s##_lo, old->s##_lo); \
+               ADD_64(estats->t##_hi, diff.hi, \
+                      estats->t##_lo, diff.lo); \
+       } while (0)
+
+/* sum[hi:lo] += add */
+#define ADD_EXTEND_64(s_hi, s_lo, a) \
+       do { \
+               s_lo += a; \
+               s_hi += (s_lo < a) ? 1 : 0; \
+       } while (0)
+
+#define UPDATE_EXTEND_STAT(s) \
+       do { \
+               ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
+                             pstats->mac_stx[1].s##_lo, \
+                             new->s); \
+       } while (0)
+
+#define UPDATE_EXTEND_TSTAT(s, t) \
+       do { \
+               diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
+               old_tclient->s = tclient->s; \
+               ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+       } while (0)
+
+#define UPDATE_EXTEND_USTAT(s, t) \
+       do { \
+               diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
+               old_uclient->s = uclient->s; \
+               ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+       } while (0)
+
+#define UPDATE_EXTEND_XSTAT(s, t) \
+       do { \
+               diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
+               old_xclient->s = xclient->s; \
+               ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+       } while (0)
+
+/* minuend -= subtrahend */
+#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
+       do { \
+               DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
+       } while (0)
+
+/* minuend[hi:lo] -= subtrahend */
+#define SUB_EXTEND_64(m_hi, m_lo, s) \
+       do { \
+               SUB_64(m_hi, 0, m_lo, s); \
+       } while (0)
+
+#define SUB_EXTEND_USTAT(s, t) \
+       do { \
+               diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
+               SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+       } while (0)
+
+/*
+ * General service functions
+ */
+
+static inline long bnx2x_hilo(u32 *hiref)
+{
+       u32 lo = *(hiref + 1);
+#if (BITS_PER_LONG == 64)
+       u32 hi = *hiref;
+
+       return HILO_U64(hi, lo);
+#else
+       return lo;
+#endif
+}
+
+/*
+ * Init service functions
+ */
+
+
+/* Ask the controller firmware ("storms") for a fresh per-client statistics
+ * snapshot by posting a STAT_QUERY ramrod covering every queue's client id.
+ * No-op while a previous query is still pending.  stats_lock serializes
+ * stats_counter against the readers in the storm stats update path. */
+static void bnx2x_storm_stats_post(struct bnx2x *bp)
+{
+       if (!bp->stats_pending) {
+               struct eth_query_ramrod_data ramrod_data = {0};
+               int i, rc;
+
+               spin_lock_bh(&bp->stats_lock);
+
+               ramrod_data.drv_counter = bp->stats_counter++;
+               ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
+               /* request stats for every queue's client id */
+               for_each_queue(bp, i)
+                       ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
+
+               rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
+                                  ((u32 *)&ramrod_data)[1],
+                                  ((u32 *)&ramrod_data)[0], 0);
+               if (rc == 0) {
+                       /* stats ramrod has its own slot on the spq */
+                       bp->spq_left++;
+                       bp->stats_pending = 1;
+               }
+
+               spin_unlock_bh(&bp->stats_lock);
+       }
+}
+
+/* Launch the DMAE statistics transfer(s) that were set up earlier.  If a
+ * chain of commands was queued in the slowpath buffer (executer_idx != 0),
+ * build a "loader" command that DMAs the queued block into the DMAE
+ * command memory and triggers it; otherwise post the single pre-built
+ * function-stats command.  Completion is signalled by the hardware
+ * overwriting *stats_comp with DMAE_COMP_VAL (see bnx2x_stats_comp).
+ * Skipped on slow chip revisions -- presumably emulation/FPGA where DMAE
+ * is unavailable; TODO confirm CHIP_REV_IS_SLOW semantics. */
+static void bnx2x_hw_stats_post(struct bnx2x *bp)
+{
+       struct dmae_command *dmae = &bp->stats_dmae;
+       u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+       *stats_comp = DMAE_COMP_VAL;
+       if (CHIP_REV_IS_SLOW(bp))
+               return;
+
+       /* loader */
+       if (bp->executer_idx) {
+               int loader_idx = PMF_DMAE_C(bp);
+
+               memset(dmae, 0, sizeof(struct dmae_command));
+
+               dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
+                               DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
+                               DMAE_CMD_DST_RESET |
+#ifdef __BIG_ENDIAN
+                               DMAE_CMD_ENDIANITY_B_DW_SWAP |
+#else
+                               DMAE_CMD_ENDIANITY_DW_SWAP |
+#endif
+                               (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
+                                              DMAE_CMD_PORT_0) |
+                               (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
+               /* source: the queued command block in host memory;
+                * destination: DMAE command memory, one slot past the loader */
+               dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
+               dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
+               dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
+                                    sizeof(struct dmae_command) *
+                                    (loader_idx + 1)) >> 2;
+               dmae->dst_addr_hi = 0;
+               dmae->len = sizeof(struct dmae_command) >> 2;
+               /* E1 uses a one-dword-shorter DMAE command -- TODO confirm */
+               if (CHIP_IS_E1(bp))
+                       dmae->len--;
+               /* completion kicks the GO register of the chained slot */
+               dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
+               dmae->comp_addr_hi = 0;
+               dmae->comp_val = 1;
+
+               *stats_comp = 0;
+               bnx2x_post_dmae(bp, dmae, loader_idx);
+
+       } else if (bp->func_stx) {
+               *stats_comp = 0;
+               bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
+       }
+}
+
+/* Poll until the DMAE engine writes DMAE_COMP_VAL into the slowpath
+ * completion word, sleeping 1ms between polls for at most 10 tries.
+ * A timeout is only logged; the return value is fixed at 1. */
+static int bnx2x_stats_comp(struct bnx2x *bp)
+{
+       u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+       int budget = 10;
+
+       might_sleep();
+       while (*stats_comp != DMAE_COMP_VAL) {
+               if (budget-- == 0) {
+                       BNX2X_ERR("timeout waiting for stats finished\n");
+                       break;
+               }
+               msleep(1);
+       }
+       return 1;
+}
+
+/*
+ * Statistics service functions
+ */
+
+/* On taking over the PMF role, read the port statistics block back from
+ * the device (port_stx) into the host buffer so accumulation continues
+ * from the values the previous PMF presumably left there.  The block is
+ * larger than DMAE_LEN32_RD_MAX dwords, so two chained DMAE reads are
+ * queued; the routine runs synchronously (posts and waits). */
+static void bnx2x_stats_pmf_update(struct bnx2x *bp)
+{
+       struct dmae_command *dmae;
+       u32 opcode;
+       int loader_idx = PMF_DMAE_C(bp);
+       u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+       /* sanity */
+       if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
+               BNX2X_ERR("BUG!\n");
+               return;
+       }
+
+       bp->executer_idx = 0;
+
+       opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
+                 DMAE_CMD_C_ENABLE |
+                 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
+#ifdef __BIG_ENDIAN
+                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
+#else
+                 DMAE_CMD_ENDIANITY_DW_SWAP |
+#endif
+                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
+                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
+
+       /* first chunk: DMAE_LEN32_RD_MAX dwords, completion chains to GO reg */
+       dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+       dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
+       dmae->src_addr_lo = bp->port.port_stx >> 2;
+       dmae->src_addr_hi = 0;
+       dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
+       dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
+       dmae->len = DMAE_LEN32_RD_MAX;
+       dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+       dmae->comp_addr_hi = 0;
+       dmae->comp_val = 1;
+
+       /* remainder of host_port_stats; completion is written to stats_comp */
+       dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+       dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
+       dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
+       dmae->src_addr_hi = 0;
+       dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
+                                  DMAE_LEN32_RD_MAX * 4);
+       dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
+                                  DMAE_LEN32_RD_MAX * 4);
+       dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
+       dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+       dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+       dmae->comp_val = DMAE_COMP_VAL;
+
+       *stats_comp = 0;
+       bnx2x_hw_stats_post(bp);
+       bnx2x_stats_comp(bp);
+}
+
+/* Build the per-port chain of DMAE commands that one statistics cycle
+ * executes: write the host port/function stats out to the device areas
+ * (port_stx/func_stx), then read the active MAC's counters (BMAC or
+ * EMAC) and the NIG counters into the slowpath buffers.  Commands are
+ * only queued here (bp->executer_idx); bnx2x_hw_stats_post() launches
+ * them, and the final command completes to *stats_comp.  Only valid on
+ * the PMF while the link is up. */
+static void bnx2x_port_stats_init(struct bnx2x *bp)
+{
+       struct dmae_command *dmae;
+       int port = BP_PORT(bp);
+       int vn = BP_E1HVN(bp);
+       u32 opcode;
+       int loader_idx = PMF_DMAE_C(bp);
+       u32 mac_addr;
+       u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+       /* sanity */
+       if (!bp->link_vars.link_up || !bp->port.pmf) {
+               BNX2X_ERR("BUG!\n");
+               return;
+       }
+
+       bp->executer_idx = 0;
+
+       /* MCP: host -> device writes of the accumulated stats */
+       opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
+                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
+                 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
+#ifdef __BIG_ENDIAN
+                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
+#else
+                 DMAE_CMD_ENDIANITY_DW_SWAP |
+#endif
+                 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
+                 (vn << DMAE_CMD_E1HVN_SHIFT));
+
+       if (bp->port.port_stx) {
+
+               dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+               dmae->opcode = opcode;
+               dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
+               dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
+               dmae->dst_addr_lo = bp->port.port_stx >> 2;
+               dmae->dst_addr_hi = 0;
+               dmae->len = sizeof(struct host_port_stats) >> 2;
+               dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+               dmae->comp_addr_hi = 0;
+               dmae->comp_val = 1;
+       }
+
+       if (bp->func_stx) {
+
+               dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+               dmae->opcode = opcode;
+               dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
+               dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
+               dmae->dst_addr_lo = bp->func_stx >> 2;
+               dmae->dst_addr_hi = 0;
+               dmae->len = sizeof(struct host_func_stats) >> 2;
+               dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+               dmae->comp_addr_hi = 0;
+               dmae->comp_val = 1;
+       }
+
+       /* MAC: device -> host reads of the hardware MAC counters */
+       opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
+                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
+                 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
+#ifdef __BIG_ENDIAN
+                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
+#else
+                 DMAE_CMD_ENDIANITY_DW_SWAP |
+#endif
+                 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
+                 (vn << DMAE_CMD_E1HVN_SHIFT));
+
+       if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
+
+               mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
+                                  NIG_REG_INGRESS_BMAC0_MEM);
+
+               /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
+                  BIGMAC_REGISTER_TX_STAT_GTBYT */
+               dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+               dmae->opcode = opcode;
+               dmae->src_addr_lo = (mac_addr +
+                                    BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
+               dmae->src_addr_hi = 0;
+               dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
+               dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
+               dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
+                            BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
+               dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+               dmae->comp_addr_hi = 0;
+               dmae->comp_val = 1;
+
+               /* BIGMAC_REGISTER_RX_STAT_GR64 ..
+                  BIGMAC_REGISTER_RX_STAT_GRIPJ */
+               dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+               dmae->opcode = opcode;
+               dmae->src_addr_lo = (mac_addr +
+                                    BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
+               dmae->src_addr_hi = 0;
+               dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
+                               offsetof(struct bmac_stats, rx_stat_gr64_lo));
+               dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
+                               offsetof(struct bmac_stats, rx_stat_gr64_lo));
+               dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
+                            BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
+               dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+               dmae->comp_addr_hi = 0;
+               dmae->comp_val = 1;
+
+       } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
+
+               mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
+
+               /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
+               dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+               dmae->opcode = opcode;
+               dmae->src_addr_lo = (mac_addr +
+                                    EMAC_REG_EMAC_RX_STAT_AC) >> 2;
+               dmae->src_addr_hi = 0;
+               dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
+               dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
+               dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
+               dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+               dmae->comp_addr_hi = 0;
+               dmae->comp_val = 1;
+
+               /* EMAC_REG_EMAC_RX_STAT_AC_28 */
+               dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+               dmae->opcode = opcode;
+               dmae->src_addr_lo = (mac_addr +
+                                    EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
+               dmae->src_addr_hi = 0;
+               dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
+                    offsetof(struct emac_stats, rx_stat_falsecarriererrors));
+               dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
+                    offsetof(struct emac_stats, rx_stat_falsecarriererrors));
+               dmae->len = 1;
+               dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+               dmae->comp_addr_hi = 0;
+               dmae->comp_val = 1;
+
+               /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
+               dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+               dmae->opcode = opcode;
+               dmae->src_addr_lo = (mac_addr +
+                                    EMAC_REG_EMAC_TX_STAT_AC) >> 2;
+               dmae->src_addr_hi = 0;
+               dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
+                       offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
+               dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
+                       offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
+               dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
+               dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+               dmae->comp_addr_hi = 0;
+               dmae->comp_val = 1;
+       }
+
+       /* NIG: last 4 dwords of struct nig_stats are read separately below */
+       dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+       dmae->opcode = opcode;
+       dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
+                                   NIG_REG_STAT0_BRB_DISCARD) >> 2;
+       dmae->src_addr_hi = 0;
+       dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
+       dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
+       dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
+       dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+       dmae->comp_addr_hi = 0;
+       dmae->comp_val = 1;
+
+       dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+       dmae->opcode = opcode;
+       dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
+                                   NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
+       dmae->src_addr_hi = 0;
+       dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
+                       offsetof(struct nig_stats, egress_mac_pkt0_lo));
+       dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
+                       offsetof(struct nig_stats, egress_mac_pkt0_lo));
+       dmae->len = (2*sizeof(u32)) >> 2;
+       dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+       dmae->comp_addr_hi = 0;
+       dmae->comp_val = 1;
+
+       /* final command of the chain: completes to stats_comp (PCI) */
+       dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+       dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
+                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
+                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
+#ifdef __BIG_ENDIAN
+                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
+#else
+                       DMAE_CMD_ENDIANITY_DW_SWAP |
+#endif
+                       (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
+                       (vn << DMAE_CMD_E1HVN_SHIFT));
+       dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
+                                   NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
+       dmae->src_addr_hi = 0;
+       dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
+                       offsetof(struct nig_stats, egress_mac_pkt1_lo));
+       dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
+                       offsetof(struct nig_stats, egress_mac_pkt1_lo));
+       dmae->len = (2*sizeof(u32)) >> 2;
+       dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+       dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+       dmae->comp_val = DMAE_COMP_VAL;
+
+       *stats_comp = 0;
+}
+
+/* Prepare the single DMAE command that writes host_func_stats out to the
+ * per-function device area (func_stx).  Used when this function is not
+ * the PMF (the PMF builds the full chain in bnx2x_port_stats_init).
+ * The command completes directly to *stats_comp. */
+static void bnx2x_func_stats_init(struct bnx2x *bp)
+{
+       struct dmae_command *dmae = &bp->stats_dmae;
+       u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+       /* sanity */
+       if (!bp->func_stx) {
+               BNX2X_ERR("BUG!\n");
+               return;
+       }
+
+       bp->executer_idx = 0;
+       memset(dmae, 0, sizeof(struct dmae_command));
+
+       dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
+                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
+                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
+#ifdef __BIG_ENDIAN
+                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
+#else
+                       DMAE_CMD_ENDIANITY_DW_SWAP |
+#endif
+                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
+                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
+       dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
+       dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
+       dmae->dst_addr_lo = bp->func_stx >> 2;
+       dmae->dst_addr_hi = 0;
+       dmae->len = sizeof(struct host_func_stats) >> 2;
+       dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+       dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+       dmae->comp_val = DMAE_COMP_VAL;
+
+       *stats_comp = 0;
+}
+
+/* Start one statistics cycle: set up the DMAE command chain (full port
+ * chain on the PMF, just function stats otherwise), launch it, and post
+ * a storm stats query to the firmware. */
+static void bnx2x_stats_start(struct bnx2x *bp)
+{
+       if (bp->port.pmf)
+               bnx2x_port_stats_init(bp);
+
+       else if (bp->func_stx)
+               bnx2x_func_stats_init(bp);
+
+       bnx2x_hw_stats_post(bp);
+       bnx2x_storm_stats_post(bp);
+}
+
+/* Transition into the PMF role: wait out any in-flight DMAE completion,
+ * pull the existing port stats from the device, then start a cycle. */
+static void bnx2x_stats_pmf_start(struct bnx2x *bp)
+{
+       bnx2x_stats_comp(bp);
+       bnx2x_stats_pmf_update(bp);
+       bnx2x_stats_start(bp);
+}
+
+/* Restart collection: wait for any outstanding DMAE completion, then
+ * kick off a fresh hardware + storm statistics cycle. */
+static void bnx2x_stats_restart(struct bnx2x *bp)
+{
+       bnx2x_stats_comp(bp);
+       bnx2x_stats_start(bp);
+}
+
+/* Fold the freshly DMAed BMAC hardware counters into the host port stats.
+ * The UPDATE_STAT64 macro (defined earlier in this file) implicitly
+ * consumes the 'new' and 'diff' locals, which is why they appear unused
+ * here.  Pause-frame totals are then mirrored into eth_stats from the
+ * accumulated mac_stx[1] entries. */
+static void bnx2x_bmac_stats_update(struct bnx2x *bp)
+{
+       struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
+       struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
+       struct bnx2x_eth_stats *estats = &bp->eth_stats;
+       struct {
+               u32 lo;
+               u32 hi;
+       } diff;
+
+       UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
+       UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
+       UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
+       UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
+       UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
+       UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
+       UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
+       UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
+       UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
+       UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
+       UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
+       UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
+       UPDATE_STAT64(tx_stat_gt127,
+                               tx_stat_etherstatspkts65octetsto127octets);
+       UPDATE_STAT64(tx_stat_gt255,
+                               tx_stat_etherstatspkts128octetsto255octets);
+       UPDATE_STAT64(tx_stat_gt511,
+                               tx_stat_etherstatspkts256octetsto511octets);
+       UPDATE_STAT64(tx_stat_gt1023,
+                               tx_stat_etherstatspkts512octetsto1023octets);
+       UPDATE_STAT64(tx_stat_gt1518,
+                               tx_stat_etherstatspkts1024octetsto1522octets);
+       UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
+       UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
+       UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
+       UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
+       UPDATE_STAT64(tx_stat_gterr,
+                               tx_stat_dot3statsinternalmactransmiterrors);
+       UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
+
+       /* BMAC has no separate xon/xoff rx counters; xpf covers both */
+       estats->pause_frames_received_hi =
+                               pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
+       estats->pause_frames_received_lo =
+                               pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
+
+       estats->pause_frames_sent_hi =
+                               pstats->mac_stx[1].tx_stat_outxoffsent_hi;
+       estats->pause_frames_sent_lo =
+                               pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+}
+
+/* Fold the freshly DMAed EMAC hardware counters into the host port stats.
+ * UPDATE_EXTEND_STAT (defined earlier in this file) implicitly consumes
+ * the 'new' local, which is why it appears unused.  Pause totals are the
+ * sum of the xon and xoff counters, accumulated into eth_stats. */
+static void bnx2x_emac_stats_update(struct bnx2x *bp)
+{
+       struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
+       struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
+       struct bnx2x_eth_stats *estats = &bp->eth_stats;
+
+       UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
+       UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
+       UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
+       UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
+       UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
+       UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
+       UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
+       UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
+       UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
+       UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
+       UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
+       UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
+       UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
+       UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
+       UPDATE_EXTEND_STAT(tx_stat_outxonsent);
+       UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
+       UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
+       UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
+       UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
+       UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
+       UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
+       UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
+       UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
+       UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
+       UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
+       UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
+       UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
+       UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
+       UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
+       UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
+       UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
+
+       /* pause received = xon received + xoff received */
+       estats->pause_frames_received_hi =
+                       pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
+       estats->pause_frames_received_lo =
+                       pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
+       ADD_64(estats->pause_frames_received_hi,
+              pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
+              estats->pause_frames_received_lo,
+              pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
+
+       /* pause sent = xon sent + xoff sent */
+       estats->pause_frames_sent_hi =
+                       pstats->mac_stx[1].tx_stat_outxonsent_hi;
+       estats->pause_frames_sent_lo =
+                       pstats->mac_stx[1].tx_stat_outxonsent_lo;
+       ADD_64(estats->pause_frames_sent_hi,
+              pstats->mac_stx[1].tx_stat_outxoffsent_hi,
+              estats->pause_frames_sent_lo,
+              pstats->mac_stx[1].tx_stat_outxoffsent_lo);
+}
+
+/* Run after a DMAE statistics cycle completes: dispatch to the active
+ * MAC's update routine, accumulate the NIG deltas (BRB discard/truncate
+ * plus the two egress packet counters), and mirror the result into the
+ * driver-wide eth_stats.  Returns 0 on success, -1 when no MAC is
+ * active -- presumably only reachable on a link-state race.  Also logs
+ * whenever the firmware-reported NIG timer max changes. */
+static int bnx2x_hw_stats_update(struct bnx2x *bp)
+{
+       struct nig_stats *new = bnx2x_sp(bp, nig_stats);
+       struct nig_stats *old = &(bp->port.old_nig_stats);
+       struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
+       struct bnx2x_eth_stats *estats = &bp->eth_stats;
+       /* consumed implicitly by the UPDATE_STAT64_NIG macro expansions */
+       struct {
+               u32 lo;
+               u32 hi;
+       } diff;
+
+       if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
+               bnx2x_bmac_stats_update(bp);
+
+       else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
+               bnx2x_emac_stats_update(bp);
+
+       else { /* unreached */
+               BNX2X_ERR("stats updated by DMAE but no MAC active\n");
+               return -1;
+       }
+
+       ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
+                     new->brb_discard - old->brb_discard);
+       ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
+                     new->brb_truncate - old->brb_truncate);
+
+       UPDATE_STAT64_NIG(egress_mac_pkt0,
+                                       etherstatspkts1024octetsto1522octets);
+       UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
+
+       /* snapshot the raw NIG counters for the next delta computation */
+       memcpy(old, new, sizeof(struct nig_stats));
+
+       /* mirror the whole accumulated mac_stx[1] block into eth_stats */
+       memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
+              sizeof(struct mac_stx));
+       estats->brb_drop_hi = pstats->brb_drop_hi;
+       estats->brb_drop_lo = pstats->brb_drop_lo;
+
+       /* mark the host block consistent for readers on the device side */
+       pstats->host_port_stats_start = ++pstats->host_port_stats_end;
+
+       if (!BP_NOMCP(bp)) {
+               u32 nig_timer_max =
+                       SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
+               if (nig_timer_max != estats->nig_timer_max) {
+                       estats->nig_timer_max = nig_timer_max;
+                       BNX2X_ERR("NIG timer max (%u)\n",
+                                 estats->nig_timer_max);
+               }
+       }
+
+       return 0;
+}
+
+static int bnx2x_storm_stats_update(struct bnx2x *bp)
+{
+       struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
+       struct tstorm_per_port_stats *tport =
+                                       &stats->tstorm_common.port_statistics;
+       struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
+       struct bnx2x_eth_stats *estats = &bp->eth_stats;
+       int i;
+       u16 cur_stats_counter;
+
+       /* Make sure we use the value of the counter
+        * used for sending the last stats ramrod.
+        */
+       spin_lock_bh(&bp->stats_lock);
+       cur_stats_counter = bp->stats_counter - 1;
+       spin_unlock_bh(&bp->stats_lock);
+
+       memcpy(&(fstats->total_bytes_received_hi),
+              &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
+              sizeof(struct host_func_stats) - 2*sizeof(u32));
+       estats->error_bytes_received_hi = 0;
+       estats->error_bytes_received_lo = 0;
+       estats->etherstatsoverrsizepkts_hi = 0;
+       estats->etherstatsoverrsizepkts_lo = 0;
+       estats->no_buff_discard_hi = 0;
+       estats->no_buff_discard_lo = 0;
+
+       for_each_queue(bp, i) {
+               struct bnx2x_fastpath *fp = &bp->fp[i];
+               int cl_id = fp->cl_id;
+               struct tstorm_per_client_stats *tclient =
+                               &stats->tstorm_common.client_statistics[cl_id];
+               struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
+               struct ustorm_per_client_stats *uclient =
+                               &stats->ustorm_common.client_statistics[cl_id];
+               struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
+               struct xstorm_per_client_stats *xclient =
+                               &stats->xstorm_common.client_statistics[cl_id];
+               struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
+               struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+               u32 diff;
+
+               /* are storm stats valid? */
+               if (le16_to_cpu(xclient->stats_counter) != cur_stats_counter) {
+                       DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
+                          "  xstorm counter (0x%x) != stats_counter (0x%x)\n",
+                          i, xclient->stats_counter, cur_stats_counter + 1);
+                       return -1;
+               }
+               if (le16_to_cpu(tclient->stats_counter) != cur_stats_counter) {
+                       DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
+                          "  tstorm counter (0x%x) != stats_counter (0x%x)\n",
+                          i, tclient->stats_counter, cur_stats_counter + 1);
+                       return -2;
+               }
+               if (le16_to_cpu(uclient->stats_counter) != cur_stats_counter) {
+                       DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
+                          "  ustorm counter (0x%x) != stats_counter (0x%x)\n",
+                          i, uclient->stats_counter, cur_stats_counter + 1);
+                       return -4;
+               }
+
+               qstats->total_bytes_received_hi =
+                       le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
+               qstats->total_bytes_received_lo =
+                       le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
+
+               ADD_64(qstats->total_bytes_received_hi,
+                      le32_to_cpu(tclient->rcv_multicast_bytes.hi),
+                      qstats->total_bytes_received_lo,
+                      le32_to_cpu(tclient->rcv_multicast_bytes.lo));
+
+               ADD_64(qstats->total_bytes_received_hi,
+                      le32_to_cpu(tclient->rcv_unicast_bytes.hi),
+                      qstats->total_bytes_received_lo,
+                      le32_to_cpu(tclient->rcv_unicast_bytes.lo));
+
+               SUB_64(qstats->total_bytes_received_hi,
+                      le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
+                      qstats->total_bytes_received_lo,
+                      le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
+
+               SUB_64(qstats->total_bytes_received_hi,
+                      le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
+                      qstats->total_bytes_received_lo,
+                      le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
+
+               SUB_64(qstats->total_bytes_received_hi,
+                      le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
+                      qstats->total_bytes_received_lo,
+                      le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
+
+               qstats->valid_bytes_received_hi =
+                                       qstats->total_bytes_received_hi;
+               qstats->valid_bytes_received_lo =
+                                       qstats->total_bytes_received_lo;
+
+               qstats->error_bytes_received_hi =
+                               le32_to_cpu(tclient->rcv_error_bytes.hi);
+               qstats->error_bytes_received_lo =
+                               le32_to_cpu(tclient->rcv_error_bytes.lo);
+
+               ADD_64(qstats->total_bytes_received_hi,
+                      qstats->error_bytes_received_hi,
+                      qstats->total_bytes_received_lo,
+                      qstats->error_bytes_received_lo);
+
+               UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
+                                       total_unicast_packets_received);
+               UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
+                                       total_multicast_packets_received);
+               UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
+                                       total_broadcast_packets_received);
+               UPDATE_EXTEND_TSTAT(packets_too_big_discard,
+                                       etherstatsoverrsizepkts);
+               UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
+
+               SUB_EXTEND_USTAT(ucast_no_buff_pkts,
+                                       total_unicast_packets_received);
+               SUB_EXTEND_USTAT(mcast_no_buff_pkts,
+                                       total_multicast_packets_received);
+               SUB_EXTEND_USTAT(bcast_no_buff_pkts,
+                                       total_broadcast_packets_received);
+               UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
+               UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
+               UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
+
+               qstats->total_bytes_transmitted_hi =
+                               le32_to_cpu(xclient->unicast_bytes_sent.hi);
+               qstats->total_bytes_transmitted_lo =
+                               le32_to_cpu(xclient->unicast_bytes_sent.lo);
+
+               ADD_64(qstats->total_bytes_transmitted_hi,
+                      le32_to_cpu(xclient->multicast_bytes_sent.hi),
+                      qstats->total_bytes_transmitted_lo,
+                      le32_to_cpu(xclient->multicast_bytes_sent.lo));
+
+               ADD_64(qstats->total_bytes_transmitted_hi,
+                      le32_to_cpu(xclient->broadcast_bytes_sent.hi),
+                      qstats->total_bytes_transmitted_lo,
+                      le32_to_cpu(xclient->broadcast_bytes_sent.lo));
+
+               UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
+                                       total_unicast_packets_transmitted);
+               UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
+                                       total_multicast_packets_transmitted);
+               UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
+                                       total_broadcast_packets_transmitted);
+
+               old_tclient->checksum_discard = tclient->checksum_discard;
+               old_tclient->ttl0_discard = tclient->ttl0_discard;
+
+               ADD_64(fstats->total_bytes_received_hi,
+                      qstats->total_bytes_received_hi,
+                      fstats->total_bytes_received_lo,
+                      qstats->total_bytes_received_lo);
+               ADD_64(fstats->total_bytes_transmitted_hi,
+                      qstats->total_bytes_transmitted_hi,
+                      fstats->total_bytes_transmitted_lo,
+                      qstats->total_bytes_transmitted_lo);
+               ADD_64(fstats->total_unicast_packets_received_hi,
+                      qstats->total_unicast_packets_received_hi,
+                      fstats->total_unicast_packets_received_lo,
+                      qstats->total_unicast_packets_received_lo);
+               ADD_64(fstats->total_multicast_packets_received_hi,
+                      qstats->total_multicast_packets_received_hi,
+                      fstats->total_multicast_packets_received_lo,
+                      qstats->total_multicast_packets_received_lo);
+               ADD_64(fstats->total_broadcast_packets_received_hi,
+                      qstats->total_broadcast_packets_received_hi,
+                      fstats->total_broadcast_packets_received_lo,
+                      qstats->total_broadcast_packets_received_lo);
+               ADD_64(fstats->total_unicast_packets_transmitted_hi,
+                      qstats->total_unicast_packets_transmitted_hi,
+                      fstats->total_unicast_packets_transmitted_lo,
+                      qstats->total_unicast_packets_transmitted_lo);
+               ADD_64(fstats->total_multicast_packets_transmitted_hi,
+                      qstats->total_multicast_packets_transmitted_hi,
+                      fstats->total_multicast_packets_transmitted_lo,
+                      qstats->total_multicast_packets_transmitted_lo);
+               ADD_64(fstats->total_broadcast_packets_transmitted_hi,
+                      qstats->total_broadcast_packets_transmitted_hi,
+                      fstats->total_broadcast_packets_transmitted_lo,
+                      qstats->total_broadcast_packets_transmitted_lo);
+               ADD_64(fstats->valid_bytes_received_hi,
+                      qstats->valid_bytes_received_hi,
+                      fstats->valid_bytes_received_lo,
+                      qstats->valid_bytes_received_lo);
+
+               ADD_64(estats->error_bytes_received_hi,
+                      qstats->error_bytes_received_hi,
+                      estats->error_bytes_received_lo,
+                      qstats->error_bytes_received_lo);
+               ADD_64(estats->etherstatsoverrsizepkts_hi,
+                      qstats->etherstatsoverrsizepkts_hi,
+                      estats->etherstatsoverrsizepkts_lo,
+                      qstats->etherstatsoverrsizepkts_lo);
+               ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
+                      estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
+       }
+
+       ADD_64(fstats->total_bytes_received_hi,
+              estats->rx_stat_ifhcinbadoctets_hi,
+              fstats->total_bytes_received_lo,
+              estats->rx_stat_ifhcinbadoctets_lo);
+
+       memcpy(estats, &(fstats->total_bytes_received_hi),
+              sizeof(struct host_func_stats) - 2*sizeof(u32));
+
+       ADD_64(estats->etherstatsoverrsizepkts_hi,
+              estats->rx_stat_dot3statsframestoolong_hi,
+              estats->etherstatsoverrsizepkts_lo,
+              estats->rx_stat_dot3statsframestoolong_lo);
+       ADD_64(estats->error_bytes_received_hi,
+              estats->rx_stat_ifhcinbadoctets_hi,
+              estats->error_bytes_received_lo,
+              estats->rx_stat_ifhcinbadoctets_lo);
+
+       if (bp->port.pmf) {
+               estats->mac_filter_discard =
+                               le32_to_cpu(tport->mac_filter_discard);
+               estats->xxoverflow_discard =
+                               le32_to_cpu(tport->xxoverflow_discard);
+               estats->brb_truncate_discard =
+                               le32_to_cpu(tport->brb_truncate_discard);
+               estats->mac_discard = le32_to_cpu(tport->mac_discard);
+       }
+
+       fstats->host_func_stats_start = ++fstats->host_func_stats_end;
+
+       bp->stats_pending = 0;
+
+       return 0;
+}
+
+static void bnx2x_net_stats_update(struct bnx2x *bp)
+{
+       struct bnx2x_eth_stats *estats = &bp->eth_stats;
+       struct net_device_stats *nstats = &bp->dev->stats;
+       int i;
+
+       nstats->rx_packets =
+               bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
+               bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
+               bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
+
+       nstats->tx_packets =
+               bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
+               bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
+               bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
+
+       nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
+
+       nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
+
+       nstats->rx_dropped = estats->mac_discard;
+       for_each_queue(bp, i)
+               nstats->rx_dropped +=
+                       le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
+
+       nstats->tx_dropped = 0;
+
+       nstats->multicast =
+               bnx2x_hilo(&estats->total_multicast_packets_received_hi);
+
+       nstats->collisions =
+               bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
+
+       nstats->rx_length_errors =
+               bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
+               bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
+       nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
+                                bnx2x_hilo(&estats->brb_truncate_hi);
+       nstats->rx_crc_errors =
+               bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
+       nstats->rx_frame_errors =
+               bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
+       nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
+       nstats->rx_missed_errors = estats->xxoverflow_discard;
+
+       nstats->rx_errors = nstats->rx_length_errors +
+                           nstats->rx_over_errors +
+                           nstats->rx_crc_errors +
+                           nstats->rx_frame_errors +
+                           nstats->rx_fifo_errors +
+                           nstats->rx_missed_errors;
+
+       nstats->tx_aborted_errors =
+               bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
+               bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
+       nstats->tx_carrier_errors =
+               bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
+       nstats->tx_fifo_errors = 0;
+       nstats->tx_heartbeat_errors = 0;
+       nstats->tx_window_errors = 0;
+
+       nstats->tx_errors = nstats->tx_aborted_errors +
+                           nstats->tx_carrier_errors +
+           bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
+}
+
+static void bnx2x_drv_stats_update(struct bnx2x *bp)
+{
+       struct bnx2x_eth_stats *estats = &bp->eth_stats;
+       int i;
+
+       estats->driver_xoff = 0;
+       estats->rx_err_discard_pkt = 0;
+       estats->rx_skb_alloc_failed = 0;
+       estats->hw_csum_err = 0;
+       for_each_queue(bp, i) {
+               struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
+
+               estats->driver_xoff += qstats->driver_xoff;
+               estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
+               estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
+               estats->hw_csum_err += qstats->hw_csum_err;
+       }
+}
+
+static void bnx2x_stats_update(struct bnx2x *bp)
+{
+       u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+       if (*stats_comp != DMAE_COMP_VAL)
+               return;
+
+       if (bp->port.pmf)
+               bnx2x_hw_stats_update(bp);
+
+       if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
+               BNX2X_ERR("storm stats were not updated for 3 times\n");
+               bnx2x_panic();
+               return;
+       }
+
+       bnx2x_net_stats_update(bp);
+       bnx2x_drv_stats_update(bp);
+
+       if (netif_msg_timer(bp)) {
+               struct bnx2x_eth_stats *estats = &bp->eth_stats;
+               int i;
+
+               printk(KERN_DEBUG "%s: brb drops %u  brb truncate %u\n",
+                      bp->dev->name,
+                      estats->brb_drop_lo, estats->brb_truncate_lo);
+
+               for_each_queue(bp, i) {
+                       struct bnx2x_fastpath *fp = &bp->fp[i];
+                       struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+
+                       printk(KERN_DEBUG "%s: rx usage(%4u)  *rx_cons_sb(%u)"
+                                         "  rx pkt(%lu)  rx calls(%lu %lu)\n",
+                              fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
+                              fp->rx_comp_cons),
+                              le16_to_cpu(*fp->rx_cons_sb),
+                              bnx2x_hilo(&qstats->
+                                         total_unicast_packets_received_hi),
+                              fp->rx_calls, fp->rx_pkt);
+               }
+
+               for_each_queue(bp, i) {
+                       struct bnx2x_fastpath *fp = &bp->fp[i];
+                       struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+                       struct netdev_queue *txq =
+                               netdev_get_tx_queue(bp->dev, i);
+
+                       printk(KERN_DEBUG "%s: tx avail(%4u)  *tx_cons_sb(%u)"
+                                         "  tx pkt(%lu) tx calls (%lu)"
+                                         "  %s (Xoff events %u)\n",
+                              fp->name, bnx2x_tx_avail(fp),
+                              le16_to_cpu(*fp->tx_cons_sb),
+                              bnx2x_hilo(&qstats->
+                                         total_unicast_packets_transmitted_hi),
+                              fp->tx_pkt,
+                              (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
+                              qstats->driver_xoff);
+               }
+       }
+
+       bnx2x_hw_stats_post(bp);
+       bnx2x_storm_stats_post(bp);
+}
+
+static void bnx2x_port_stats_stop(struct bnx2x *bp)
+{
+       struct dmae_command *dmae;
+       u32 opcode;
+       int loader_idx = PMF_DMAE_C(bp);
+       u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+       bp->executer_idx = 0;
+
+       opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
+                 DMAE_CMD_C_ENABLE |
+                 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
+#ifdef __BIG_ENDIAN
+                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
+#else
+                 DMAE_CMD_ENDIANITY_DW_SWAP |
+#endif
+                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
+                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
+
+       if (bp->port.port_stx) {
+
+               dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+               if (bp->func_stx)
+                       dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
+               else
+                       dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
+               dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
+               dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
+               dmae->dst_addr_lo = bp->port.port_stx >> 2;
+               dmae->dst_addr_hi = 0;
+               dmae->len = sizeof(struct host_port_stats) >> 2;
+               if (bp->func_stx) {
+                       dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+                       dmae->comp_addr_hi = 0;
+                       dmae->comp_val = 1;
+               } else {
+                       dmae->comp_addr_lo =
+                               U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+                       dmae->comp_addr_hi =
+                               U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+                       dmae->comp_val = DMAE_COMP_VAL;
+
+                       *stats_comp = 0;
+               }
+       }
+
+       if (bp->func_stx) {
+
+               dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+               dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
+               dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
+               dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
+               dmae->dst_addr_lo = bp->func_stx >> 2;
+               dmae->dst_addr_hi = 0;
+               dmae->len = sizeof(struct host_func_stats) >> 2;
+               dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+               dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+               dmae->comp_val = DMAE_COMP_VAL;
+
+               *stats_comp = 0;
+       }
+}
+
+static void bnx2x_stats_stop(struct bnx2x *bp)
+{
+       int update = 0;
+
+       bnx2x_stats_comp(bp);
+
+       if (bp->port.pmf)
+               update = (bnx2x_hw_stats_update(bp) == 0);
+
+       update |= (bnx2x_storm_stats_update(bp) == 0);
+
+       if (update) {
+               bnx2x_net_stats_update(bp);
+
+               if (bp->port.pmf)
+                       bnx2x_port_stats_stop(bp);
+
+               bnx2x_hw_stats_post(bp);
+               bnx2x_stats_comp(bp);
+       }
+}
+
+static void bnx2x_stats_do_nothing(struct bnx2x *bp)
+{
+}
+
+static const struct {
+       void (*action)(struct bnx2x *bp);
+       enum bnx2x_stats_state next_state;
+} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
+/* state       event   */
+{
+/* DISABLED    PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
+/*             LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
+/*             UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
+/*             STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
+},
+{
+/* ENABLED     PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
+/*             LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
+/*             UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
+/*             STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
+}
+};
+
+void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
+{
+       enum bnx2x_stats_state state;
+
+       if (unlikely(bp->panic))
+               return;
+
+       /* Protect a state change flow */
+       spin_lock_bh(&bp->stats_lock);
+       state = bp->stats_state;
+       bp->stats_state = bnx2x_stats_stm[state][event].next_state;
+       spin_unlock_bh(&bp->stats_lock);
+
+       bnx2x_stats_stm[state][event].action(bp);
+
+       if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
+               DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
+                  state, event, bp->stats_state);
+}
+
+static void bnx2x_port_stats_base_init(struct bnx2x *bp)
+{
+       struct dmae_command *dmae;
+       u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+       /* sanity */
+       if (!bp->port.pmf || !bp->port.port_stx) {
+               BNX2X_ERR("BUG!\n");
+               return;
+       }
+
+       bp->executer_idx = 0;
+
+       dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+       dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
+                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
+                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
+#ifdef __BIG_ENDIAN
+                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
+#else
+                       DMAE_CMD_ENDIANITY_DW_SWAP |
+#endif
+                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
+                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
+       dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
+       dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
+       dmae->dst_addr_lo = bp->port.port_stx >> 2;
+       dmae->dst_addr_hi = 0;
+       dmae->len = sizeof(struct host_port_stats) >> 2;
+       dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+       dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+       dmae->comp_val = DMAE_COMP_VAL;
+
+       *stats_comp = 0;
+       bnx2x_hw_stats_post(bp);
+       bnx2x_stats_comp(bp);
+}
+
+static void bnx2x_func_stats_base_init(struct bnx2x *bp)
+{
+       int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
+       int port = BP_PORT(bp);
+       int func;
+       u32 func_stx;
+
+       /* sanity */
+       if (!bp->port.pmf || !bp->func_stx) {
+               BNX2X_ERR("BUG!\n");
+               return;
+       }
+
+       /* save our func_stx */
+       func_stx = bp->func_stx;
+
+       for (vn = VN_0; vn < vn_max; vn++) {
+               func = 2*vn + port;
+
+               bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
+               bnx2x_func_stats_init(bp);
+               bnx2x_hw_stats_post(bp);
+               bnx2x_stats_comp(bp);
+       }
+
+       /* restore our func_stx */
+       bp->func_stx = func_stx;
+}
+
+static void bnx2x_func_stats_base_update(struct bnx2x *bp)
+{
+       struct dmae_command *dmae = &bp->stats_dmae;
+       u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+       /* sanity */
+       if (!bp->func_stx) {
+               BNX2X_ERR("BUG!\n");
+               return;
+       }
+
+       bp->executer_idx = 0;
+       memset(dmae, 0, sizeof(struct dmae_command));
+
+       dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
+                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
+                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
+#ifdef __BIG_ENDIAN
+                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
+#else
+                       DMAE_CMD_ENDIANITY_DW_SWAP |
+#endif
+                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
+                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
+       dmae->src_addr_lo = bp->func_stx >> 2;
+       dmae->src_addr_hi = 0;
+       dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
+       dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
+       dmae->len = sizeof(struct host_func_stats) >> 2;
+       dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+       dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+       dmae->comp_val = DMAE_COMP_VAL;
+
+       *stats_comp = 0;
+       bnx2x_hw_stats_post(bp);
+       bnx2x_stats_comp(bp);
+}
+
+void bnx2x_stats_init(struct bnx2x *bp)
+{
+       int port = BP_PORT(bp);
+       int func = BP_FUNC(bp);
+       int i;
+
+       bp->stats_pending = 0;
+       bp->executer_idx = 0;
+       bp->stats_counter = 0;
+
+       /* port and func stats for management */
+       if (!BP_NOMCP(bp)) {
+               bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
+               bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
+
+       } else {
+               bp->port.port_stx = 0;
+               bp->func_stx = 0;
+       }
+       DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
+          bp->port.port_stx, bp->func_stx);
+
+       /* port stats */
+       memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
+       bp->port.old_nig_stats.brb_discard =
+                       REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
+       bp->port.old_nig_stats.brb_truncate =
+                       REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
+       REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
+                   &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
+       REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
+                   &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
+
+       /* function stats */
+       for_each_queue(bp, i) {
+               struct bnx2x_fastpath *fp = &bp->fp[i];
+
+               memset(&fp->old_tclient, 0,
+                      sizeof(struct tstorm_per_client_stats));
+               memset(&fp->old_uclient, 0,
+                      sizeof(struct ustorm_per_client_stats));
+               memset(&fp->old_xclient, 0,
+                      sizeof(struct xstorm_per_client_stats));
+               memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
+       }
+
+       memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
+       memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
+
+       bp->stats_state = STATS_STATE_DISABLED;
+
+       if (bp->port.pmf) {
+               if (bp->port.port_stx)
+                       bnx2x_port_stats_base_init(bp);
+
+               if (bp->func_stx)
+                       bnx2x_func_stats_base_init(bp);
+
+       } else if (bp->func_stx)
+               bnx2x_func_stats_base_update(bp);
+}
diff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/bnx2x/bnx2x_stats.h
new file mode 100644 (file)
index 0000000..38a4e90
--- /dev/null
@@ -0,0 +1,239 @@
+/* bnx2x_stats.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ */
+
+#ifndef BNX2X_STATS_H
+#define BNX2X_STATS_H
+
+#include <linux/types.h>
+
+struct bnx2x_eth_q_stats {
+       u32 total_bytes_received_hi;
+       u32 total_bytes_received_lo;
+       u32 total_bytes_transmitted_hi;
+       u32 total_bytes_transmitted_lo;
+       u32 total_unicast_packets_received_hi;
+       u32 total_unicast_packets_received_lo;
+       u32 total_multicast_packets_received_hi;
+       u32 total_multicast_packets_received_lo;
+       u32 total_broadcast_packets_received_hi;
+       u32 total_broadcast_packets_received_lo;
+       u32 total_unicast_packets_transmitted_hi;
+       u32 total_unicast_packets_transmitted_lo;
+       u32 total_multicast_packets_transmitted_hi;
+       u32 total_multicast_packets_transmitted_lo;
+       u32 total_broadcast_packets_transmitted_hi;
+       u32 total_broadcast_packets_transmitted_lo;
+       u32 valid_bytes_received_hi;
+       u32 valid_bytes_received_lo;
+
+       u32 error_bytes_received_hi;
+       u32 error_bytes_received_lo;
+       u32 etherstatsoverrsizepkts_hi;
+       u32 etherstatsoverrsizepkts_lo;
+       u32 no_buff_discard_hi;
+       u32 no_buff_discard_lo;
+
+       u32 driver_xoff;
+       u32 rx_err_discard_pkt;
+       u32 rx_skb_alloc_failed;
+       u32 hw_csum_err;
+};
+
+#define BNX2X_NUM_Q_STATS              13
+#define Q_STATS_OFFSET32(stat_name) \
+                       (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
+
+struct nig_stats {
+       u32 brb_discard;
+       u32 brb_packet;
+       u32 brb_truncate;
+       u32 flow_ctrl_discard;
+       u32 flow_ctrl_octets;
+       u32 flow_ctrl_packet;
+       u32 mng_discard;
+       u32 mng_octet_inp;
+       u32 mng_octet_out;
+       u32 mng_packet_inp;
+       u32 mng_packet_out;
+       u32 pbf_octets;
+       u32 pbf_packet;
+       u32 safc_inp;
+       u32 egress_mac_pkt0_lo;
+       u32 egress_mac_pkt0_hi;
+       u32 egress_mac_pkt1_lo;
+       u32 egress_mac_pkt1_hi;
+};
+
+
+enum bnx2x_stats_event {
+       STATS_EVENT_PMF = 0,
+       STATS_EVENT_LINK_UP,
+       STATS_EVENT_UPDATE,
+       STATS_EVENT_STOP,
+       STATS_EVENT_MAX
+};
+
+enum bnx2x_stats_state {
+       STATS_STATE_DISABLED = 0,
+       STATS_STATE_ENABLED,
+       STATS_STATE_MAX
+};
+
+struct bnx2x_eth_stats {
+       u32 total_bytes_received_hi;
+       u32 total_bytes_received_lo;
+       u32 total_bytes_transmitted_hi;
+       u32 total_bytes_transmitted_lo;
+       u32 total_unicast_packets_received_hi;
+       u32 total_unicast_packets_received_lo;
+       u32 total_multicast_packets_received_hi;
+       u32 total_multicast_packets_received_lo;
+       u32 total_broadcast_packets_received_hi;
+       u32 total_broadcast_packets_received_lo;
+       u32 total_unicast_packets_transmitted_hi;
+       u32 total_unicast_packets_transmitted_lo;
+       u32 total_multicast_packets_transmitted_hi;
+       u32 total_multicast_packets_transmitted_lo;
+       u32 total_broadcast_packets_transmitted_hi;
+       u32 total_broadcast_packets_transmitted_lo;
+       u32 valid_bytes_received_hi;
+       u32 valid_bytes_received_lo;
+
+       u32 error_bytes_received_hi;
+       u32 error_bytes_received_lo;
+       u32 etherstatsoverrsizepkts_hi;
+       u32 etherstatsoverrsizepkts_lo;
+       u32 no_buff_discard_hi;
+       u32 no_buff_discard_lo;
+
+       u32 rx_stat_ifhcinbadoctets_hi;
+       u32 rx_stat_ifhcinbadoctets_lo;
+       u32 tx_stat_ifhcoutbadoctets_hi;
+       u32 tx_stat_ifhcoutbadoctets_lo;
+       u32 rx_stat_dot3statsfcserrors_hi;
+       u32 rx_stat_dot3statsfcserrors_lo;
+       u32 rx_stat_dot3statsalignmenterrors_hi;
+       u32 rx_stat_dot3statsalignmenterrors_lo;
+       u32 rx_stat_dot3statscarriersenseerrors_hi;
+       u32 rx_stat_dot3statscarriersenseerrors_lo;
+       u32 rx_stat_falsecarriererrors_hi;
+       u32 rx_stat_falsecarriererrors_lo;
+       u32 rx_stat_etherstatsundersizepkts_hi;
+       u32 rx_stat_etherstatsundersizepkts_lo;
+       u32 rx_stat_dot3statsframestoolong_hi;
+       u32 rx_stat_dot3statsframestoolong_lo;
+       u32 rx_stat_etherstatsfragments_hi;
+       u32 rx_stat_etherstatsfragments_lo;
+       u32 rx_stat_etherstatsjabbers_hi;
+       u32 rx_stat_etherstatsjabbers_lo;
+       u32 rx_stat_maccontrolframesreceived_hi;
+       u32 rx_stat_maccontrolframesreceived_lo;
+       u32 rx_stat_bmac_xpf_hi;
+       u32 rx_stat_bmac_xpf_lo;
+       u32 rx_stat_bmac_xcf_hi;
+       u32 rx_stat_bmac_xcf_lo;
+       u32 rx_stat_xoffstateentered_hi;
+       u32 rx_stat_xoffstateentered_lo;
+       u32 rx_stat_xonpauseframesreceived_hi;
+       u32 rx_stat_xonpauseframesreceived_lo;
+       u32 rx_stat_xoffpauseframesreceived_hi;
+       u32 rx_stat_xoffpauseframesreceived_lo;
+       u32 tx_stat_outxonsent_hi;
+       u32 tx_stat_outxonsent_lo;
+       u32 tx_stat_outxoffsent_hi;
+       u32 tx_stat_outxoffsent_lo;
+       u32 tx_stat_flowcontroldone_hi;
+       u32 tx_stat_flowcontroldone_lo;
+       u32 tx_stat_etherstatscollisions_hi;
+       u32 tx_stat_etherstatscollisions_lo;
+       u32 tx_stat_dot3statssinglecollisionframes_hi;
+       u32 tx_stat_dot3statssinglecollisionframes_lo;
+       u32 tx_stat_dot3statsmultiplecollisionframes_hi;
+       u32 tx_stat_dot3statsmultiplecollisionframes_lo;
+       u32 tx_stat_dot3statsdeferredtransmissions_hi;
+       u32 tx_stat_dot3statsdeferredtransmissions_lo;
+       u32 tx_stat_dot3statsexcessivecollisions_hi;
+       u32 tx_stat_dot3statsexcessivecollisions_lo;
+       u32 tx_stat_dot3statslatecollisions_hi;
+       u32 tx_stat_dot3statslatecollisions_lo;
+       u32 tx_stat_etherstatspkts64octets_hi;
+       u32 tx_stat_etherstatspkts64octets_lo;
+       u32 tx_stat_etherstatspkts65octetsto127octets_hi;
+       u32 tx_stat_etherstatspkts65octetsto127octets_lo;
+       u32 tx_stat_etherstatspkts128octetsto255octets_hi;
+       u32 tx_stat_etherstatspkts128octetsto255octets_lo;
+       u32 tx_stat_etherstatspkts256octetsto511octets_hi;
+       u32 tx_stat_etherstatspkts256octetsto511octets_lo;
+       u32 tx_stat_etherstatspkts512octetsto1023octets_hi;
+       u32 tx_stat_etherstatspkts512octetsto1023octets_lo;
+       u32 tx_stat_etherstatspkts1024octetsto1522octets_hi;
+       u32 tx_stat_etherstatspkts1024octetsto1522octets_lo;
+       u32 tx_stat_etherstatspktsover1522octets_hi;
+       u32 tx_stat_etherstatspktsover1522octets_lo;
+       u32 tx_stat_bmac_2047_hi;
+       u32 tx_stat_bmac_2047_lo;
+       u32 tx_stat_bmac_4095_hi;
+       u32 tx_stat_bmac_4095_lo;
+       u32 tx_stat_bmac_9216_hi;
+       u32 tx_stat_bmac_9216_lo;
+       u32 tx_stat_bmac_16383_hi;
+       u32 tx_stat_bmac_16383_lo;
+       u32 tx_stat_dot3statsinternalmactransmiterrors_hi;
+       u32 tx_stat_dot3statsinternalmactransmiterrors_lo;
+       u32 tx_stat_bmac_ufl_hi;
+       u32 tx_stat_bmac_ufl_lo;
+
+       u32 pause_frames_received_hi;
+       u32 pause_frames_received_lo;
+       u32 pause_frames_sent_hi;
+       u32 pause_frames_sent_lo;
+
+       u32 etherstatspkts1024octetsto1522octets_hi;
+       u32 etherstatspkts1024octetsto1522octets_lo;
+       u32 etherstatspktsover1522octets_hi;
+       u32 etherstatspktsover1522octets_lo;
+
+       u32 brb_drop_hi;
+       u32 brb_drop_lo;
+       u32 brb_truncate_hi;
+       u32 brb_truncate_lo;
+
+       u32 mac_filter_discard;
+       u32 xxoverflow_discard;
+       u32 brb_truncate_discard;
+       u32 mac_discard;
+
+       u32 driver_xoff;
+       u32 rx_err_discard_pkt;
+       u32 rx_skb_alloc_failed;
+       u32 hw_csum_err;
+
+       u32 nig_timer_max;
+};
+
+#define BNX2X_NUM_STATS                        43
+#define STATS_OFFSET32(stat_name) \
+                       (offsetof(struct bnx2x_eth_stats, stat_name) / 4)
+
+/* Forward declaration */
+struct bnx2x;
+
+
+void bnx2x_stats_init(struct bnx2x *bp);
+
+extern const u32 dmae_reg_go_c[];
+extern int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
+                        u32 data_hi, u32 data_lo, int common);
+
+
+#endif /* BNX2X_STATS_H */
index 3662d6e446a928ef544c5a45e12313c336fb2cd6..c746b331771d38f38771c89aa492ede20cbdf711 100644 (file)
@@ -682,7 +682,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
                        client_info->ntt = 0;
                }
 
-               if (!list_empty(&bond->vlan_list)) {
+               if (bond->vlgrp) {
                        if (!vlan_get_tag(skb, &client_info->vlan_id))
                                client_info->tag = 1;
                }
@@ -815,7 +815,7 @@ static int rlb_initialize(struct bonding *bond)
 
        /*initialize packet type*/
        pk_type->type = cpu_to_be16(ETH_P_ARP);
-       pk_type->dev = NULL;
+       pk_type->dev = bond->dev;
        pk_type->func = rlb_arp_recv;
 
        /* register to receive ARPs */
@@ -904,7 +904,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
                skb->priority = TC_PRIO_CONTROL;
                skb->dev = slave->dev;
 
-               if (!list_empty(&bond->vlan_list)) {
+               if (bond->vlgrp) {
                        struct vlan_entry *vlan;
 
                        vlan = bond_next_vlan(bond,
index 20f45cbf961a70c87c3bb2a5ec9f2f1ab27228d8..2cc4cfc31892cd85458dec20b6c46401fdb90d9b 100644 (file)
@@ -424,6 +424,7 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
 {
        unsigned short uninitialized_var(vlan_id);
 
+       /* Test vlan_list not vlgrp to catch and handle 802.1p tags */
        if (!list_empty(&bond->vlan_list) &&
            !(slave_dev->features & NETIF_F_HW_VLAN_TX) &&
            vlan_get_tag(skb, &vlan_id) == 0) {
@@ -487,7 +488,9 @@ static void bond_vlan_rx_register(struct net_device *bond_dev,
        struct slave *slave;
        int i;
 
+       write_lock(&bond->lock);
        bond->vlgrp = grp;
+       write_unlock(&bond->lock);
 
        bond_for_each_slave(bond, slave, i) {
                struct net_device *slave_dev = slave->dev;
@@ -567,10 +570,8 @@ static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *sla
        struct vlan_entry *vlan;
        const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
 
-       write_lock_bh(&bond->lock);
-
-       if (list_empty(&bond->vlan_list))
-               goto out;
+       if (!bond->vlgrp)
+               return;
 
        if ((slave_dev->features & NETIF_F_HW_VLAN_RX) &&
            slave_ops->ndo_vlan_rx_register)
@@ -578,13 +579,10 @@ static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *sla
 
        if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
            !(slave_ops->ndo_vlan_rx_add_vid))
-               goto out;
+               return;
 
        list_for_each_entry(vlan, &bond->vlan_list, vlan_list)
                slave_ops->ndo_vlan_rx_add_vid(slave_dev, vlan->vlan_id);
-
-out:
-       write_unlock_bh(&bond->lock);
 }
 
 static void bond_del_vlans_from_slave(struct bonding *bond,
@@ -594,16 +592,16 @@ static void bond_del_vlans_from_slave(struct bonding *bond,
        struct vlan_entry *vlan;
        struct net_device *vlan_dev;
 
-       write_lock_bh(&bond->lock);
-
-       if (list_empty(&bond->vlan_list))
-               goto out;
+       if (!bond->vlgrp)
+               return;
 
        if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
            !(slave_ops->ndo_vlan_rx_kill_vid))
                goto unreg;
 
        list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
+               if (!vlan->vlan_id)
+                       continue;
                /* Save and then restore vlan_dev in the grp array,
                 * since the slave's driver might clear it.
                 */
@@ -616,9 +614,6 @@ unreg:
        if ((slave_dev->features & NETIF_F_HW_VLAN_RX) &&
            slave_ops->ndo_vlan_rx_register)
                slave_ops->ndo_vlan_rx_register(slave_dev, NULL);
-
-out:
-       write_unlock_bh(&bond->lock);
 }
 
 /*------------------------------- Link status -------------------------------*/
@@ -1443,7 +1438,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        /* no need to lock since we're protected by rtnl_lock */
        if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
                pr_debug("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
-               if (!list_empty(&bond->vlan_list)) {
+               if (bond->vlgrp) {
                        pr_err("%s: Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n",
                               bond_dev->name, slave_dev->name, bond_dev->name);
                        return -EPERM;
@@ -1942,7 +1937,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
                 */
                memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
 
-               if (list_empty(&bond->vlan_list)) {
+               if (!bond->vlgrp) {
                        bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
                } else {
                        pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
@@ -2134,9 +2129,9 @@ static int bond_release_all(struct net_device *bond_dev)
         */
        memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
 
-       if (list_empty(&bond->vlan_list))
+       if (!bond->vlgrp) {
                bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
-       else {
+       } else {
                pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
                           bond_dev->name, bond_dev->name);
                pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
@@ -2569,7 +2564,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
                if (!targets[i])
                        break;
                pr_debug("basa: target %x\n", targets[i]);
-               if (list_empty(&bond->vlan_list)) {
+               if (!bond->vlgrp) {
                        pr_debug("basa: empty vlan: arp_send\n");
                        bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
                                      bond->master_ip, 0);
@@ -2658,6 +2653,9 @@ static void bond_send_gratuitous_arp(struct bonding *bond)
                                bond->master_ip, 0);
        }
 
+       if (!bond->vlgrp)
+               return;
+
        list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
                vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
                if (vlan->vlan_ip) {
@@ -3590,6 +3588,8 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
                }
 
                list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
+                       if (!bond->vlgrp)
+                               continue;
                        vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
                        if (vlan_dev == event_dev) {
                                switch (event) {
@@ -4686,6 +4686,7 @@ static void bond_work_cancel_all(struct bonding *bond)
 static void bond_uninit(struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
+       struct vlan_entry *vlan, *tmp;
 
        bond_netpoll_cleanup(bond_dev);
 
@@ -4699,6 +4700,11 @@ static void bond_uninit(struct net_device *bond_dev)
        bond_remove_proc_entry(bond);
 
        __hw_addr_flush(&bond->mc_list);
+
+       list_for_each_entry_safe(vlan, tmp, &bond->vlan_list, vlan_list) {
+               list_del(&vlan->vlan_list);
+               kfree(vlan);
+       }
 }
 
 /*------------------------- Module initialization ---------------------------*/
index 1a99764870996222e2a300230291941981833b99..c311aed9bd022c2870b33fdc8c88a953d6cceb93 100644 (file)
@@ -313,19 +313,26 @@ static ssize_t bonding_store_mode(struct device *d,
                       bond->dev->name, (int)strlen(buf) - 1, buf);
                ret = -EINVAL;
                goto out;
-       } else {
-               if (bond->params.mode == BOND_MODE_8023AD)
-                       bond_unset_master_3ad_flags(bond);
+       }
+       if ((new_value == BOND_MODE_ALB ||
+            new_value == BOND_MODE_TLB) &&
+           bond->params.arp_interval) {
+               pr_err("%s: %s mode is incompatible with arp monitoring.\n",
+                      bond->dev->name, bond_mode_tbl[new_value].modename);
+               ret = -EINVAL;
+               goto out;
+       }
+       if (bond->params.mode == BOND_MODE_8023AD)
+               bond_unset_master_3ad_flags(bond);
 
-               if (bond->params.mode == BOND_MODE_ALB)
-                       bond_unset_master_alb_flags(bond);
+       if (bond->params.mode == BOND_MODE_ALB)
+               bond_unset_master_alb_flags(bond);
 
-               bond->params.mode = new_value;
-               bond_set_mode_ops(bond, bond->params.mode);
-               pr_info("%s: setting mode to %s (%d).\n",
-                       bond->dev->name, bond_mode_tbl[new_value].modename,
-                      new_value);
-       }
+       bond->params.mode = new_value;
+       bond_set_mode_ops(bond, bond->params.mode);
+       pr_info("%s: setting mode to %s (%d).\n",
+               bond->dev->name, bond_mode_tbl[new_value].modename,
+               new_value);
 out:
        return ret;
 }
@@ -510,7 +517,13 @@ static ssize_t bonding_store_arp_interval(struct device *d,
                ret = -EINVAL;
                goto out;
        }
-
+       if (bond->params.mode == BOND_MODE_ALB ||
+           bond->params.mode == BOND_MODE_TLB) {
+               pr_info("%s: ARP monitoring cannot be used with ALB/TLB. Only MII monitoring is supported on %s.\n",
+                       bond->dev->name, bond->dev->name);
+               ret = -EINVAL;
+               goto out;
+       }
        pr_info("%s: Setting ARP monitoring interval to %d.\n",
                bond->dev->name, new_value);
        bond->params.arp_interval = new_value;
index 6c948037fc78af607eb64c0a40fae04d60cd5d78..f5058ff2b210da07dd1411bdd8a12f1dafe9ddb2 100644 (file)
@@ -165,6 +165,9 @@ static ssize_t dbgfs_state(struct file *file, char __user *user_buf,
        len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
                        "Next RX len: %d\n", cfspi->rx_npck_len);
 
+       if (len > DEBUGFS_BUF_SIZE)
+               len = DEBUGFS_BUF_SIZE;
+
        size = simple_read_from_buffer(user_buf, count, ppos, buf, len);
        kfree(buf);
 
index 2c5227c02fa02c2ec00fee49ec550abf042d93f0..9d9e45394433f134ef8a344e69fe1d42039e7fcc 100644 (file)
@@ -73,6 +73,15 @@ config CAN_JANZ_ICAN3
          This driver can also be built as a module. If so, the module will be
          called janz-ican3.ko.
 
+config HAVE_CAN_FLEXCAN
+       bool
+
+config CAN_FLEXCAN
+       tristate "Support for Freescale FLEXCAN based chips"
+       depends on CAN_DEV && HAVE_CAN_FLEXCAN
+       ---help---
+         Say Y here if you want support for Freescale FlexCAN.
+
 source "drivers/net/can/mscan/Kconfig"
 
 source "drivers/net/can/sja1000/Kconfig"
index 9047cd066fea2d36b06d624765dc25b9e3bc6f98..00575373bbd0eed976566419be2ed9c9e34c8cf2 100644 (file)
@@ -16,5 +16,6 @@ obj-$(CONFIG_CAN_TI_HECC)     += ti_hecc.o
 obj-$(CONFIG_CAN_MCP251X)      += mcp251x.o
 obj-$(CONFIG_CAN_BFIN)         += bfin_can.o
 obj-$(CONFIG_CAN_JANZ_ICAN3)   += janz-ican3.o
+obj-$(CONFIG_CAN_FLEXCAN)      += flexcan.o
 
 ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
new file mode 100644 (file)
index 0000000..ef443a0
--- /dev/null
@@ -0,0 +1,1030 @@
+/*
+ * flexcan.c - FLEXCAN CAN controller driver
+ *
+ * Copyright (c) 2005-2006 Varma Electronics Oy
+ * Copyright (c) 2009 Sascha Hauer, Pengutronix
+ * Copyright (c) 2010 Marc Kleine-Budde, Pengutronix
+ *
+ * Based on code originally by Andrey Volkov <avolkov@varma-el.com>
+ *
+ * LICENCE:
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/platform/flexcan.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <mach/clock.h>
+
+#define DRV_NAME                       "flexcan"
+
+/* 8 for RX fifo and 2 error handling */
+#define FLEXCAN_NAPI_WEIGHT            (8 + 2)
+
+/* FLEXCAN module configuration register (CANMCR) bits */
+#define FLEXCAN_MCR_MDIS               BIT(31)
+#define FLEXCAN_MCR_FRZ                        BIT(30)
+#define FLEXCAN_MCR_FEN                        BIT(29)
+#define FLEXCAN_MCR_HALT               BIT(28)
+#define FLEXCAN_MCR_NOT_RDY            BIT(27)
+#define FLEXCAN_MCR_WAK_MSK            BIT(26)
+#define FLEXCAN_MCR_SOFTRST            BIT(25)
+#define FLEXCAN_MCR_FRZ_ACK            BIT(24)
+#define FLEXCAN_MCR_SUPV               BIT(23)
+#define FLEXCAN_MCR_SLF_WAK            BIT(22)
+#define FLEXCAN_MCR_WRN_EN             BIT(21)
+#define FLEXCAN_MCR_LPM_ACK            BIT(20)
+#define FLEXCAN_MCR_WAK_SRC            BIT(19)
+#define FLEXCAN_MCR_DOZE               BIT(18)
+#define FLEXCAN_MCR_SRX_DIS            BIT(17)
+#define FLEXCAN_MCR_BCC                        BIT(16)
+#define FLEXCAN_MCR_LPRIO_EN           BIT(13)
+#define FLEXCAN_MCR_AEN                        BIT(12)
+#define FLEXCAN_MCR_MAXMB(x)           ((x) & 0xf)
+#define FLEXCAN_MCR_IDAM_A             (0 << 8)
+#define FLEXCAN_MCR_IDAM_B             (1 << 8)
+#define FLEXCAN_MCR_IDAM_C             (2 << 8)
+#define FLEXCAN_MCR_IDAM_D             (3 << 8)
+
+/* FLEXCAN control register (CANCTRL) bits */
+#define FLEXCAN_CTRL_PRESDIV(x)                (((x) & 0xff) << 24)
+#define FLEXCAN_CTRL_RJW(x)            (((x) & 0x03) << 22)
+#define FLEXCAN_CTRL_PSEG1(x)          (((x) & 0x07) << 19)
+#define FLEXCAN_CTRL_PSEG2(x)          (((x) & 0x07) << 16)
+#define FLEXCAN_CTRL_BOFF_MSK          BIT(15)
+#define FLEXCAN_CTRL_ERR_MSK           BIT(14)
+#define FLEXCAN_CTRL_CLK_SRC           BIT(13)
+#define FLEXCAN_CTRL_LPB               BIT(12)
+#define FLEXCAN_CTRL_TWRN_MSK          BIT(11)
+#define FLEXCAN_CTRL_RWRN_MSK          BIT(10)
+#define FLEXCAN_CTRL_SMP               BIT(7)
+#define FLEXCAN_CTRL_BOFF_REC          BIT(6)
+#define FLEXCAN_CTRL_TSYN              BIT(5)
+#define FLEXCAN_CTRL_LBUF              BIT(4)
+#define FLEXCAN_CTRL_LOM               BIT(3)
+#define FLEXCAN_CTRL_PROPSEG(x)                ((x) & 0x07)
+#define FLEXCAN_CTRL_ERR_BUS           (FLEXCAN_CTRL_ERR_MSK)
+#define FLEXCAN_CTRL_ERR_STATE \
+       (FLEXCAN_CTRL_TWRN_MSK | FLEXCAN_CTRL_RWRN_MSK | \
+        FLEXCAN_CTRL_BOFF_MSK)
+#define FLEXCAN_CTRL_ERR_ALL \
+       (FLEXCAN_CTRL_ERR_BUS | FLEXCAN_CTRL_ERR_STATE)
+
+/* FLEXCAN error and status register (ESR) bits */
+#define FLEXCAN_ESR_TWRN_INT           BIT(17)
+#define FLEXCAN_ESR_RWRN_INT           BIT(16)
+#define FLEXCAN_ESR_BIT1_ERR           BIT(15)
+#define FLEXCAN_ESR_BIT0_ERR           BIT(14)
+#define FLEXCAN_ESR_ACK_ERR            BIT(13)
+#define FLEXCAN_ESR_CRC_ERR            BIT(12)
+#define FLEXCAN_ESR_FRM_ERR            BIT(11)
+#define FLEXCAN_ESR_STF_ERR            BIT(10)
+#define FLEXCAN_ESR_TX_WRN             BIT(9)
+#define FLEXCAN_ESR_RX_WRN             BIT(8)
+#define FLEXCAN_ESR_IDLE               BIT(7)
+#define FLEXCAN_ESR_TXRX               BIT(6)
+#define FLEXCAN_EST_FLT_CONF_SHIFT     (4)
+#define FLEXCAN_ESR_FLT_CONF_MASK      (0x3 << FLEXCAN_EST_FLT_CONF_SHIFT)
+#define FLEXCAN_ESR_FLT_CONF_ACTIVE    (0x0 << FLEXCAN_EST_FLT_CONF_SHIFT)
+#define FLEXCAN_ESR_FLT_CONF_PASSIVE   (0x1 << FLEXCAN_EST_FLT_CONF_SHIFT)
+#define FLEXCAN_ESR_BOFF_INT           BIT(2)
+#define FLEXCAN_ESR_ERR_INT            BIT(1)
+#define FLEXCAN_ESR_WAK_INT            BIT(0)
+#define FLEXCAN_ESR_ERR_BUS \
+       (FLEXCAN_ESR_BIT1_ERR | FLEXCAN_ESR_BIT0_ERR | \
+        FLEXCAN_ESR_ACK_ERR | FLEXCAN_ESR_CRC_ERR | \
+        FLEXCAN_ESR_FRM_ERR | FLEXCAN_ESR_STF_ERR)
+#define FLEXCAN_ESR_ERR_STATE \
+       (FLEXCAN_ESR_TWRN_INT | FLEXCAN_ESR_RWRN_INT | FLEXCAN_ESR_BOFF_INT)
+#define FLEXCAN_ESR_ERR_ALL \
+       (FLEXCAN_ESR_ERR_BUS | FLEXCAN_ESR_ERR_STATE)
+
+/* FLEXCAN interrupt flag register (IFLAG) bits */
+#define FLEXCAN_TX_BUF_ID              8
+#define FLEXCAN_IFLAG_BUF(x)           BIT(x)
+#define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7)
+#define FLEXCAN_IFLAG_RX_FIFO_WARN     BIT(6)
+#define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE        BIT(5)
+#define FLEXCAN_IFLAG_DEFAULT \
+       (FLEXCAN_IFLAG_RX_FIFO_OVERFLOW | FLEXCAN_IFLAG_RX_FIFO_AVAILABLE | \
+        FLEXCAN_IFLAG_BUF(FLEXCAN_TX_BUF_ID))
+
+/* FLEXCAN message buffers */
+#define FLEXCAN_MB_CNT_CODE(x)         (((x) & 0xf) << 24)
+#define FLEXCAN_MB_CNT_SRR             BIT(22)
+#define FLEXCAN_MB_CNT_IDE             BIT(21)
+#define FLEXCAN_MB_CNT_RTR             BIT(20)
+#define FLEXCAN_MB_CNT_LENGTH(x)       (((x) & 0xf) << 16)
+#define FLEXCAN_MB_CNT_TIMESTAMP(x)    ((x) & 0xffff)
+
+#define FLEXCAN_MB_CODE_MASK           (0xf0ffffff)
+
+/* Structure of the message buffer */
+struct flexcan_mb {
+       u32 can_ctrl;
+       u32 can_id;
+       u32 data[2];
+};
+
+/* Structure of the hardware registers */
+struct flexcan_regs {
+       u32 mcr;                /* 0x00 */
+       u32 ctrl;               /* 0x04 */
+       u32 timer;              /* 0x08 */
+       u32 _reserved1;         /* 0x0c */
+       u32 rxgmask;            /* 0x10 */
+       u32 rx14mask;           /* 0x14 */
+       u32 rx15mask;           /* 0x18 */
+       u32 ecr;                /* 0x1c */
+       u32 esr;                /* 0x20 */
+       u32 imask2;             /* 0x24 */
+       u32 imask1;             /* 0x28 */
+       u32 iflag2;             /* 0x2c */
+       u32 iflag1;             /* 0x30 */
+       u32 _reserved2[19];
+       struct flexcan_mb cantxfg[64];
+};
+
+struct flexcan_priv {
+       struct can_priv can;
+       struct net_device *dev;
+       struct napi_struct napi;
+
+       void __iomem *base;
+       u32 reg_esr;
+       u32 reg_ctrl_default;
+
+       struct clk *clk;
+       struct flexcan_platform_data *pdata;
+};
+
+static struct can_bittiming_const flexcan_bittiming_const = {
+       .name = DRV_NAME,
+       .tseg1_min = 4,
+       .tseg1_max = 16,
+       .tseg2_min = 2,
+       .tseg2_max = 8,
+       .sjw_max = 4,
+       .brp_min = 1,
+       .brp_max = 256,
+       .brp_inc = 1,
+};
+
+/*
+ * Switch transceiver on or off
+ */
+static void flexcan_transceiver_switch(const struct flexcan_priv *priv, int on)
+{
+       if (priv->pdata && priv->pdata->transceiver_switch)
+               priv->pdata->transceiver_switch(on);
+}
+
+static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv,
+                                             u32 reg_esr)
+{
+       return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
+               (reg_esr & FLEXCAN_ESR_ERR_BUS);
+}
+
+static inline void flexcan_chip_enable(struct flexcan_priv *priv)
+{
+       struct flexcan_regs __iomem *regs = priv->base;
+       u32 reg;
+
+       reg = readl(&regs->mcr);
+       reg &= ~FLEXCAN_MCR_MDIS;
+       writel(reg, &regs->mcr);
+
+       udelay(10);
+}
+
+static inline void flexcan_chip_disable(struct flexcan_priv *priv)
+{
+       struct flexcan_regs __iomem *regs = priv->base;
+       u32 reg;
+
+       reg = readl(&regs->mcr);
+       reg |= FLEXCAN_MCR_MDIS;
+       writel(reg, &regs->mcr);
+}
+
+static int flexcan_get_berr_counter(const struct net_device *dev,
+                                   struct can_berr_counter *bec)
+{
+       const struct flexcan_priv *priv = netdev_priv(dev);
+       struct flexcan_regs __iomem *regs = priv->base;
+       u32 reg = readl(&regs->ecr);
+
+       bec->txerr = (reg >> 0) & 0xff;
+       bec->rxerr = (reg >> 8) & 0xff;
+
+       return 0;
+}
+
+static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       const struct flexcan_priv *priv = netdev_priv(dev);
+       struct net_device_stats *stats = &dev->stats;
+       struct flexcan_regs __iomem *regs = priv->base;
+       struct can_frame *cf = (struct can_frame *)skb->data;
+       u32 can_id;
+       u32 ctrl = FLEXCAN_MB_CNT_CODE(0xc) | (cf->can_dlc << 16);
+
+       if (can_dropped_invalid_skb(dev, skb))
+               return NETDEV_TX_OK;
+
+       netif_stop_queue(dev);
+
+       if (cf->can_id & CAN_EFF_FLAG) {
+               can_id = cf->can_id & CAN_EFF_MASK;
+               ctrl |= FLEXCAN_MB_CNT_IDE | FLEXCAN_MB_CNT_SRR;
+       } else {
+               can_id = (cf->can_id & CAN_SFF_MASK) << 18;
+       }
+
+       if (cf->can_id & CAN_RTR_FLAG)
+               ctrl |= FLEXCAN_MB_CNT_RTR;
+
+       if (cf->can_dlc > 0) {
+               u32 data = be32_to_cpup((__be32 *)&cf->data[0]);
+               writel(data, &regs->cantxfg[FLEXCAN_TX_BUF_ID].data[0]);
+       }
+       if (cf->can_dlc > 3) {
+               u32 data = be32_to_cpup((__be32 *)&cf->data[4]);
+               writel(data, &regs->cantxfg[FLEXCAN_TX_BUF_ID].data[1]);
+       }
+
+       writel(can_id, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_id);
+       writel(ctrl, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
+
+       kfree_skb(skb);
+
+       /* tx_packets is incremented in flexcan_irq */
+       stats->tx_bytes += cf->can_dlc;
+
+       return NETDEV_TX_OK;
+}
+
+static void do_bus_err(struct net_device *dev,
+                      struct can_frame *cf, u32 reg_esr)
+{
+       struct flexcan_priv *priv = netdev_priv(dev);
+       int rx_errors = 0, tx_errors = 0;
+
+       cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+       if (reg_esr & FLEXCAN_ESR_BIT1_ERR) {
+               dev_dbg(dev->dev.parent, "BIT1_ERR irq\n");
+               cf->data[2] |= CAN_ERR_PROT_BIT1;
+               tx_errors = 1;
+       }
+       if (reg_esr & FLEXCAN_ESR_BIT0_ERR) {
+               dev_dbg(dev->dev.parent, "BIT0_ERR irq\n");
+               cf->data[2] |= CAN_ERR_PROT_BIT0;
+               tx_errors = 1;
+       }
+       if (reg_esr & FLEXCAN_ESR_ACK_ERR) {
+               dev_dbg(dev->dev.parent, "ACK_ERR irq\n");
+               cf->can_id |= CAN_ERR_ACK;
+               cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
+               tx_errors = 1;
+       }
+       if (reg_esr & FLEXCAN_ESR_CRC_ERR) {
+               dev_dbg(dev->dev.parent, "CRC_ERR irq\n");
+               cf->data[2] |= CAN_ERR_PROT_BIT;
+               cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+               rx_errors = 1;
+       }
+       if (reg_esr & FLEXCAN_ESR_FRM_ERR) {
+               dev_dbg(dev->dev.parent, "FRM_ERR irq\n");
+               cf->data[2] |= CAN_ERR_PROT_FORM;
+               rx_errors = 1;
+       }
+       if (reg_esr & FLEXCAN_ESR_STF_ERR) {
+               dev_dbg(dev->dev.parent, "STF_ERR irq\n");
+               cf->data[2] |= CAN_ERR_PROT_STUFF;
+               rx_errors = 1;
+       }
+
+       priv->can.can_stats.bus_error++;
+       if (rx_errors)
+               dev->stats.rx_errors++;
+       if (tx_errors)
+               dev->stats.tx_errors++;
+}
+
+static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr)
+{
+       struct sk_buff *skb;
+       struct can_frame *cf;
+
+       skb = alloc_can_err_skb(dev, &cf);
+       if (unlikely(!skb))
+               return 0;
+
+       do_bus_err(dev, cf, reg_esr);
+       netif_receive_skb(skb);
+
+       dev->stats.rx_packets++;
+       dev->stats.rx_bytes += cf->can_dlc;
+
+       return 1;
+}
+
+static void do_state(struct net_device *dev,
+                    struct can_frame *cf, enum can_state new_state)
+{
+       struct flexcan_priv *priv = netdev_priv(dev);
+       struct can_berr_counter bec;
+
+       flexcan_get_berr_counter(dev, &bec);
+
+       switch (priv->can.state) {
+       case CAN_STATE_ERROR_ACTIVE:
+               /*
+                * from: ERROR_ACTIVE
+                * to  : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF
+                * =>  : there was a warning int
+                */
+               if (new_state >= CAN_STATE_ERROR_WARNING &&
+                   new_state <= CAN_STATE_BUS_OFF) {
+                       dev_dbg(dev->dev.parent, "Error Warning IRQ\n");
+                       priv->can.can_stats.error_warning++;
+
+                       cf->can_id |= CAN_ERR_CRTL;
+                       cf->data[1] = (bec.txerr > bec.rxerr) ?
+                               CAN_ERR_CRTL_TX_WARNING :
+                               CAN_ERR_CRTL_RX_WARNING;
+               }
+       case CAN_STATE_ERROR_WARNING:   /* fallthrough */
+               /*
+                * from: ERROR_ACTIVE, ERROR_WARNING
+                * to  : ERROR_PASSIVE, BUS_OFF
+                * =>  : error passive int
+                */
+               if (new_state >= CAN_STATE_ERROR_PASSIVE &&
+                   new_state <= CAN_STATE_BUS_OFF) {
+                       dev_dbg(dev->dev.parent, "Error Passive IRQ\n");
+                       priv->can.can_stats.error_passive++;
+
+                       cf->can_id |= CAN_ERR_CRTL;
+                       cf->data[1] = (bec.txerr > bec.rxerr) ?
+                               CAN_ERR_CRTL_TX_PASSIVE :
+                               CAN_ERR_CRTL_RX_PASSIVE;
+               }
+               break;
+       case CAN_STATE_BUS_OFF:
+               dev_err(dev->dev.parent,
+                       "BUG! hardware recovered automatically from BUS_OFF\n");
+               break;
+       default:
+               break;
+       }
+
+       /* process state changes depending on the new state */
+       switch (new_state) {
+       case CAN_STATE_ERROR_ACTIVE:
+               dev_dbg(dev->dev.parent, "Error Active\n");
+               cf->can_id |= CAN_ERR_PROT;
+               cf->data[2] = CAN_ERR_PROT_ACTIVE;
+               break;
+       case CAN_STATE_BUS_OFF:
+               cf->can_id |= CAN_ERR_BUSOFF;
+               can_bus_off(dev);
+               break;
+       default:
+               break;
+       }
+}
+
+static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
+{
+       struct flexcan_priv *priv = netdev_priv(dev);
+       struct sk_buff *skb;
+       struct can_frame *cf;
+       enum can_state new_state;
+       int flt;
+
+       flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK;
+       if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) {
+               if (likely(!(reg_esr & (FLEXCAN_ESR_TX_WRN |
+                                       FLEXCAN_ESR_RX_WRN))))
+                       new_state = CAN_STATE_ERROR_ACTIVE;
+               else
+                       new_state = CAN_STATE_ERROR_WARNING;
+       } else if (unlikely(flt == FLEXCAN_ESR_FLT_CONF_PASSIVE))
+               new_state = CAN_STATE_ERROR_PASSIVE;
+       else
+               new_state = CAN_STATE_BUS_OFF;
+
+       /* state hasn't changed */
+       if (likely(new_state == priv->can.state))
+               return 0;
+
+       skb = alloc_can_err_skb(dev, &cf);
+       if (unlikely(!skb))
+               return 0;
+
+       do_state(dev, cf, new_state);
+       priv->can.state = new_state;
+       netif_receive_skb(skb);
+
+       dev->stats.rx_packets++;
+       dev->stats.rx_bytes += cf->can_dlc;
+
+       return 1;
+}
+
+static void flexcan_read_fifo(const struct net_device *dev,
+                             struct can_frame *cf)
+{
+       const struct flexcan_priv *priv = netdev_priv(dev);
+       struct flexcan_regs __iomem *regs = priv->base;
+       struct flexcan_mb __iomem *mb = &regs->cantxfg[0];
+       u32 reg_ctrl, reg_id;
+
+       reg_ctrl = readl(&mb->can_ctrl);
+       reg_id = readl(&mb->can_id);
+       if (reg_ctrl & FLEXCAN_MB_CNT_IDE)
+               cf->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
+       else
+               cf->can_id = (reg_id >> 18) & CAN_SFF_MASK;
+
+       if (reg_ctrl & FLEXCAN_MB_CNT_RTR)
+               cf->can_id |= CAN_RTR_FLAG;
+       cf->can_dlc = get_can_dlc((reg_ctrl >> 16) & 0xf);
+
+       *(__be32 *)(cf->data + 0) = cpu_to_be32(readl(&mb->data[0]));
+       *(__be32 *)(cf->data + 4) = cpu_to_be32(readl(&mb->data[1]));
+
+       /* mark as read */
+       writel(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
+       readl(&regs->timer);
+}
+
+static int flexcan_read_frame(struct net_device *dev)
+{
+       struct net_device_stats *stats = &dev->stats;
+       struct can_frame *cf;
+       struct sk_buff *skb;
+
+       skb = alloc_can_skb(dev, &cf);
+       if (unlikely(!skb)) {
+               stats->rx_dropped++;
+               return 0;
+       }
+
+       flexcan_read_fifo(dev, cf);
+       netif_receive_skb(skb);
+
+       stats->rx_packets++;
+       stats->rx_bytes += cf->can_dlc;
+
+       return 1;
+}
+
+static int flexcan_poll(struct napi_struct *napi, int quota)
+{
+       struct net_device *dev = napi->dev;
+       const struct flexcan_priv *priv = netdev_priv(dev);
+       struct flexcan_regs __iomem *regs = priv->base;
+       u32 reg_iflag1, reg_esr;
+       int work_done = 0;
+
+       /*
+        * The error bits are cleared on read,
+        * use saved value from irq handler.
+        */
+       reg_esr = readl(&regs->esr) | priv->reg_esr;
+
+       /* handle state changes */
+       work_done += flexcan_poll_state(dev, reg_esr);
+
+       /* handle RX-FIFO */
+       reg_iflag1 = readl(&regs->iflag1);
+       while (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE &&
+              work_done < quota) {
+               work_done += flexcan_read_frame(dev);
+               reg_iflag1 = readl(&regs->iflag1);
+       }
+
+       /* report bus errors */
+       if (flexcan_has_and_handle_berr(priv, reg_esr) && work_done < quota)
+               work_done += flexcan_poll_bus_err(dev, reg_esr);
+
+       if (work_done < quota) {
+               napi_complete(napi);
+               /* enable IRQs */
+               writel(FLEXCAN_IFLAG_DEFAULT, &regs->imask1);
+               writel(priv->reg_ctrl_default, &regs->ctrl);
+       }
+
+       return work_done;
+}
+
+static irqreturn_t flexcan_irq(int irq, void *dev_id)
+{
+       struct net_device *dev = dev_id;
+       struct net_device_stats *stats = &dev->stats;
+       struct flexcan_priv *priv = netdev_priv(dev);
+       struct flexcan_regs __iomem *regs = priv->base;
+       u32 reg_iflag1, reg_esr;
+
+       reg_iflag1 = readl(&regs->iflag1);
+       reg_esr = readl(&regs->esr);
+       writel(FLEXCAN_ESR_ERR_INT, &regs->esr);        /* ACK err IRQ */
+
+       /*
+        * schedule NAPI in case of:
+        * - rx IRQ
+        * - state change IRQ
+        * - bus error IRQ and bus error reporting is activated
+        */
+       if ((reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) ||
+           (reg_esr & FLEXCAN_ESR_ERR_STATE) ||
+           flexcan_has_and_handle_berr(priv, reg_esr)) {
+               /*
+                * The error bits are cleared on read,
+                * save them for later use.
+                */
+               priv->reg_esr = reg_esr & FLEXCAN_ESR_ERR_BUS;
+               writel(FLEXCAN_IFLAG_DEFAULT & ~FLEXCAN_IFLAG_RX_FIFO_AVAILABLE,
+                      &regs->imask1);
+               writel(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
+                      &regs->ctrl);
+               napi_schedule(&priv->napi);
+       }
+
+       /* FIFO overflow */
+       if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) {
+               writel(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, &regs->iflag1);
+               dev->stats.rx_over_errors++;
+               dev->stats.rx_errors++;
+       }
+
+       /* transmission complete interrupt */
+       if (reg_iflag1 & (1 << FLEXCAN_TX_BUF_ID)) {
+               /* tx_bytes is incremented in flexcan_start_xmit */
+               stats->tx_packets++;
+               writel((1 << FLEXCAN_TX_BUF_ID), &regs->iflag1);
+               netif_wake_queue(dev);
+       }
+
+       return IRQ_HANDLED;
+}
+
+static void flexcan_set_bittiming(struct net_device *dev)
+{
+       const struct flexcan_priv *priv = netdev_priv(dev);
+       const struct can_bittiming *bt = &priv->can.bittiming;
+       struct flexcan_regs __iomem *regs = priv->base;
+       u32 reg;
+
+       reg = readl(&regs->ctrl);
+       reg &= ~(FLEXCAN_CTRL_PRESDIV(0xff) |
+                FLEXCAN_CTRL_RJW(0x3) |
+                FLEXCAN_CTRL_PSEG1(0x7) |
+                FLEXCAN_CTRL_PSEG2(0x7) |
+                FLEXCAN_CTRL_PROPSEG(0x7) |
+                FLEXCAN_CTRL_LPB |
+                FLEXCAN_CTRL_SMP |
+                FLEXCAN_CTRL_LOM);
+
+       reg |= FLEXCAN_CTRL_PRESDIV(bt->brp - 1) |
+               FLEXCAN_CTRL_PSEG1(bt->phase_seg1 - 1) |
+               FLEXCAN_CTRL_PSEG2(bt->phase_seg2 - 1) |
+               FLEXCAN_CTRL_RJW(bt->sjw - 1) |
+               FLEXCAN_CTRL_PROPSEG(bt->prop_seg - 1);
+
+       if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+               reg |= FLEXCAN_CTRL_LPB;
+       if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+               reg |= FLEXCAN_CTRL_LOM;
+       if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+               reg |= FLEXCAN_CTRL_SMP;
+
+       dev_info(dev->dev.parent, "writing ctrl=0x%08x\n", reg);
+       writel(reg, &regs->ctrl);
+
+       /* print chip status */
+       dev_dbg(dev->dev.parent, "%s: mcr=0x%08x ctrl=0x%08x\n", __func__,
+               readl(&regs->mcr), readl(&regs->ctrl));
+}
+
+/*
+ * flexcan_chip_start
+ *
+ * this function is entered with clocks enabled
+ *
+ */
+static int flexcan_chip_start(struct net_device *dev)
+{
+       struct flexcan_priv *priv = netdev_priv(dev);
+       struct flexcan_regs __iomem *regs = priv->base;
+       unsigned int i;
+       int err;
+       u32 reg_mcr, reg_ctrl;
+
+       /* enable module */
+       flexcan_chip_enable(priv);
+
+       /* soft reset */
+       writel(FLEXCAN_MCR_SOFTRST, &regs->mcr);
+       udelay(10);
+
+       reg_mcr = readl(&regs->mcr);
+       if (reg_mcr & FLEXCAN_MCR_SOFTRST) {
+               dev_err(dev->dev.parent,
+                       "Failed to softreset can module (mcr=0x%08x)\n",
+                       reg_mcr);
+               err = -ENODEV;
+               goto out;
+       }
+
+       flexcan_set_bittiming(dev);
+
+       /*
+        * MCR
+        *
+        * enable freeze
+        * enable fifo
+        * halt now
+        * only supervisor access
+        * enable warning int
+        * choose format C
+        *
+        */
+       reg_mcr = readl(&regs->mcr);
+       reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT |
+               FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN |
+               FLEXCAN_MCR_IDAM_C;
+       dev_dbg(dev->dev.parent, "%s: writing mcr=0x%08x", __func__, reg_mcr);
+       writel(reg_mcr, &regs->mcr);
+
+       /*
+        * CTRL
+        *
+        * disable timer sync feature
+        *
+        * disable auto busoff recovery
+        * transmit lowest buffer first
+        *
+        * enable tx and rx warning interrupt
+        * enable bus off interrupt
+        * (== FLEXCAN_CTRL_ERR_STATE)
+        *
+        * _note_: we enable the "error interrupt"
+        * (FLEXCAN_CTRL_ERR_MSK), too. Otherwise we don't get any
+        * warning or bus passive interrupts.
+        */
+       reg_ctrl = readl(&regs->ctrl);
+       reg_ctrl &= ~FLEXCAN_CTRL_TSYN;
+       reg_ctrl |= FLEXCAN_CTRL_BOFF_REC | FLEXCAN_CTRL_LBUF |
+               FLEXCAN_CTRL_ERR_STATE | FLEXCAN_CTRL_ERR_MSK;
+
+       /* save for later use */
+       priv->reg_ctrl_default = reg_ctrl;
+       dev_dbg(dev->dev.parent, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
+       writel(reg_ctrl, &regs->ctrl);
+
+       for (i = 0; i < ARRAY_SIZE(regs->cantxfg); i++) {
+               writel(0, &regs->cantxfg[i].can_ctrl);
+               writel(0, &regs->cantxfg[i].can_id);
+               writel(0, &regs->cantxfg[i].data[0]);
+               writel(0, &regs->cantxfg[i].data[1]);
+
+               /* put MB into rx queue */
+               writel(FLEXCAN_MB_CNT_CODE(0x4), &regs->cantxfg[i].can_ctrl);
+       }
+
+       /* acceptance mask/acceptance code (accept everything) */
+       writel(0x0, &regs->rxgmask);
+       writel(0x0, &regs->rx14mask);
+       writel(0x0, &regs->rx15mask);
+
+       flexcan_transceiver_switch(priv, 1);
+
+       /* synchronize with the can bus */
+       reg_mcr = readl(&regs->mcr);
+       reg_mcr &= ~FLEXCAN_MCR_HALT;
+       writel(reg_mcr, &regs->mcr);
+
+       priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+       /* enable FIFO interrupts */
+       writel(FLEXCAN_IFLAG_DEFAULT, &regs->imask1);
+
+       /* print chip status */
+       dev_dbg(dev->dev.parent, "%s: reading mcr=0x%08x ctrl=0x%08x\n",
+               __func__, readl(&regs->mcr), readl(&regs->ctrl));
+
+       return 0;
+
+ out:
+       flexcan_chip_disable(priv);
+       return err;
+}
+
+/*
+ * flexcan_chip_stop
+ *
+ * this function is entered with clocks enabled
+ *
+ * Masks all interrupts, disables and halts the core, and switches the
+ * transceiver off.
+ */
+static void flexcan_chip_stop(struct net_device *dev)
+{
+       struct flexcan_priv *priv = netdev_priv(dev);
+       struct flexcan_regs __iomem *regs = priv->base;
+       u32 reg;
+
+       /* Disable all interrupts */
+       writel(0, &regs->imask1);
+
+       /* Disable + halt module */
+       reg = readl(&regs->mcr);
+       reg |= FLEXCAN_MCR_MDIS | FLEXCAN_MCR_HALT;
+       writel(reg, &regs->mcr);
+
+       flexcan_transceiver_switch(priv, 0);
+       priv->can.state = CAN_STATE_STOPPED;
+}
+
+/*
+ * ndo_open callback: ungate the clock, request the IRQ and start the
+ * controller.  Fix vs. original: a failure of flexcan_chip_start()
+ * used to jump to out_close and leaked the IRQ requested just above;
+ * the new out_free_irq label releases it on that path.
+ */
+static int flexcan_open(struct net_device *dev)
+{
+       struct flexcan_priv *priv = netdev_priv(dev);
+       int err;
+
+       clk_enable(priv->clk);
+
+       err = open_candev(dev);
+       if (err)
+               goto out;
+
+       err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev);
+       if (err)
+               goto out_close;
+
+       /* start chip and queuing */
+       err = flexcan_chip_start(dev);
+       if (err)
+               goto out_free_irq;
+       napi_enable(&priv->napi);
+       netif_start_queue(dev);
+
+       return 0;
+
+ out_free_irq:
+       free_irq(dev->irq, dev);
+ out_close:
+       close_candev(dev);
+ out:
+       clk_disable(priv->clk);
+
+       return err;
+}
+
+/*
+ * ndo_stop callback: teardown in reverse order of flexcan_open() --
+ * stop the queue and NAPI before halting the controller, then release
+ * the IRQ, gate the clock and close the CAN device.
+ */
+static int flexcan_close(struct net_device *dev)
+{
+       struct flexcan_priv *priv = netdev_priv(dev);
+
+       netif_stop_queue(dev);
+       napi_disable(&priv->napi);
+       flexcan_chip_stop(dev);
+
+       free_irq(dev->irq, dev);
+       clk_disable(priv->clk);
+
+       close_candev(dev);
+
+       return 0;
+}
+
+/*
+ * can_priv::do_set_mode callback.  Restart (CAN_MODE_START) is the
+ * only mode change this driver supports: re-initialize the controller
+ * and resume transmission.
+ */
+static int flexcan_set_mode(struct net_device *dev, enum can_mode mode)
+{
+       int err;
+
+       if (mode != CAN_MODE_START)
+               return -EOPNOTSUPP;
+
+       err = flexcan_chip_start(dev);
+       if (err)
+               return err;
+
+       netif_wake_queue(dev);
+
+       return 0;
+}
+
+/* standard netdev callbacks; CAN-specific setup goes through can_priv */
+static const struct net_device_ops flexcan_netdev_ops = {
+       .ndo_open       = flexcan_open,
+       .ndo_stop       = flexcan_close,
+       .ndo_start_xmit = flexcan_start_xmit,
+};
+
+/*
+ * Probe-time hardware setup: select the bus clock, verify that the
+ * core provides an RX FIFO and register the CAN device.  Returns 0 on
+ * success or a negative errno.
+ *
+ * Fix vs. original: 'err' was declared u32, but it holds negative
+ * errno values (-ENODEV, register_candev() result) and is returned
+ * from an int function -- it must be a signed int.
+ */
+static int __devinit register_flexcandev(struct net_device *dev)
+{
+       struct flexcan_priv *priv = netdev_priv(dev);
+       struct flexcan_regs __iomem *regs = priv->base;
+       u32 reg;
+       int err;
+
+       clk_enable(priv->clk);
+
+       /* select "bus clock", chip must be disabled */
+       flexcan_chip_disable(priv);
+       reg = readl(&regs->ctrl);
+       reg |= FLEXCAN_CTRL_CLK_SRC;
+       writel(reg, &regs->ctrl);
+
+       flexcan_chip_enable(priv);
+
+       /* set freeze, halt and activate FIFO, restrict register access */
+       reg = readl(&regs->mcr);
+       reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT |
+               FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV;
+       writel(reg, &regs->mcr);
+
+       /*
+        * Currently we only support newer versions of this core
+        * featuring a RX FIFO. Older cores found on some Coldfire
+        * derivatives are not yet supported.
+        */
+       reg = readl(&regs->mcr);
+       if (!(reg & FLEXCAN_MCR_FEN)) {
+               dev_err(dev->dev.parent,
+                       "Could not enable RX FIFO, unsupported core\n");
+               err = -ENODEV;
+               goto out;
+       }
+
+       err = register_candev(dev);
+
+ out:
+       /* disable core and turn off clocks */
+       flexcan_chip_disable(priv);
+       clk_disable(priv->clk);
+
+       return err;
+}
+
+/* counterpart of register_flexcandev(); no hardware access needed */
+static void __devexit unregister_flexcandev(struct net_device *dev)
+{
+       unregister_candev(dev);
+}
+
+/*
+ * Platform probe: acquire clock, memory window and IRQ, allocate the
+ * candev and register it.  Resources are released in strict reverse
+ * order through the chain of failed_* labels on every error path.
+ */
+static int __devinit flexcan_probe(struct platform_device *pdev)
+{
+       struct net_device *dev;
+       struct flexcan_priv *priv;
+       struct resource *mem;
+       struct clk *clk;
+       void __iomem *base;
+       resource_size_t mem_size;
+       int err, irq;
+
+       clk = clk_get(&pdev->dev, NULL);
+       if (IS_ERR(clk)) {
+               dev_err(&pdev->dev, "no clock defined\n");
+               err = PTR_ERR(clk);
+               goto failed_clock;
+       }
+
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       irq = platform_get_irq(pdev, 0);
+       if (!mem || irq <= 0) {
+               err = -ENODEV;
+               goto failed_get;
+       }
+
+       mem_size = resource_size(mem);
+       if (!request_mem_region(mem->start, mem_size, pdev->name)) {
+               err = -EBUSY;
+               goto failed_req;
+       }
+
+       base = ioremap(mem->start, mem_size);
+       if (!base) {
+               err = -ENOMEM;
+               goto failed_map;
+       }
+
+       /* 0 echo skbs: local echo is done by the hardware (IFF_ECHO) */
+       dev = alloc_candev(sizeof(struct flexcan_priv), 0);
+       if (!dev) {
+               err = -ENOMEM;
+               goto failed_alloc;
+       }
+
+       dev->netdev_ops = &flexcan_netdev_ops;
+       dev->irq = irq;
+       dev->flags |= IFF_ECHO; /* we support local echo in hardware */
+
+       priv = netdev_priv(dev);
+       priv->can.clock.freq = clk_get_rate(clk);
+       priv->can.bittiming_const = &flexcan_bittiming_const;
+       priv->can.do_set_mode = flexcan_set_mode;
+       priv->can.do_get_berr_counter = flexcan_get_berr_counter;
+       priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+               CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_3_SAMPLES |
+               CAN_CTRLMODE_BERR_REPORTING;
+       priv->base = base;
+       priv->dev = dev;
+       priv->clk = clk;
+       priv->pdata = pdev->dev.platform_data;
+
+       netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
+
+       dev_set_drvdata(&pdev->dev, dev);
+       SET_NETDEV_DEV(dev, &pdev->dev);
+
+       err = register_flexcandev(dev);
+       if (err) {
+               dev_err(&pdev->dev, "registering netdev failed\n");
+               goto failed_register;
+       }
+
+       dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
+                priv->base, dev->irq);
+
+       return 0;
+
+ failed_register:
+       free_candev(dev);
+ failed_alloc:
+       iounmap(base);
+ failed_map:
+       release_mem_region(mem->start, mem_size);
+ failed_req:
+       clk_put(clk);
+ failed_get:
+ failed_clock:
+       return err;
+}
+
+/*
+ * Platform remove: undo flexcan_probe().
+ *
+ * Fix vs. original: priv lives inside the net_device allocation, so
+ * every access through priv (priv->base, priv->clk) must happen
+ * before free_candev(dev); the original freed the device first and
+ * then dereferenced priv (use-after-free).
+ */
+static int __devexit flexcan_remove(struct platform_device *pdev)
+{
+       struct net_device *dev = platform_get_drvdata(pdev);
+       struct flexcan_priv *priv = netdev_priv(dev);
+       struct resource *mem;
+
+       unregister_flexcandev(dev);
+       platform_set_drvdata(pdev, NULL);
+       iounmap(priv->base);
+
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       release_mem_region(mem->start, resource_size(mem));
+
+       clk_put(priv->clk);
+
+       free_candev(dev);
+
+       return 0;
+}
+
+/* platform driver glue; probe/remove do all resource management */
+static struct platform_driver flexcan_driver = {
+       .driver.name = DRV_NAME,
+       .probe = flexcan_probe,
+       .remove = __devexit_p(flexcan_remove),
+};
+
+/* register the platform driver at module load time */
+static int __init flexcan_init(void)
+{
+       pr_info("%s netdevice driver\n", DRV_NAME);
+       return platform_driver_register(&flexcan_driver);
+}
+
+/* unregister the platform driver at module unload time */
+static void __exit flexcan_exit(void)
+{
+       platform_driver_unregister(&flexcan_driver);
+       pr_info("%s: driver removed\n", DRV_NAME);
+}
+
+module_init(flexcan_init);
+module_exit(flexcan_exit);
+
+MODULE_AUTHOR("Sascha Hauer <kernel@pengutronix.de>, "
+             "Marc Kleine-Budde <kernel@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CAN port driver for flexcan based chip");
index 97ff6febad63d80f093403ae6be688da0ffba772..04525495b15bec744b2db5ceebe5b48dae650dfa 100644 (file)
@@ -7,4 +7,10 @@ config CAN_EMS_USB
          This driver is for the one channel CPC-USB/ARM7 CAN/USB interface
          from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
 
+config CAN_ESD_USB2
+        tristate "ESD USB/2 CAN/USB interface"
+        ---help---
+          This driver supports the CAN-USB/2 interface
+          from esd electronic system design gmbh (http://www.esd.eu).
+
 endmenu
index 0afd51d4c7a5632965855b650120bac72f34def4..fce3cf11719f972505a65c60d4947c3d4deb279f 100644 (file)
@@ -3,5 +3,6 @@
 #
 
 obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
+obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o
 
 ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
new file mode 100644 (file)
index 0000000..05a5275
--- /dev/null
@@ -0,0 +1,1132 @@
+/*
+ * CAN driver for esd CAN-USB/2
+ *
+ * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#include <linux/init.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/usb.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+MODULE_AUTHOR("Matthias Fuchs <matthias.fuchs@esd.eu>");
+MODULE_DESCRIPTION("CAN driver for esd CAN-USB/2 interfaces");
+MODULE_LICENSE("GPL v2");
+
+/* Define these values to match your devices */
+#define USB_ESDGMBH_VENDOR_ID  0x0ab4
+#define USB_CANUSB2_PRODUCT_ID 0x0010
+
+#define ESD_USB2_CAN_CLOCK     60000000
+#define ESD_USB2_MAX_NETS      2
+
+/* USB2 commands */
+#define CMD_VERSION            1 /* also used for VERSION_REPLY */
+#define CMD_CAN_RX             2 /* device to host only */
+#define CMD_CAN_TX             3 /* also used for TX_DONE */
+#define CMD_SETBAUD            4 /* also used for SETBAUD_REPLY */
+#define CMD_TS                 5 /* also used for TS_REPLY */
+#define CMD_IDADD              6 /* also used for IDADD_REPLY */
+
+/* esd CAN message flags - dlc field */
+#define ESD_RTR                        0x10
+
+/* esd CAN message flags - id field */
+#define ESD_EXTID              0x20000000
+#define ESD_EVENT              0x40000000
+#define ESD_IDMASK             0x1fffffff
+
+/* esd CAN event ids used by this driver */
+#define ESD_EV_CAN_ERROR_EXT   2
+
+/* baudrate message flags */
+#define ESD_USB2_UBR           0x80000000
+#define ESD_USB2_LOM           0x40000000
+#define ESD_USB2_NO_BAUDRATE   0x7fffffff
+#define ESD_USB2_TSEG1_MIN     1
+#define ESD_USB2_TSEG1_MAX     16
+#define ESD_USB2_TSEG1_SHIFT   16
+#define ESD_USB2_TSEG2_MIN     1
+#define ESD_USB2_TSEG2_MAX     8
+#define ESD_USB2_TSEG2_SHIFT   20
+#define ESD_USB2_SJW_MAX       4
+#define ESD_USB2_SJW_SHIFT     14
+#define ESD_USB2_BRP_MIN       1
+#define ESD_USB2_BRP_MAX       1024
+#define ESD_USB2_BRP_INC       1
+#define ESD_USB2_3_SAMPLES     0x00800000
+
+/* esd IDADD message */
+#define ESD_ID_ENABLE          0x80
+#define ESD_MAX_ID_SEGMENT     64
+
+/* SJA1000 ECC register (emulated by usb2 firmware) */
+#define SJA1000_ECC_SEG                0x1F
+#define SJA1000_ECC_DIR                0x20
+#define SJA1000_ECC_ERR                0x06
+#define SJA1000_ECC_BIT                0x00
+#define SJA1000_ECC_FORM       0x40
+#define SJA1000_ECC_STUFF      0x80
+#define SJA1000_ECC_MASK       0xc0
+
+/* esd bus state event codes */
+#define ESD_BUSSTATE_MASK      0xc0
+#define ESD_BUSSTATE_WARN      0x40
+#define ESD_BUSSTATE_ERRPASSIVE        0x80
+#define ESD_BUSSTATE_BUSOFF    0xc0
+
+#define RX_BUFFER_SIZE         1024
+#define MAX_RX_URBS            4
+#define MAX_TX_URBS            16 /* must be power of 2 */
+
+/*
+ * On-the-wire message layouts exchanged with the CAN-USB/2 firmware.
+ * Multi-byte fields are little-endian (__le32); every message starts
+ * with the common len/cmd header.
+ * NOTE(review): field semantics are inferred from their use in this
+ * driver -- confirm against the esd protocol documentation.
+ */
+struct header_msg {
+       u8 len; /* len is always the total message length in 32bit words */
+       u8 cmd;
+       u8 rsvd[2];
+};
+
+/* CMD_VERSION request sent to the device */
+struct version_msg {
+       u8 len;
+       u8 cmd;
+       u8 rsvd;
+       u8 flags;
+       __le32 drv_version;
+};
+
+/* CMD_VERSION reply: device capabilities and firmware version */
+struct version_reply_msg {
+       u8 len;
+       u8 cmd;
+       u8 nets;
+       u8 features;
+       __le32 version;
+       u8 name[16];
+       __le32 rsvd;
+       __le32 ts;
+};
+
+/* CMD_CAN_RX: received CAN frame or event (device to host) */
+struct rx_msg {
+       u8 len;
+       u8 cmd;
+       u8 net;
+       u8 dlc;
+       __le32 ts;
+       __le32 id; /* upper 3 bits contain flags */
+       u8 data[8];
+};
+
+/* CMD_CAN_TX: CAN frame to transmit (host to device) */
+struct tx_msg {
+       u8 len;
+       u8 cmd;
+       u8 net;
+       u8 dlc;
+       __le32 hnd;
+       __le32 id; /* upper 3 bits contain flags */
+       u8 data[8];
+};
+
+/* CMD_CAN_TX done notification; hnd echoes the tx handle */
+struct tx_done_msg {
+       u8 len;
+       u8 cmd;
+       u8 net;
+       u8 status;
+       __le32 hnd;
+       __le32 ts;
+};
+
+/* CMD_IDADD: per-net CAN identifier acceptance bitmask */
+struct id_filter_msg {
+       u8 len;
+       u8 cmd;
+       u8 net;
+       u8 option;
+       __le32 mask[ESD_MAX_ID_SEGMENT + 1];
+};
+
+/* CMD_SETBAUD: per-net bit timing word */
+struct set_baudrate_msg {
+       u8 len;
+       u8 cmd;
+       u8 net;
+       u8 rsvd;
+       __le32 baud;
+};
+
+/* Main message type used between library and application */
+/*
+ * NOTE(review): the packed attribute applies only to this outer
+ * struct; the member structs above are not packed themselves.  Their
+ * fields (u8 groups and __le32) happen to be naturally aligned, so
+ * the layout still matches the wire format -- keep it that way when
+ * adding fields.
+ */
+struct __attribute__ ((packed)) esd_usb2_msg {
+       union {
+               struct header_msg hdr;
+               struct version_msg version;
+               struct version_reply_msg version_reply;
+               struct rx_msg rx;
+               struct tx_msg tx;
+               struct tx_done_msg txdone;
+               struct set_baudrate_msg setbaud;
+               struct id_filter_msg filter;
+       } msg;
+};
+
+/* USB vendor/product ids this driver binds to */
+static struct usb_device_id esd_usb2_table[] = {
+       {USB_DEVICE(USB_ESDGMBH_VENDOR_ID, USB_CANUSB2_PRODUCT_ID)},
+       {}
+};
+MODULE_DEVICE_TABLE(usb, esd_usb2_table);
+
+struct esd_usb2_net_priv;
+
+/*
+ * Per-URB tx bookkeeping.  echo_index == MAX_TX_URBS marks a free
+ * context (see the release in esd_usb2_tx_done_msg()).
+ */
+struct esd_tx_urb_context {
+       struct esd_usb2_net_priv *priv;
+       u32 echo_index;
+       int dlc; /* data length of the echoed frame, for tx_bytes */
+};
+
+/* Per-USB-device state, shared by all CAN nets of one adapter */
+struct esd_usb2 {
+       struct usb_device *udev;
+       struct esd_usb2_net_priv *nets[ESD_USB2_MAX_NETS];
+
+       struct usb_anchor rx_submitted;
+
+       int net_count;
+       u32 version;
+       int rxinitdone; /* rx URBs submitted once (esd_usb2_setup_rx_urbs) */
+};
+
+/* Per-CAN-net state */
+struct esd_usb2_net_priv {
+       struct can_priv can; /* must be the first member */
+
+       atomic_t active_tx_jobs; /* in-flight tx URBs, bounded by MAX_TX_URBS */
+       struct usb_anchor tx_submitted;
+       struct esd_tx_urb_context tx_contexts[MAX_TX_URBS];
+
+       int open_time;
+       struct esd_usb2 *usb2;
+       struct net_device *netdev;
+       int index; /* net number on the adapter, used in messages */
+       u8 old_state; /* last bus state reported by the device */
+       struct can_berr_counter bec;
+};
+
+/*
+ * Handle an event message (currently only ESD_EV_CAN_ERROR_EXT):
+ * translate the device's bus state / SJA1000-style ECC info into a
+ * CAN error frame and update state and error counters.
+ *
+ * Fix vs. original: stats were updated from cf->can_dlc *after*
+ * netif_rx() had handed the skb to the stack; the skb (and thus cf)
+ * must not be touched after netif_rx(), so all accounting is done
+ * before it now.
+ */
+static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
+                             struct esd_usb2_msg *msg)
+{
+       struct net_device_stats *stats = &priv->netdev->stats;
+       struct can_frame *cf;
+       struct sk_buff *skb;
+       u32 id = le32_to_cpu(msg->msg.rx.id) & ESD_IDMASK;
+
+       if (id == ESD_EV_CAN_ERROR_EXT) {
+               u8 state = msg->msg.rx.data[0];
+               u8 ecc = msg->msg.rx.data[1];
+               u8 txerr = msg->msg.rx.data[2];
+               u8 rxerr = msg->msg.rx.data[3];
+
+               skb = alloc_can_err_skb(priv->netdev, &cf);
+               if (skb == NULL) {
+                       stats->rx_dropped++;
+                       return;
+               }
+
+               if (state != priv->old_state) {
+                       /* bus state change */
+                       priv->old_state = state;
+
+                       switch (state & ESD_BUSSTATE_MASK) {
+                       case ESD_BUSSTATE_BUSOFF:
+                               priv->can.state = CAN_STATE_BUS_OFF;
+                               cf->can_id |= CAN_ERR_BUSOFF;
+                               can_bus_off(priv->netdev);
+                               break;
+                       case ESD_BUSSTATE_WARN:
+                               priv->can.state = CAN_STATE_ERROR_WARNING;
+                               priv->can.can_stats.error_warning++;
+                               break;
+                       case ESD_BUSSTATE_ERRPASSIVE:
+                               priv->can.state = CAN_STATE_ERROR_PASSIVE;
+                               priv->can.can_stats.error_passive++;
+                               break;
+                       default:
+                               priv->can.state = CAN_STATE_ERROR_ACTIVE;
+                               break;
+                       }
+               } else {
+                       /* bus error with unchanged state */
+                       priv->can.can_stats.bus_error++;
+                       stats->rx_errors++;
+
+                       cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+                       switch (ecc & SJA1000_ECC_MASK) {
+                       case SJA1000_ECC_BIT:
+                               cf->data[2] |= CAN_ERR_PROT_BIT;
+                               break;
+                       case SJA1000_ECC_FORM:
+                               cf->data[2] |= CAN_ERR_PROT_FORM;
+                               break;
+                       case SJA1000_ECC_STUFF:
+                               cf->data[2] |= CAN_ERR_PROT_STUFF;
+                               break;
+                       default:
+                               cf->data[2] |= CAN_ERR_PROT_UNSPEC;
+                               cf->data[3] = ecc & SJA1000_ECC_SEG;
+                               break;
+                       }
+
+                       /* Error occurred during transmission? */
+                       if (!(ecc & SJA1000_ECC_DIR))
+                               cf->data[2] |= CAN_ERR_PROT_TX;
+
+                       if (priv->can.state == CAN_STATE_ERROR_WARNING ||
+                           priv->can.state == CAN_STATE_ERROR_PASSIVE) {
+                               cf->data[1] = (txerr > rxerr) ?
+                                       CAN_ERR_CRTL_TX_PASSIVE :
+                                       CAN_ERR_CRTL_RX_PASSIVE;
+                       }
+                       cf->data[6] = txerr;
+                       cf->data[7] = rxerr;
+               }
+
+               priv->bec.txerr = txerr;
+               priv->bec.rxerr = rxerr;
+
+               /* account before netif_rx(): the skb is gone afterwards */
+               stats->rx_packets++;
+               stats->rx_bytes += cf->can_dlc;
+
+               netif_rx(skb);
+       }
+}
+
+/*
+ * Handle a CMD_CAN_RX message: either dispatch it as an event or
+ * convert it into a CAN frame and pass it to the networking stack.
+ *
+ * Fixes vs. original: stats are updated before netif_rx() (the skb
+ * must not be touched afterwards) and the redundant trailing return
+ * was dropped.
+ */
+static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
+                               struct esd_usb2_msg *msg)
+{
+       struct net_device_stats *stats = &priv->netdev->stats;
+       struct can_frame *cf;
+       struct sk_buff *skb;
+       int i;
+       u32 id;
+
+       if (!netif_device_present(priv->netdev))
+               return;
+
+       id = le32_to_cpu(msg->msg.rx.id);
+
+       if (id & ESD_EVENT) {
+               esd_usb2_rx_event(priv, msg);
+       } else {
+               skb = alloc_can_skb(priv->netdev, &cf);
+               if (skb == NULL) {
+                       stats->rx_dropped++;
+                       return;
+               }
+
+               cf->can_id = id & ESD_IDMASK;
+               cf->can_dlc = get_can_dlc(msg->msg.rx.dlc);
+
+               if (id & ESD_EXTID)
+                       cf->can_id |= CAN_EFF_FLAG;
+
+               if (msg->msg.rx.dlc & ESD_RTR) {
+                       cf->can_id |= CAN_RTR_FLAG;
+               } else {
+                       for (i = 0; i < cf->can_dlc; i++)
+                               cf->data[i] = msg->msg.rx.data[i];
+               }
+
+               /* account before netif_rx(): the skb is gone afterwards */
+               stats->rx_packets++;
+               stats->rx_bytes += cf->can_dlc;
+
+               netif_rx(skb);
+       }
+}
+
+/*
+ * Handle a TX done notification: loop back the echoed frame on
+ * success (or drop it on error), release the tx context and wake the
+ * queue.
+ */
+static void esd_usb2_tx_done_msg(struct esd_usb2_net_priv *priv,
+                                struct esd_usb2_msg *msg)
+{
+       struct net_device_stats *stats = &priv->netdev->stats;
+       struct net_device *netdev = priv->netdev;
+       struct esd_tx_urb_context *context;
+
+       if (!netif_device_present(netdev))
+               return;
+
+       /* hnd encodes the tx context index (masked to MAX_TX_URBS) */
+       context = &priv->tx_contexts[msg->msg.txdone.hnd & (MAX_TX_URBS - 1)];
+
+       if (!msg->msg.txdone.status) {
+               stats->tx_packets++;
+               stats->tx_bytes += context->dlc;
+               can_get_echo_skb(netdev, context->echo_index);
+       } else {
+               stats->tx_errors++;
+               can_free_echo_skb(netdev, context->echo_index);
+       }
+
+       /* Release context: MAX_TX_URBS marks it free */
+       context->echo_index = MAX_TX_URBS;
+       atomic_dec(&priv->active_tx_jobs);
+
+       netif_wake_queue(netdev);
+}
+
+/*
+ * Completion handler for the bulk IN (rx) URBs: walk the messages
+ * packed into the transfer buffer, dispatch rx frames and tx-done
+ * notifications, then resubmit the URB.  (Removed the redundant
+ * trailing return of the original.)
+ */
+static void esd_usb2_read_bulk_callback(struct urb *urb)
+{
+       struct esd_usb2 *dev = urb->context;
+       int retval;
+       int pos = 0;
+       int i;
+
+       switch (urb->status) {
+       case 0: /* success */
+               break;
+
+       case -ENOENT:
+       case -ESHUTDOWN:
+               /* URB was killed or device is gone: don't resubmit */
+               return;
+
+       default:
+               dev_info(dev->udev->dev.parent,
+                        "Rx URB aborted (%d)\n", urb->status);
+               goto resubmit_urb;
+       }
+
+       /* one bulk transfer may carry several back-to-back messages */
+       while (pos < urb->actual_length) {
+               struct esd_usb2_msg *msg;
+
+               msg = (struct esd_usb2_msg *)(urb->transfer_buffer + pos);
+
+               switch (msg->msg.hdr.cmd) {
+               case CMD_CAN_RX:
+                       esd_usb2_rx_can_msg(dev->nets[msg->msg.rx.net], msg);
+                       break;
+
+               case CMD_CAN_TX:
+                       esd_usb2_tx_done_msg(dev->nets[msg->msg.txdone.net],
+                                            msg);
+                       break;
+               }
+
+               /* hdr.len counts 32 bit words */
+               pos += msg->msg.hdr.len << 2;
+
+               /*
+                * NOTE(review): a corrupted message with hdr.len == 0
+                * would spin here forever; this check only catches
+                * overruns -- consider also bailing out on len == 0.
+                */
+               if (pos > urb->actual_length) {
+                       dev_err(dev->udev->dev.parent, "format error\n");
+                       break;
+               }
+       }
+
+resubmit_urb:
+       usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1),
+                         urb->transfer_buffer, RX_BUFFER_SIZE,
+                         esd_usb2_read_bulk_callback, dev);
+
+       retval = usb_submit_urb(urb, GFP_ATOMIC);
+       if (retval == -ENODEV) {
+               /* device vanished: detach all netdevs */
+               for (i = 0; i < dev->net_count; i++) {
+                       if (dev->nets[i])
+                               netif_device_detach(dev->nets[i]->netdev);
+               }
+       } else if (retval) {
+               dev_err(dev->udev->dev.parent,
+                       "failed resubmitting read bulk urb: %d\n", retval);
+       }
+}
+
+/*
+ * callback for bulk OUT (tx) urb completion: frees the coherent
+ * transfer buffer and refreshes the tx watchdog timestamp.  (The
+ * original comment said "bulk IN", but this completes writes on the
+ * OUT pipe -- see the usb_sndbulkpipe() users above.)
+ */
+static void esd_usb2_write_bulk_callback(struct urb *urb)
+{
+       struct esd_tx_urb_context *context = urb->context;
+       struct esd_usb2_net_priv *priv;
+       struct esd_usb2 *dev;
+       struct net_device *netdev;
+       size_t size = sizeof(struct esd_usb2_msg);
+
+       WARN_ON(!context);
+
+       priv = context->priv;
+       netdev = priv->netdev;
+       dev = priv->usb2;
+
+       /* free up our allocated buffer */
+       usb_free_coherent(urb->dev, size,
+                         urb->transfer_buffer, urb->transfer_dma);
+
+       if (!netif_device_present(netdev))
+               return;
+
+       if (urb->status)
+               dev_info(netdev->dev.parent, "Tx URB aborted (%d)\n",
+                        urb->status);
+
+       /* keep the tx watchdog happy */
+       netdev->trans_start = jiffies;
+}
+
+/* sysfs: firmware revision, decoded from the low half of 'version' */
+static ssize_t show_firmware(struct device *d,
+                            struct device_attribute *attr, char *buf)
+{
+       struct usb_interface *intf = to_usb_interface(d);
+       struct esd_usb2 *dev = usb_get_intfdata(intf);
+
+       return sprintf(buf, "%d.%d.%d\n",
+                      (dev->version >> 12) & 0xf,
+                      (dev->version >> 8) & 0xf,
+                      dev->version & 0xff);
+}
+static DEVICE_ATTR(firmware, S_IRUGO, show_firmware, NULL);
+
+/* sysfs: hardware revision, decoded from the high half of 'version' */
+static ssize_t show_hardware(struct device *d,
+                            struct device_attribute *attr, char *buf)
+{
+       struct usb_interface *intf = to_usb_interface(d);
+       struct esd_usb2 *dev = usb_get_intfdata(intf);
+
+       return sprintf(buf, "%d.%d.%d\n",
+                      (dev->version >> 28) & 0xf,
+                      (dev->version >> 24) & 0xf,
+                      (dev->version >> 16) & 0xff);
+}
+static DEVICE_ATTR(hardware, S_IRUGO, show_hardware, NULL);
+
+/*
+ * sysfs: number of CAN nets on the adapter.
+ *
+ * Fix vs. original: terminate with a newline like the firmware/
+ * hardware attributes above; sysfs attribute values are expected to
+ * be newline terminated.
+ */
+static ssize_t show_nets(struct device *d,
+                        struct device_attribute *attr, char *buf)
+{
+       struct usb_interface *intf = to_usb_interface(d);
+       struct esd_usb2 *dev = usb_get_intfdata(intf);
+
+       return sprintf(buf, "%d\n", dev->net_count);
+}
+static DEVICE_ATTR(nets, S_IRUGO, show_nets, NULL);
+
+/*
+ * Synchronously send a command via bulk OUT endpoint 2 (1s timeout).
+ * hdr.len is in 32 bit words, hence the << 2.  Returns 0 or a
+ * negative USB error.
+ * NOTE(review): callers pass 'msg' from their stack frames; whether
+ * that buffer is DMA-able on all platforms should be confirmed --
+ * usb_bulk_msg() documentation forbids stack buffers on some arches.
+ */
+static int esd_usb2_send_msg(struct esd_usb2 *dev, struct esd_usb2_msg *msg)
+{
+       int actual_length;
+
+       return usb_bulk_msg(dev->udev,
+                           usb_sndbulkpipe(dev->udev, 2),
+                           msg,
+                           msg->msg.hdr.len << 2,
+                           &actual_length,
+                           1000);
+}
+
+/*
+ * Synchronously read one reply via bulk IN endpoint 1 (1s timeout).
+ * Returns 0 or a negative USB error; on success 'msg' holds the
+ * reply.  Same DMA-buffer caveat as esd_usb2_send_msg().
+ */
+static int esd_usb2_wait_msg(struct esd_usb2 *dev,
+                            struct esd_usb2_msg *msg)
+{
+       int actual_length;
+
+       return usb_bulk_msg(dev->udev,
+                           usb_rcvbulkpipe(dev->udev, 1),
+                           msg,
+                           sizeof(*msg),
+                           &actual_length,
+                           1000);
+}
+
+/*
+ * Allocate and submit the pool of rx bulk URBs.  Idempotent: does
+ * nothing once rxinitdone is set.  Succeeds (returns 0) as long as at
+ * least one URB could be submitted; only warns when fewer than
+ * MAX_RX_URBS made it.
+ */
+static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
+{
+       int i, err = 0;
+
+       if (dev->rxinitdone)
+               return 0;
+
+       for (i = 0; i < MAX_RX_URBS; i++) {
+               struct urb *urb = NULL;
+               u8 *buf = NULL;
+
+               /* create a URB, and a buffer for it */
+               urb = usb_alloc_urb(0, GFP_KERNEL);
+               if (!urb) {
+                       dev_warn(dev->udev->dev.parent,
+                                "No memory left for URBs\n");
+                       err = -ENOMEM;
+                       break;
+               }
+
+               buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
+                                        &urb->transfer_dma);
+               if (!buf) {
+                       dev_warn(dev->udev->dev.parent,
+                                "No memory left for USB buffer\n");
+                       err = -ENOMEM;
+                       goto freeurb;
+               }
+
+               usb_fill_bulk_urb(urb, dev->udev,
+                                 usb_rcvbulkpipe(dev->udev, 1),
+                                 buf, RX_BUFFER_SIZE,
+                                 esd_usb2_read_bulk_callback, dev);
+               urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+               usb_anchor_urb(urb, &dev->rx_submitted);
+
+               err = usb_submit_urb(urb, GFP_KERNEL);
+               if (err) {
+                       usb_unanchor_urb(urb);
+                       usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
+                                         urb->transfer_dma);
+               }
+
+freeurb:
+               /* Drop reference, USB core will take care of freeing it */
+               usb_free_urb(urb);
+               if (err)
+                       break;
+       }
+
+       /* Did we submit any URBs at all? */
+       if (i == 0) {
+               dev_err(dev->udev->dev.parent, "couldn't setup read URBs\n");
+               return err;
+       }
+
+       /* Warn if we couldn't submit all of the URBs */
+       if (i < MAX_RX_URBS) {
+               dev_warn(dev->udev->dev.parent,
+                        "rx performance may be slow\n");
+       }
+
+       dev->rxinitdone = 1;
+       return 0;
+}
+
+/*
+ * Start interface: install an accept-everything ID filter on the
+ * device and make sure the rx URBs are submitted.
+ */
+static int esd_usb2_start(struct esd_usb2_net_priv *priv)
+{
+       struct esd_usb2 *dev = priv->usb2;
+       struct net_device *netdev = priv->netdev;
+       struct esd_usb2_msg msg;
+       int err, i;
+
+       /*
+        * Enable all IDs
+        * The IDADD message takes up to 64 32 bit bitmasks (2048 bits).
+        * Each bit represents one 11 bit CAN identifier. A set bit
+        * enables reception of the corresponding CAN identifier. A cleared
+        * bit disables this identifier. An additional bitmask value
+        * following the CAN 2.0A bits is used to enable reception of
+        * extended CAN frames. Only the LSB of this final mask is checked
+        * for the complete 29 bit ID range. The IDADD message also allows
+        * filter configuration for an ID subset. In this case you can add
+        * the number of the starting bitmask (0..64) to the filter.option
+        * field followed by only some bitmasks.
+        */
+       msg.msg.hdr.cmd = CMD_IDADD;
+       msg.msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT;
+       msg.msg.filter.net = priv->index;
+       msg.msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */
+       for (i = 0; i < ESD_MAX_ID_SEGMENT; i++)
+               msg.msg.filter.mask[i] = cpu_to_le32(0xffffffff);
+       /* enable 29bit extended IDs */
+       msg.msg.filter.mask[ESD_MAX_ID_SEGMENT] = cpu_to_le32(0x00000001);
+
+       /* NOTE(review): 'msg' is on the stack, see esd_usb2_send_msg() */
+       err = esd_usb2_send_msg(dev, &msg);
+       if (err)
+               goto failed;
+
+       err = esd_usb2_setup_rx_urbs(dev);
+       if (err)
+               goto failed;
+
+       priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+       return 0;
+
+failed:
+       if (err == -ENODEV)
+               netif_device_detach(netdev);
+
+       dev_err(netdev->dev.parent, "couldn't start device: %d\n", err);
+
+       return err;
+}
+
+static void unlink_all_urbs(struct esd_usb2 *dev)
+{
+       struct esd_usb2_net_priv *priv;
+       int i;
+
+       usb_kill_anchored_urbs(&dev->rx_submitted);
+       for (i = 0; i < dev->net_count; i++) {
+               priv = dev->nets[i];
+               if (priv) {
+                       usb_kill_anchored_urbs(&priv->tx_submitted);
+                       atomic_set(&priv->active_tx_jobs, 0);
+
+                       for (i = 0; i < MAX_TX_URBS; i++)
+                               priv->tx_contexts[i].echo_index = MAX_TX_URBS;
+               }
+       }
+}
+
+static int esd_usb2_open(struct net_device *netdev)
+{
+       struct esd_usb2_net_priv *priv = netdev_priv(netdev);
+       int err;
+
+       /* common open */
+       err = open_candev(netdev);
+       if (err)
+               return err;
+
+       /* finally start device */
+       err = esd_usb2_start(priv);
+       if (err) {
+               dev_warn(netdev->dev.parent,
+                        "couldn't start device: %d\n", err);
+               close_candev(netdev);
+               return err;
+       }
+
+       priv->open_time = jiffies;
+
+       netif_start_queue(netdev);
+
+       return 0;
+}
+
/*
 * Transmit one CAN frame over USB.
 *
 * Allocates a URB plus a DMA-coherent buffer, encodes the frame into a
 * CMD_CAN_TX message, claims a free TX context (used for local echo on
 * TX-done) and submits the URB. Returns NETDEV_TX_BUSY when no TX
 * context is free; on allocation failure the skb is dropped and counted
 * in tx_dropped.
 */
static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct esd_usb2_net_priv *priv = netdev_priv(netdev);
	struct esd_usb2 *dev = priv->usb2;
	struct esd_tx_urb_context *context = NULL;
	struct net_device_stats *stats = &netdev->stats;
	struct can_frame *cf = (struct can_frame *)skb->data;
	struct esd_usb2_msg *msg;
	struct urb *urb;
	u8 *buf;
	int i, err;
	int ret = NETDEV_TX_OK;
	size_t size = sizeof(struct esd_usb2_msg);

	if (can_dropped_invalid_skb(netdev, skb))
		return NETDEV_TX_OK;

	/* create a URB, and a buffer for it, and copy the data to the URB */
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		dev_err(netdev->dev.parent, "No memory left for URBs\n");
		stats->tx_dropped++;
		dev_kfree_skb(skb);
		goto nourbmem;
	}

	buf = usb_alloc_coherent(dev->udev, size, GFP_ATOMIC,
				 &urb->transfer_dma);
	if (!buf) {
		dev_err(netdev->dev.parent, "No memory left for USB buffer\n");
		stats->tx_dropped++;
		dev_kfree_skb(skb);
		goto nobufmem;
	}

	msg = (struct esd_usb2_msg *)buf;

	msg->msg.hdr.len = 3; /* minimal length */
	msg->msg.hdr.cmd = CMD_CAN_TX;
	msg->msg.tx.net = priv->index;
	msg->msg.tx.dlc = cf->can_dlc;
	msg->msg.tx.id = cpu_to_le32(cf->can_id & CAN_ERR_MASK);

	if (cf->can_id & CAN_RTR_FLAG)
		msg->msg.tx.dlc |= ESD_RTR;

	if (cf->can_id & CAN_EFF_FLAG)
		msg->msg.tx.id |= cpu_to_le32(ESD_EXTID);

	for (i = 0; i < cf->can_dlc; i++)
		msg->msg.tx.data[i] = cf->data[i];

	/* header len is counted in 32 bit words: round the payload up */
	msg->msg.hdr.len += (cf->can_dlc + 3) >> 2;

	/* find a free TX context (echo_index == MAX_TX_URBS means free) */
	for (i = 0; i < MAX_TX_URBS; i++) {
		if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
			context = &priv->tx_contexts[i];
			break;
		}
	}

	/*
	 * This may never happen.
	 */
	if (!context) {
		dev_warn(netdev->dev.parent, "couldn't find free context\n");
		ret = NETDEV_TX_BUSY;
		goto releasebuf;
	}

	context->priv = priv;
	context->echo_index = i;
	context->dlc = cf->can_dlc;

	/* hnd must not be 0 - MSB is stripped in txdone handling */
	msg->msg.tx.hnd = 0x80000000 | i; /* returned in TX done message */

	usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf,
			  msg->msg.hdr.len << 2,
			  esd_usb2_write_bulk_callback, context);

	/* transfer_dma is already set by usb_alloc_coherent() above */
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

	usb_anchor_urb(urb, &priv->tx_submitted);

	/* the echo skb now owns the original skb until TX done/failure */
	can_put_echo_skb(skb, netdev, context->echo_index);

	atomic_inc(&priv->active_tx_jobs);

	/* Slow down tx path */
	if (atomic_read(&priv->active_tx_jobs) >= MAX_TX_URBS)
		netif_stop_queue(netdev);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err) {
		/* undo everything done since the context was claimed */
		can_free_echo_skb(netdev, context->echo_index);

		atomic_dec(&priv->active_tx_jobs);
		usb_unanchor_urb(urb);

		stats->tx_dropped++;

		if (err == -ENODEV)
			netif_device_detach(netdev);
		else
			dev_warn(netdev->dev.parent, "failed tx_urb %d\n", err);

		goto releasebuf;
	}

	netdev->trans_start = jiffies;

	/*
	 * Release our reference to this URB, the USB core will eventually free
	 * it entirely.
	 */
	usb_free_urb(urb);

	return NETDEV_TX_OK;

releasebuf:
	/* note: urb->transfer_dma must still be valid here */
	usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);

nobufmem:
	usb_free_urb(urb);

nourbmem:
	return ret;
}
+
+static int esd_usb2_close(struct net_device *netdev)
+{
+       struct esd_usb2_net_priv *priv = netdev_priv(netdev);
+       struct esd_usb2_msg msg;
+       int i;
+
+       /* Disable all IDs (see esd_usb2_start()) */
+       msg.msg.hdr.cmd = CMD_IDADD;
+       msg.msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT;
+       msg.msg.filter.net = priv->index;
+       msg.msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */
+       for (i = 0; i <= ESD_MAX_ID_SEGMENT; i++)
+               msg.msg.filter.mask[i] = 0;
+       if (esd_usb2_send_msg(priv->usb2, &msg) < 0)
+               dev_err(netdev->dev.parent, "sending idadd message failed\n");
+
+       /* set CAN controller to reset mode */
+       msg.msg.hdr.len = 2;
+       msg.msg.hdr.cmd = CMD_SETBAUD;
+       msg.msg.setbaud.net = priv->index;
+       msg.msg.setbaud.rsvd = 0;
+       msg.msg.setbaud.baud = cpu_to_le32(ESD_USB2_NO_BAUDRATE);
+       if (esd_usb2_send_msg(priv->usb2, &msg) < 0)
+               dev_err(netdev->dev.parent, "sending setbaud message failed\n");
+
+       priv->can.state = CAN_STATE_STOPPED;
+
+       netif_stop_queue(netdev);
+
+       close_candev(netdev);
+
+       priv->open_time = 0;
+
+       return 0;
+}
+
/* Network device callbacks for one esd USB2 CAN interface */
static const struct net_device_ops esd_usb2_netdev_ops = {
	.ndo_open = esd_usb2_open,
	.ndo_stop = esd_usb2_close,
	.ndo_start_xmit = esd_usb2_start_xmit,
};
+
/*
 * Bit timing limits of the esd USB2 CAN controller, used by the CAN
 * core to compute valid bit timings from a requested bitrate.
 */
static struct can_bittiming_const esd_usb2_bittiming_const = {
	.name = "esd_usb2",
	.tseg1_min = ESD_USB2_TSEG1_MIN,
	.tseg1_max = ESD_USB2_TSEG1_MAX,
	.tseg2_min = ESD_USB2_TSEG2_MIN,
	.tseg2_max = ESD_USB2_TSEG2_MAX,
	.sjw_max = ESD_USB2_SJW_MAX,
	.brp_min = ESD_USB2_BRP_MIN,
	.brp_max = ESD_USB2_BRP_MAX,
	.brp_inc = ESD_USB2_BRP_INC,
};
+
+static int esd_usb2_set_bittiming(struct net_device *netdev)
+{
+       struct esd_usb2_net_priv *priv = netdev_priv(netdev);
+       struct can_bittiming *bt = &priv->can.bittiming;
+       struct esd_usb2_msg msg;
+       u32 canbtr;
+
+       canbtr = ESD_USB2_UBR;
+       canbtr |= (bt->brp - 1) & (ESD_USB2_BRP_MAX - 1);
+       canbtr |= ((bt->sjw - 1) & (ESD_USB2_SJW_MAX - 1))
+               << ESD_USB2_SJW_SHIFT;
+       canbtr |= ((bt->prop_seg + bt->phase_seg1 - 1)
+                  & (ESD_USB2_TSEG1_MAX - 1))
+               << ESD_USB2_TSEG1_SHIFT;
+       canbtr |= ((bt->phase_seg2 - 1) & (ESD_USB2_TSEG2_MAX - 1))
+               << ESD_USB2_TSEG2_SHIFT;
+       if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+               canbtr |= ESD_USB2_3_SAMPLES;
+
+       msg.msg.hdr.len = 2;
+       msg.msg.hdr.cmd = CMD_SETBAUD;
+       msg.msg.setbaud.net = priv->index;
+       msg.msg.setbaud.rsvd = 0;
+       msg.msg.setbaud.baud = cpu_to_le32(canbtr);
+
+       dev_info(netdev->dev.parent, "setting BTR=%#x\n", canbtr);
+
+       return esd_usb2_send_msg(priv->usb2, &msg);
+}
+
+static int esd_usb2_get_berr_counter(const struct net_device *netdev,
+                                    struct can_berr_counter *bec)
+{
+       struct esd_usb2_net_priv *priv = netdev_priv(netdev);
+
+       bec->txerr = priv->bec.txerr;
+       bec->rxerr = priv->bec.rxerr;
+
+       return 0;
+}
+
+static int esd_usb2_set_mode(struct net_device *netdev, enum can_mode mode)
+{
+       struct esd_usb2_net_priv *priv = netdev_priv(netdev);
+
+       if (!priv->open_time)
+               return -EINVAL;
+
+       switch (mode) {
+       case CAN_MODE_START:
+               netif_wake_queue(netdev);
+               break;
+
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+static int esd_usb2_probe_one_net(struct usb_interface *intf, int index)
+{
+       struct esd_usb2 *dev = usb_get_intfdata(intf);
+       struct net_device *netdev;
+       struct esd_usb2_net_priv *priv;
+       int err = 0;
+       int i;
+
+       netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS);
+       if (!netdev) {
+               dev_err(&intf->dev, "couldn't alloc candev\n");
+               err = -ENOMEM;
+               goto done;
+       }
+
+       priv = netdev_priv(netdev);
+
+       init_usb_anchor(&priv->tx_submitted);
+       atomic_set(&priv->active_tx_jobs, 0);
+
+       for (i = 0; i < MAX_TX_URBS; i++)
+               priv->tx_contexts[i].echo_index = MAX_TX_URBS;
+
+       priv->usb2 = dev;
+       priv->netdev = netdev;
+       priv->index = index;
+
+       priv->can.state = CAN_STATE_STOPPED;
+       priv->can.clock.freq = ESD_USB2_CAN_CLOCK;
+       priv->can.bittiming_const = &esd_usb2_bittiming_const;
+       priv->can.do_set_bittiming = esd_usb2_set_bittiming;
+       priv->can.do_set_mode = esd_usb2_set_mode;
+       priv->can.do_get_berr_counter = esd_usb2_get_berr_counter;
+       priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+
+       netdev->flags |= IFF_ECHO; /* we support local echo */
+
+       netdev->netdev_ops = &esd_usb2_netdev_ops;
+
+       SET_NETDEV_DEV(netdev, &intf->dev);
+
+       err = register_candev(netdev);
+       if (err) {
+               dev_err(&intf->dev,
+                       "couldn't register CAN device: %d\n", err);
+               free_candev(netdev);
+               err = -ENOMEM;
+               goto done;
+       }
+
+       dev->nets[index] = priv;
+       dev_info(netdev->dev.parent, "device %s registered\n", netdev->name);
+
+done:
+       return err;
+}
+
+/*
+ * probe function for new USB2 devices
+ *
+ * check version information and number of available
+ * CAN interfaces
+ */
+static int esd_usb2_probe(struct usb_interface *intf,
+                        const struct usb_device_id *id)
+{
+       struct esd_usb2 *dev;
+       struct esd_usb2_msg msg;
+       int i, err;
+
+       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+       if (!dev) {
+               err = -ENOMEM;
+               goto done;
+       }
+
+       dev->udev = interface_to_usbdev(intf);
+
+       init_usb_anchor(&dev->rx_submitted);
+
+       usb_set_intfdata(intf, dev);
+
+       /* query number of CAN interfaces (nets) */
+       msg.msg.hdr.cmd = CMD_VERSION;
+       msg.msg.hdr.len = 2;
+       msg.msg.version.rsvd = 0;
+       msg.msg.version.flags = 0;
+       msg.msg.version.drv_version = 0;
+
+       err = esd_usb2_send_msg(dev, &msg);
+       if (err < 0) {
+               dev_err(&intf->dev, "sending version message failed\n");
+               goto free_dev;
+       }
+
+       err = esd_usb2_wait_msg(dev, &msg);
+       if (err < 0) {
+               dev_err(&intf->dev, "no version message answer\n");
+               goto free_dev;
+       }
+
+       dev->net_count = (int)msg.msg.version_reply.nets;
+       dev->version = le32_to_cpu(msg.msg.version_reply.version);
+
+       if (device_create_file(&intf->dev, &dev_attr_firmware))
+               dev_err(&intf->dev,
+                       "Couldn't create device file for firmware\n");
+
+       if (device_create_file(&intf->dev, &dev_attr_hardware))
+               dev_err(&intf->dev,
+                       "Couldn't create device file for hardware\n");
+
+       if (device_create_file(&intf->dev, &dev_attr_nets))
+               dev_err(&intf->dev,
+                       "Couldn't create device file for nets\n");
+
+       /* do per device probing */
+       for (i = 0; i < dev->net_count; i++)
+               esd_usb2_probe_one_net(intf, i);
+
+       return 0;
+
+free_dev:
+       kfree(dev);
+done:
+       return err;
+}
+
+/*
+ * called by the usb core when the device is removed from the system
+ */
+static void esd_usb2_disconnect(struct usb_interface *intf)
+{
+       struct esd_usb2 *dev = usb_get_intfdata(intf);
+       struct net_device *netdev;
+       int i;
+
+       device_remove_file(&intf->dev, &dev_attr_firmware);
+       device_remove_file(&intf->dev, &dev_attr_hardware);
+       device_remove_file(&intf->dev, &dev_attr_nets);
+
+       usb_set_intfdata(intf, NULL);
+
+       if (dev) {
+               for (i = 0; i < dev->net_count; i++) {
+                       if (dev->nets[i]) {
+                               netdev = dev->nets[i]->netdev;
+                               unregister_netdev(netdev);
+                               free_candev(netdev);
+                       }
+               }
+               unlink_all_urbs(dev);
+       }
+}
+
/* usb specific object needed to register this driver with the usb subsystem */
static struct usb_driver esd_usb2_driver = {
	.name = "esd_usb2",
	.probe = esd_usb2_probe,
	.disconnect = esd_usb2_disconnect,
	.id_table = esd_usb2_table,
};
+
+static int __init esd_usb2_init(void)
+{
+       int err;
+
+       /* register this driver with the USB subsystem */
+       err = usb_register(&esd_usb2_driver);
+
+       if (err) {
+               err("usb_register failed. Error number %d\n", err);
+               return err;
+       }
+
+       return 0;
+}
+module_init(esd_usb2_init);
+
/* Module exit: deregister this driver with the USB subsystem */
static void __exit esd_usb2_exit(void)
{
	/* deregister this driver with the USB subsystem */
	usb_deregister(&esd_usb2_driver);
}
module_exit(esd_usb2_exit);
index 5ecf0bcf372d5fec31c71b3acc2b59fb1f7a577c..09610323a948cab243d083c1a8435853b8893184 100644 (file)
@@ -40,9 +40,9 @@
 
 #include "cnic_if.h"
 #include "bnx2.h"
-#include "bnx2x_reg.h"
-#include "bnx2x_fw_defs.h"
-#include "bnx2x_hsi.h"
+#include "bnx2x/bnx2x_reg.h"
+#include "bnx2x/bnx2x_fw_defs.h"
+#include "bnx2x/bnx2x_hsi.h"
 #include "../scsi/bnx2i/57xx_iscsi_constants.h"
 #include "../scsi/bnx2i/57xx_iscsi_hsi.h"
 #include "cnic.h"
index 95a8ba0759f1aa61bf13517db07d94f644897443..427c451be1a78d64b6ed7ca4e710ce4fddee2192 100644 (file)
@@ -679,14 +679,6 @@ int t3_seeprom_wp(struct adapter *adapter, int enable)
        return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
 }
 
-/*
- * Convert a character holding a hex digit to a number.
- */
-static unsigned int hex2int(unsigned char c)
-{
-       return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
-}
-
 /**
  *     get_vpd_params - read VPD parameters from VPD EEPROM
  *     @adapter: adapter to read
@@ -727,15 +719,15 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
                p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
                p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
        } else {
-               p->port_type[0] = hex2int(vpd.port0_data[0]);
-               p->port_type[1] = hex2int(vpd.port1_data[0]);
+               p->port_type[0] = hex_to_bin(vpd.port0_data[0]);
+               p->port_type[1] = hex_to_bin(vpd.port1_data[0]);
                p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
                p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
        }
 
        for (i = 0; i < 6; i++)
-               p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
-                                hex2int(vpd.na_data[2 * i + 1]);
+               p->eth_base[i] = hex_to_bin(vpd.na_data[2 * i]) * 16 +
+                                hex_to_bin(vpd.na_data[2 * i + 1]);
        return 0;
 }
 
index 4769c1c58e8586c1f431e203277ca785dbaec416..6e562c0dad7dee0a309b87a0e7cec03a91cf69ec 100644 (file)
@@ -482,7 +482,8 @@ struct adapter {
        struct pci_dev *pdev;
        struct device *pdev_dev;
        unsigned long registered_device_map;
-       unsigned long flags;
+       unsigned int fn;
+       unsigned int flags;
 
        const char *name;
        int msg_enable;
index 0af6d6750a9d33c8f7696cb7ce914dfb078a5e88..c327527fbbc854d6cf4f63d0457221cf434869e6 100644 (file)
@@ -67,7 +67,7 @@
 #include "t4fw_api.h"
 #include "l2t.h"
 
-#define DRV_VERSION "1.0.0-ko"
+#define DRV_VERSION "1.3.0-ko"
 #define DRV_DESC "Chelsio T4 Network Driver"
 
 /*
@@ -171,10 +171,20 @@ enum {
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
 
-#define CH_DEVICE(devid) { PCI_VDEVICE(CHELSIO, devid), 0 }
+#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
 
 static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
-       CH_DEVICE(0xa000),  /* PE10K */
+       CH_DEVICE(0xa000, 0),  /* PE10K */
+       CH_DEVICE(0x4001, 0),
+       CH_DEVICE(0x4002, 0),
+       CH_DEVICE(0x4003, 0),
+       CH_DEVICE(0x4004, 0),
+       CH_DEVICE(0x4005, 0),
+       CH_DEVICE(0x4006, 0),
+       CH_DEVICE(0x4007, 0),
+       CH_DEVICE(0x4008, 0),
+       CH_DEVICE(0x4009, 0),
+       CH_DEVICE(0x400a, 0),
        { 0, }
 };
 
@@ -314,12 +324,13 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
        int uc_cnt = netdev_uc_count(dev);
        int mc_cnt = netdev_mc_count(dev);
        const struct port_info *pi = netdev_priv(dev);
+       unsigned int mb = pi->adapter->fn;
 
        /* first do the secondary unicast addresses */
        netdev_for_each_uc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
-                       ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
+                       ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &uhash, sleep);
                        if (ret < 0)
                                return ret;
@@ -333,7 +344,7 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
        netdev_for_each_mc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
-                       ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
+                       ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &mhash, sleep);
                        if (ret < 0)
                                return ret;
@@ -343,7 +354,7 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
                }
        }
 
-       return t4_set_addr_hash(pi->adapter, 0, pi->viid, uhash != 0,
+       return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
                                uhash | mhash, sleep);
 }
 
@@ -358,7 +369,7 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
 
        ret = set_addr_filters(dev, sleep_ok);
        if (ret == 0)
-               ret = t4_set_rxmode(pi->adapter, 0, pi->viid, mtu,
+               ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
                                    (dev->flags & IFF_PROMISC) ? 1 : 0,
                                    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
                                    sleep_ok);
@@ -375,15 +386,16 @@ static int link_start(struct net_device *dev)
 {
        int ret;
        struct port_info *pi = netdev_priv(dev);
+       unsigned int mb = pi->adapter->fn;
 
        /*
         * We do not set address filters and promiscuity here, the stack does
         * that step explicitly.
         */
-       ret = t4_set_rxmode(pi->adapter, 0, pi->viid, dev->mtu, -1, -1, -1,
+       ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
                            pi->vlan_grp != NULL, true);
        if (ret == 0) {
-               ret = t4_change_mac(pi->adapter, 0, pi->viid,
+               ret = t4_change_mac(pi->adapter, mb, pi->viid,
                                    pi->xact_addr_filt, dev->dev_addr, true,
                                    true);
                if (ret >= 0) {
@@ -392,9 +404,10 @@ static int link_start(struct net_device *dev)
                }
        }
        if (ret == 0)
-               ret = t4_link_start(pi->adapter, 0, pi->tx_chan, &pi->link_cfg);
+               ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
+                                   &pi->link_cfg);
        if (ret == 0)
-               ret = t4_enable_vi(pi->adapter, 0, pi->viid, true, true);
+               ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
        return ret;
 }
 
@@ -618,8 +631,8 @@ static int write_rss(const struct port_info *pi, const u16 *queues)
        for (i = 0; i < pi->rss_size; i++, queues++)
                rss[i] = q[*queues].rspq.abs_id;
 
-       err = t4_config_rss_range(pi->adapter, 0, pi->viid, 0, pi->rss_size,
-                                 rss, pi->rss_size);
+       err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
+                                 pi->rss_size, rss, pi->rss_size);
        kfree(rss);
        return err;
 }
@@ -1307,16 +1320,18 @@ static int restart_autoneg(struct net_device *dev)
                return -EAGAIN;
        if (p->link_cfg.autoneg != AUTONEG_ENABLE)
                return -EINVAL;
-       t4_restart_aneg(p->adapter, 0, p->tx_chan);
+       t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
        return 0;
 }
 
 static int identify_port(struct net_device *dev, u32 data)
 {
+       struct adapter *adap = netdev2adap(dev);
+
        if (data == 0)
                data = 2;     /* default to 2 seconds */
 
-       return t4_identify_port(netdev2adap(dev), 0, netdev2pinfo(dev)->viid,
+       return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid,
                                data * 5);
 }
 
@@ -1456,7 +1471,8 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        lc->autoneg = cmd->autoneg;
 
        if (netif_running(dev))
-               return t4_link_start(p->adapter, 0, p->tx_chan, lc);
+               return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
+                                    lc);
        return 0;
 }
 
@@ -1488,7 +1504,8 @@ static int set_pauseparam(struct net_device *dev,
        if (epause->tx_pause)
                lc->requested_fc |= PAUSE_TX;
        if (netif_running(dev))
-               return t4_link_start(p->adapter, 0, p->tx_chan, lc);
+               return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
+                                    lc);
        return 0;
 }
 
@@ -1620,7 +1637,8 @@ static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
                        v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
                            FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
                            FW_PARAMS_PARAM_YZ(q->cntxt_id);
-                       err = t4_set_params(adap, 0, 0, 0, 1, &v, &new_idx);
+                       err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
+                                           &new_idx);
                        if (err)
                                return err;
                }
@@ -1808,12 +1826,14 @@ static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
        return err;
 }
 
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+
 static int set_tso(struct net_device *dev, u32 value)
 {
        if (value)
-               dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+               dev->features |= TSO_FLAGS;
        else
-               dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+               dev->features &= ~TSO_FLAGS;
        return 0;
 }
 
@@ -2494,9 +2514,11 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
        lli.adapter_type = adap->params.rev;
        lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
        lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
-                       t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF));
+                       t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
+                       (adap->fn * 4));
        lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
-                       t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF));
+                       t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
+                       (adap->fn * 4));
        lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
        lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
        lli.fw_vers = adap->params.fw_vers;
@@ -2713,7 +2735,7 @@ static int cxgb_close(struct net_device *dev)
 
        netif_tx_stop_all_queues(dev);
        netif_carrier_off(dev);
-       return t4_enable_vi(adapter, 0, pi->viid, false, false);
+       return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
 }
 
 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
@@ -2760,6 +2782,7 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
 
 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 {
+       unsigned int mbox;
        int ret = 0, prtad, devad;
        struct port_info *pi = netdev_priv(dev);
        struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
@@ -2782,11 +2805,12 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
                } else
                        return -EINVAL;
 
+               mbox = pi->adapter->fn;
                if (cmd == SIOCGMIIREG)
-                       ret = t4_mdio_rd(pi->adapter, 0, prtad, devad,
+                       ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
                                         data->reg_num, &data->val_out);
                else
-                       ret = t4_mdio_wr(pi->adapter, 0, prtad, devad,
+                       ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
                                         data->reg_num, data->val_in);
                break;
        default:
@@ -2808,8 +2832,8 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
 
        if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
                return -EINVAL;
-       ret = t4_set_rxmode(pi->adapter, 0, pi->viid, new_mtu, -1, -1, -1, -1,
-                           true);
+       ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
+                           -1, -1, -1, true);
        if (!ret)
                dev->mtu = new_mtu;
        return ret;
@@ -2824,8 +2848,8 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
        if (!is_valid_ether_addr(addr->sa_data))
                return -EINVAL;
 
-       ret = t4_change_mac(pi->adapter, 0, pi->viid, pi->xact_addr_filt,
-                           addr->sa_data, true, true);
+       ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
+                           pi->xact_addr_filt, addr->sa_data, true, true);
        if (ret < 0)
                return ret;
 
@@ -2839,8 +2863,8 @@ static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
        struct port_info *pi = netdev_priv(dev);
 
        pi->vlan_grp = grp;
-       t4_set_rxmode(pi->adapter, 0, pi->viid, -1, -1, -1, -1, grp != NULL,
-                     true);
+       t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1, -1, -1, -1,
+                     grp != NULL, true);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2897,6 +2921,21 @@ static void setup_memwin(struct adapter *adap)
        t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
                     (bar0 + MEMWIN2_BASE) | BIR(0) |
                     WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
+       if (adap->vres.ocq.size) {
+               unsigned int start, sz_kb;
+
+               start = pci_resource_start(adap->pdev, 2) +
+                       OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
+               sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
+               t4_write_reg(adap,
+                            PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
+                            start | BIR(1) | WINDOW(ilog2(sz_kb)));
+               t4_write_reg(adap,
+                            PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
+                            adap->vres.ocq.start);
+               t4_read_reg(adap,
+                           PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
+       }
 }
 
 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
@@ -2909,7 +2948,7 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
        c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
                               FW_CMD_REQUEST | FW_CMD_READ);
        c->retval_len16 = htonl(FW_LEN16(*c));
-       ret = t4_wr_mbox(adap, 0, c, sizeof(*c), c);
+       ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
        if (ret < 0)
                return ret;
 
@@ -2925,37 +2964,33 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
        }
        c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
                               FW_CMD_REQUEST | FW_CMD_WRITE);
-       ret = t4_wr_mbox(adap, 0, c, sizeof(*c), NULL);
+       ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
        if (ret < 0)
                return ret;
 
-       ret = t4_config_glbl_rss(adap, 0,
+       ret = t4_config_glbl_rss(adap, adap->fn,
                                 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
                                 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
                                 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
        if (ret < 0)
                return ret;
 
-       ret = t4_cfg_pfvf(adap, 0, 0, 0, MAX_EGRQ, 64, MAX_INGQ, 0, 0, 4,
-                         0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
+       ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
+                         0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
        if (ret < 0)
                return ret;
 
        t4_sge_init(adap);
 
-       /* get basic stuff going */
-       ret = t4_early_init(adap, 0);
-       if (ret < 0)
-               return ret;
-
        /* tweak some settings */
        t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
        t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
        t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
        v = t4_read_reg(adap, TP_PIO_DATA);
        t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
-       setup_memwin(adap);
-       return 0;
+
+       /* get basic stuff going */
+       return t4_early_init(adap, adap->fn);
 }
 
 /*
@@ -2983,7 +3018,7 @@ static int adap_init0(struct adapter *adap)
                return ret;
 
        /* contact FW, request master */
-       ret = t4_fw_hello(adap, 0, 0, MASTER_MUST, &state);
+       ret = t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, &state);
        if (ret < 0) {
                dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
                        ret);
@@ -2991,7 +3026,7 @@ static int adap_init0(struct adapter *adap)
        }
 
        /* reset device */
-       ret = t4_fw_reset(adap, 0, PIORSTMODE | PIORST);
+       ret = t4_fw_reset(adap, adap->fn, PIORSTMODE | PIORST);
        if (ret < 0)
                goto bye;
 
@@ -3007,7 +3042,7 @@ static int adap_init0(struct adapter *adap)
         FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
 
        params[0] = FW_PARAM_DEV(CCLK);
-       ret = t4_query_params(adap, 0, 0, 0, 1, params, val);
+       ret = t4_query_params(adap, adap->fn, adap->fn, 0, 1, params, val);
        if (ret < 0)
                goto bye;
        adap->params.vpd.cclk = val[0];
@@ -3018,14 +3053,15 @@ static int adap_init0(struct adapter *adap)
 
 #define FW_PARAM_PFVF(param) \
        (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
-        FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
+        FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
+        FW_PARAMS_PARAM_Y(adap->fn))
 
        params[0] = FW_PARAM_DEV(PORTVEC);
        params[1] = FW_PARAM_PFVF(L2T_START);
        params[2] = FW_PARAM_PFVF(L2T_END);
        params[3] = FW_PARAM_PFVF(FILTER_START);
        params[4] = FW_PARAM_PFVF(FILTER_END);
-       ret = t4_query_params(adap, 0, 0, 0, 5, params, val);
+       ret = t4_query_params(adap, adap->fn, adap->fn, 0, 5, params, val);
        if (ret < 0)
                goto bye;
        port_vec = val[0];
@@ -3040,7 +3076,8 @@ static int adap_init0(struct adapter *adap)
                params[3] = FW_PARAM_PFVF(TDDP_START);
                params[4] = FW_PARAM_PFVF(TDDP_END);
                params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
-               ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
+               ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
+                                     val);
                if (ret < 0)
                        goto bye;
                adap->tids.ntids = val[0];
@@ -3059,7 +3096,8 @@ static int adap_init0(struct adapter *adap)
                params[3] = FW_PARAM_PFVF(RQ_END);
                params[4] = FW_PARAM_PFVF(PBL_START);
                params[5] = FW_PARAM_PFVF(PBL_END);
-               ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
+               ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
+                                     val);
                if (ret < 0)
                        goto bye;
                adap->vres.stag.start = val[0];
@@ -3073,18 +3111,24 @@ static int adap_init0(struct adapter *adap)
                params[1] = FW_PARAM_PFVF(SQRQ_END);
                params[2] = FW_PARAM_PFVF(CQ_START);
                params[3] = FW_PARAM_PFVF(CQ_END);
-               ret = t4_query_params(adap, 0, 0, 0, 4, params, val);
+               params[4] = FW_PARAM_PFVF(OCQ_START);
+               params[5] = FW_PARAM_PFVF(OCQ_END);
+               ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
+                                     val);
                if (ret < 0)
                        goto bye;
                adap->vres.qp.start = val[0];
                adap->vres.qp.size = val[1] - val[0] + 1;
                adap->vres.cq.start = val[2];
                adap->vres.cq.size = val[3] - val[2] + 1;
+               adap->vres.ocq.start = val[4];
+               adap->vres.ocq.size = val[5] - val[4] + 1;
        }
        if (c.iscsicaps) {
                params[0] = FW_PARAM_PFVF(ISCSI_START);
                params[1] = FW_PARAM_PFVF(ISCSI_END);
-               ret = t4_query_params(adap, 0, 0, 0, 2, params, val);
+               ret = t4_query_params(adap, adap->fn, adap->fn, 0, 2, params,
+                                     val);
                if (ret < 0)
                        goto bye;
                adap->vres.iscsi.start = val[0];
@@ -3122,7 +3166,7 @@ static int adap_init0(struct adapter *adap)
 
                        /* VF numbering starts at 1! */
                        for (vf = 1; vf <= num_vf[pf]; vf++) {
-                               ret = t4_cfg_pfvf(adap, 0, pf, vf,
+                               ret = t4_cfg_pfvf(adap, adap->fn, pf, vf,
                                                  VFRES_NEQ, VFRES_NETHCTRL,
                                                  VFRES_NIQFLINT, VFRES_NIQ,
                                                  VFRES_TC, VFRES_NVI,
@@ -3139,6 +3183,7 @@ static int adap_init0(struct adapter *adap)
        }
 #endif
 
+       setup_memwin(adap);
        return 0;
 
        /*
@@ -3147,7 +3192,7 @@ static int adap_init0(struct adapter *adap)
         * commands.
         */
 bye:   if (ret != -ETIMEDOUT && ret != -EIO)
-               t4_fw_bye(adap, 0);
+               t4_fw_bye(adap, adap->fn);
        return ret;
 }
 
@@ -3203,7 +3248,7 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
 
        if (t4_wait_dev_ready(adap) < 0)
                return PCI_ERS_RESULT_DISCONNECT;
-       if (t4_fw_hello(adap, 0, 0, MASTER_MUST, NULL))
+       if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL))
                return PCI_ERS_RESULT_DISCONNECT;
        adap->flags |= FW_OK;
        if (adap_init1(adap, &c))
@@ -3212,7 +3257,8 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
        for_each_port(adap, i) {
                struct port_info *p = adap2pinfo(adap, i);
 
-               ret = t4_alloc_vi(adap, 0, p->tx_chan, 0, 0, 1, NULL, NULL);
+               ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
+                                 NULL, NULL);
                if (ret < 0)
                        return PCI_ERS_RESULT_DISCONNECT;
                p->viid = ret;
@@ -3221,6 +3267,7 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
 
        t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
                     adap->params.b_wnd);
+       setup_memwin(adap);
        if (cxgb_up(adap))
                return PCI_ERS_RESULT_DISCONNECT;
        return PCI_ERS_RESULT_RECOVERED;
@@ -3516,10 +3563,10 @@ static void free_some_resources(struct adapter *adapter)
                        free_netdev(adapter->port[i]);
                }
        if (adapter->flags & FW_OK)
-               t4_fw_bye(adapter, 0);
+               t4_fw_bye(adapter, adapter->fn);
 }
 
-#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |\
+#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
                   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
 
 static int __devinit init_one(struct pci_dev *pdev,
@@ -3539,9 +3586,9 @@ static int __devinit init_one(struct pci_dev *pdev,
                return err;
        }
 
-       /* We control everything through PF 0 */
+       /* We control everything through one PF */
        func = PCI_FUNC(pdev->devfn);
-       if (func > 0) {
+       if (func != ent->driver_data) {
                pci_save_state(pdev);        /* to restore SR-IOV later */
                goto sriov;
        }
@@ -3587,6 +3634,7 @@ static int __devinit init_one(struct pci_dev *pdev,
 
        adapter->pdev = pdev;
        adapter->pdev_dev = &pdev->dev;
+       adapter->fn = func;
        adapter->name = pci_name(pdev);
        adapter->msg_enable = dflt_msg_enable;
        memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
@@ -3625,7 +3673,7 @@ static int __devinit init_one(struct pci_dev *pdev,
                netif_tx_stop_all_queues(netdev);
                netdev->irq = pdev->irq;
 
-               netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
+               netdev->features |= NETIF_F_SG | TSO_FLAGS;
                netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
                netdev->features |= NETIF_F_GRO | NETIF_F_RXHASH | highdma;
                netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
@@ -3638,7 +3686,7 @@ static int __devinit init_one(struct pci_dev *pdev,
        pci_set_drvdata(pdev, adapter);
 
        if (adapter->flags & FW_OK) {
-               err = t4_port_init(adapter, 0, 0, 0);
+               err = t4_port_init(adapter, func, func, 0);
                if (err)
                        goto out_free_dev;
        }
index 0dc0866df1bfe3caca3c9f09c3237140d8f92300..85d74e751ce00b33d88971f030a73771d2f7f2a4 100644 (file)
@@ -187,8 +187,12 @@ struct cxgb4_virt_res {                      /* virtualized HW resources */
        struct cxgb4_range pbl;
        struct cxgb4_range qp;
        struct cxgb4_range cq;
+       struct cxgb4_range ocq;
 };
 
+#define OCQ_WIN_OFFSET(pdev, vres) \
+       (pci_resource_len((pdev), 2) - roundup_pow_of_two((vres)->ocq.size))
+
 /*
  * Block of information the LLD provides to ULDs attaching to a device.
  */
index 4388f72d586af4ac23356fadc7f0a4881496f944..bf38cfc575655650b46038f9722170c801f75070 100644 (file)
@@ -931,23 +931,23 @@ out_free: dev_kfree_skb(skb);
 
        ssi = skb_shinfo(skb);
        if (ssi->gso_size) {
-               struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
+               struct cpl_tx_pkt_lso *lso = (void *)wr;
                bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
                int l3hdr_len = skb_network_header_len(skb);
                int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
 
                wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
                                       FW_WR_IMMDLEN(sizeof(*lso)));
-               lso->lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
-                                     LSO_FIRST_SLICE | LSO_LAST_SLICE |
-                                     LSO_IPV6(v6) |
-                                     LSO_ETHHDR_LEN(eth_xtra_len / 4) |
-                                     LSO_IPHDR_LEN(l3hdr_len / 4) |
-                                     LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
-               lso->ipid_ofst = htons(0);
-               lso->mss = htons(ssi->gso_size);
-               lso->seqno_offset = htonl(0);
-               lso->len = htonl(skb->len);
+               lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
+                                       LSO_FIRST_SLICE | LSO_LAST_SLICE |
+                                       LSO_IPV6(v6) |
+                                       LSO_ETHHDR_LEN(eth_xtra_len / 4) |
+                                       LSO_IPHDR_LEN(l3hdr_len / 4) |
+                                       LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
+               lso->c.ipid_ofst = htons(0);
+               lso->c.mss = htons(ssi->gso_size);
+               lso->c.seqno_offset = htonl(0);
+               lso->c.len = htonl(skb->len);
                cpl = (void *)(lso + 1);
                cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
                        TXPKT_IPHDR_LEN(l3hdr_len) |
@@ -1593,14 +1593,15 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
 
        if (csum_ok && (pi->rx_offload & RX_CSO) &&
            (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
-               if (!pkt->ip_frag)
+               if (!pkt->ip_frag) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
-               else {
+                       rxq->stats.rx_cso++;
+               } else if (pkt->l2info & htonl(RXF_IP)) {
                        __sum16 c = (__force __sum16)pkt->csum;
                        skb->csum = csum_unfold(c);
                        skb->ip_summed = CHECKSUM_COMPLETE;
+                       rxq->stats.rx_cso++;
                }
-               rxq->stats.rx_cso++;
        } else
                skb->ip_summed = CHECKSUM_NONE;
 
@@ -1998,7 +1999,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
                            FW_CMD_WRITE | FW_CMD_EXEC |
-                           FW_IQ_CMD_PFN(0) | FW_IQ_CMD_VFN(0));
+                           FW_IQ_CMD_PFN(adap->fn) | FW_IQ_CMD_VFN(0));
        c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) |
                                 FW_LEN16(c));
        c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
@@ -2030,7 +2031,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
                c.fl0addr = cpu_to_be64(fl->addr);
        }
 
-       ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
+       ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
        if (ret)
                goto err;
 
@@ -2109,7 +2110,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
                            FW_CMD_WRITE | FW_CMD_EXEC |
-                           FW_EQ_ETH_CMD_PFN(0) | FW_EQ_ETH_CMD_VFN(0));
+                           FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0));
        c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC |
                                 FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
        c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid));
@@ -2122,7 +2123,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
                                  FW_EQ_ETH_CMD_EQSIZE(nentries));
        c.eqaddr = cpu_to_be64(txq->q.phys_addr);
 
-       ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
+       ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
        if (ret) {
                kfree(txq->q.sdesc);
                txq->q.sdesc = NULL;
@@ -2159,7 +2160,8 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
 
        c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
                            FW_CMD_WRITE | FW_CMD_EXEC |
-                           FW_EQ_CTRL_CMD_PFN(0) | FW_EQ_CTRL_CMD_VFN(0));
+                           FW_EQ_CTRL_CMD_PFN(adap->fn) |
+                           FW_EQ_CTRL_CMD_VFN(0));
        c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC |
                                 FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
        c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid));
@@ -2173,7 +2175,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
                                  FW_EQ_CTRL_CMD_EQSIZE(nentries));
        c.eqaddr = cpu_to_be64(txq->q.phys_addr);
 
-       ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
+       ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
        if (ret) {
                dma_free_coherent(adap->pdev_dev,
                                  nentries * sizeof(struct tx_desc),
@@ -2209,7 +2211,8 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
                            FW_CMD_WRITE | FW_CMD_EXEC |
-                           FW_EQ_OFLD_CMD_PFN(0) | FW_EQ_OFLD_CMD_VFN(0));
+                           FW_EQ_OFLD_CMD_PFN(adap->fn) |
+                           FW_EQ_OFLD_CMD_VFN(0));
        c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
                                 FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
        c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) |
@@ -2221,7 +2224,7 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
                                  FW_EQ_OFLD_CMD_EQSIZE(nentries));
        c.eqaddr = cpu_to_be64(txq->q.phys_addr);
 
-       ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
+       ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
        if (ret) {
                kfree(txq->q.sdesc);
                txq->q.sdesc = NULL;
@@ -2257,8 +2260,8 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
        unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
 
        adap->sge.ingr_map[rq->cntxt_id] = NULL;
-       t4_iq_free(adap, 0, 0, 0, FW_IQ_TYPE_FL_INT_CAP, rq->cntxt_id, fl_id,
-                  0xffff);
+       t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
+                  rq->cntxt_id, fl_id, 0xffff);
        dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
                          rq->desc, rq->phys_addr);
        netif_napi_del(&rq->napi);
@@ -2295,7 +2298,8 @@ void t4_free_sge_resources(struct adapter *adap)
                if (eq->rspq.desc)
                        free_rspq_fl(adap, &eq->rspq, &eq->fl);
                if (etq->q.desc) {
-                       t4_eth_eq_free(adap, 0, 0, 0, etq->q.cntxt_id);
+                       t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
+                                      etq->q.cntxt_id);
                        free_tx_desc(adap, &etq->q, etq->q.in_use, true);
                        kfree(etq->q.sdesc);
                        free_txq(adap, &etq->q);
@@ -2318,7 +2322,8 @@ void t4_free_sge_resources(struct adapter *adap)
 
                if (q->q.desc) {
                        tasklet_kill(&q->qresume_tsk);
-                       t4_ofld_eq_free(adap, 0, 0, 0, q->q.cntxt_id);
+                       t4_ofld_eq_free(adap, adap->fn, adap->fn, 0,
+                                       q->q.cntxt_id);
                        free_tx_desc(adap, &q->q, q->q.in_use, false);
                        kfree(q->q.sdesc);
                        __skb_queue_purge(&q->sendq);
@@ -2332,7 +2337,8 @@ void t4_free_sge_resources(struct adapter *adap)
 
                if (cq->q.desc) {
                        tasklet_kill(&cq->qresume_tsk);
-                       t4_ctrl_eq_free(adap, 0, 0, 0, cq->q.cntxt_id);
+                       t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0,
+                                       cq->q.cntxt_id);
                        __skb_queue_purge(&cq->sendq);
                        free_txq(adap, &cq->q);
                }
@@ -2400,6 +2406,7 @@ void t4_sge_stop(struct adapter *adap)
  */
 void t4_sge_init(struct adapter *adap)
 {
+       unsigned int i, v;
        struct sge *s = &adap->sge;
        unsigned int fl_align_log = ilog2(FL_ALIGN);
 
@@ -2408,8 +2415,10 @@ void t4_sge_init(struct adapter *adap)
                         INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) |
                         RXPKTCPLMODE |
                         (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0));
-       t4_set_reg_field(adap, SGE_HOST_PAGE_SIZE, HOSTPAGESIZEPF0_MASK,
-                        HOSTPAGESIZEPF0(PAGE_SHIFT - 10));
+
+       for (i = v = 0; i < 32; i += 4)
+               v |= (PAGE_SHIFT - 10) << i;
+       t4_write_reg(adap, SGE_HOST_PAGE_SIZE, v);
        t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE);
 #if FL_PG_ORDER > 0
        t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER);
index ab46797623b6ad1afdba4b81db61f0dc75d95f9d..9e1a4b49b47a56821851d065eb1e3e6f64cbb86b 100644 (file)
@@ -1444,7 +1444,7 @@ static void pl_intr_handler(struct adapter *adap)
                t4_fatal_err(adap);
 }
 
-#define PF_INTR_MASK (PFSW | PFCIM)
+#define PF_INTR_MASK (PFSW)
 #define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
                EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
                CPL_SWITCH | SGE | ULP_TX)
index e875d095af39571d702b8e5995b2df341b0bf22a..10a055565776cfceb4730df1168b44662d4d9761 100644 (file)
@@ -135,5 +135,5 @@ struct rsp_ctrl {
 
 #define QINTR_CNT_EN       0x1
 #define QINTR_TIMER_IDX(x) ((x) << 1)
-#define QINTR_TIMER_IDX_GET(x) (((x) << 1) & 0x7)
+#define QINTR_TIMER_IDX_GET(x) (((x) >> 1) & 0x7)
 #endif /* __T4_HW_H */
index 623932b39b5dcd964d11ea3964ec2a9f80500d08..a550d0c706f3e6d70fe318c9282274355aaa62c5 100644 (file)
@@ -529,6 +529,8 @@ struct cpl_rx_pkt {
        __be32 l2info;
 #define RXF_UDP (1 << 22)
 #define RXF_TCP (1 << 23)
+#define RXF_IP  (1 << 24)
+#define RXF_IP6 (1 << 25)
        __be16 hdr_len;
        __be16 err_vec;
 };
index bf21c148fb2b28e83daeed2316f4e1778ee6d004..0adc5bcec7c41b029508da3c840549501ea0ae78 100644 (file)
 #define  WINDOW_MASK     0x000000ffU
 #define  WINDOW_SHIFT    0
 #define  WINDOW(x)       ((x) << WINDOW_SHIFT)
+#define PCIE_MEM_ACCESS_OFFSET 0x306c
 
 #define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908
 #define  RNPP 0x80000000U
index ca45df8954dd1eac1a8631736baf70bb96c36e60..0969f2fbc1b0f0a977b1e470a1f4d60e19be215c 100644 (file)
@@ -485,6 +485,8 @@ enum fw_params_param_pfvf {
        FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH = 0x20,
        FW_PARAMS_PARAM_PFVF_VIID       = 0x24,
        FW_PARAMS_PARAM_PFVF_CPMASK     = 0x25,
+       FW_PARAMS_PARAM_PFVF_OCQ_START  = 0x26,
+       FW_PARAMS_PARAM_PFVF_OCQ_END    = 0x27,
 };
 
 /*
index 25e14d2da7554857bd0be7a6eac90585c88dd3ca..d0824e322068d4842ebdee10a7cce87a87a7247c 100644 (file)
@@ -298,6 +298,11 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
 #define EMAC_CTRL_EWCTL                (0x4)
 #define EMAC_CTRL_EWINTTCNT    (0x8)
 
+/* EMAC DM644x control module masks */
+#define EMAC_DM644X_EWINTCNT_MASK      0x1FFFF
+#define EMAC_DM644X_INTMIN_INTVL       0x1
+#define EMAC_DM644X_INTMAX_INTVL       (EMAC_DM644X_EWINTCNT_MASK)
+
 /* EMAC MDIO related */
 /* Mask & Control defines */
 #define MDIO_CONTROL_CLKDIV    (0xFF)
@@ -318,8 +323,20 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
 #define MDIO_CONTROL           (0x04)
 
 /* EMAC DM646X control module registers */
-#define EMAC_DM646X_CMRXINTEN  (0x14)
-#define EMAC_DM646X_CMTXINTEN  (0x18)
+#define EMAC_DM646X_CMINTCTRL  0x0C
+#define EMAC_DM646X_CMRXINTEN  0x14
+#define EMAC_DM646X_CMTXINTEN  0x18
+#define EMAC_DM646X_CMRXINTMAX 0x70
+#define EMAC_DM646X_CMTXINTMAX 0x74
+
+/* EMAC DM646X control module masks */
+#define EMAC_DM646X_INTPACEEN          (0x3 << 16)
+#define EMAC_DM646X_INTPRESCALE_MASK   (0x7FF << 0)
+#define EMAC_DM646X_CMINTMAX_CNT       63
+#define EMAC_DM646X_CMINTMIN_CNT       2
+#define EMAC_DM646X_CMINTMAX_INTVL     (1000 / EMAC_DM646X_CMINTMIN_CNT)
+#define EMAC_DM646X_CMINTMIN_INTVL     ((1000 / EMAC_DM646X_CMINTMAX_CNT) + 1)
+
 
 /* EMAC EOI codes for C0 */
 #define EMAC_DM646X_MAC_EOI_C0_RXEN    (0x01)
@@ -468,6 +485,8 @@ struct emac_priv {
        u32 duplex; /* Link duplex: 0=Half, 1=Full */
        u32 rx_buf_size;
        u32 isr_count;
+       u32 coal_intvl;
+       u32 bus_freq_mhz;
        u8 rmii_en;
        u8 version;
        u32 mac_hash1;
@@ -545,9 +564,11 @@ static void emac_dump_regs(struct emac_priv *priv)
 
        /* Print important registers in EMAC */
        dev_info(emac_dev, "EMAC Basic registers\n");
-       dev_info(emac_dev, "EMAC: EWCTL: %08X, EWINTTCNT: %08X\n",
-               emac_ctrl_read(EMAC_CTRL_EWCTL),
-               emac_ctrl_read(EMAC_CTRL_EWINTTCNT));
+       if (priv->version == EMAC_VERSION_1) {
+               dev_info(emac_dev, "EMAC: EWCTL: %08X, EWINTTCNT: %08X\n",
+                       emac_ctrl_read(EMAC_CTRL_EWCTL),
+                       emac_ctrl_read(EMAC_CTRL_EWINTTCNT));
+       }
        dev_info(emac_dev, "EMAC: TXID: %08X %s, RXID: %08X %s\n",
                emac_read(EMAC_TXIDVER),
                ((emac_read(EMAC_TXCONTROL)) ? "enabled" : "disabled"),
@@ -690,6 +711,103 @@ static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
 
 }
 
+/**
+ * emac_get_coalesce : Get interrupt coalesce settings for this device
+ * @ndev : The DaVinci EMAC network adapter
+ * @coal : ethtool coalesce settings structure
+ *
+ * Fetch the current interrupt coalesce settings
+ *
+ */
+static int emac_get_coalesce(struct net_device *ndev,
+                               struct ethtool_coalesce *coal)
+{
+       struct emac_priv *priv = netdev_priv(ndev);
+
+       coal->rx_coalesce_usecs = priv->coal_intvl;
+       return 0;
+
+}
+
+/**
+ * emac_set_coalesce : Set interrupt coalesce settings for this device
+ * @ndev : The DaVinci EMAC network adapter
+ * @coal : ethtool coalesce settings structure
+ *
+ * Set interrupt coalesce parameters
+ *
+ */
+static int emac_set_coalesce(struct net_device *ndev,
+                               struct ethtool_coalesce *coal)
+{
+       struct emac_priv *priv = netdev_priv(ndev);
+       u32 int_ctrl, num_interrupts = 0;
+       u32 prescale = 0, addnl_dvdr = 1, coal_intvl = 0;
+
+       if (!coal->rx_coalesce_usecs)
+               return -EINVAL;
+
+       coal_intvl = coal->rx_coalesce_usecs;
+
+       switch (priv->version) {
+       case EMAC_VERSION_2:
+               int_ctrl =  emac_ctrl_read(EMAC_DM646X_CMINTCTRL);
+               prescale = priv->bus_freq_mhz * 4;
+
+               if (coal_intvl < EMAC_DM646X_CMINTMIN_INTVL)
+                       coal_intvl = EMAC_DM646X_CMINTMIN_INTVL;
+
+               if (coal_intvl > EMAC_DM646X_CMINTMAX_INTVL) {
+                       /*
+                        * Interrupt pacer works with 4us Pulse, we can
+                        * throttle further by dilating the 4us pulse.
+                        */
+                       addnl_dvdr = EMAC_DM646X_INTPRESCALE_MASK / prescale;
+
+                       if (addnl_dvdr > 1) {
+                               prescale *= addnl_dvdr;
+                               if (coal_intvl > (EMAC_DM646X_CMINTMAX_INTVL
+                                                       * addnl_dvdr))
+                                       coal_intvl = (EMAC_DM646X_CMINTMAX_INTVL
+                                                       * addnl_dvdr);
+                       } else {
+                               addnl_dvdr = 1;
+                               coal_intvl = EMAC_DM646X_CMINTMAX_INTVL;
+                       }
+               }
+
+               num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
+
+               int_ctrl |= EMAC_DM646X_INTPACEEN;
+               int_ctrl &= (~EMAC_DM646X_INTPRESCALE_MASK);
+               int_ctrl |= (prescale & EMAC_DM646X_INTPRESCALE_MASK);
+               emac_ctrl_write(EMAC_DM646X_CMINTCTRL, int_ctrl);
+
+               emac_ctrl_write(EMAC_DM646X_CMRXINTMAX, num_interrupts);
+               emac_ctrl_write(EMAC_DM646X_CMTXINTMAX, num_interrupts);
+
+               break;
+       default:
+               int_ctrl = emac_ctrl_read(EMAC_CTRL_EWINTTCNT);
+               int_ctrl &= (~EMAC_DM644X_EWINTCNT_MASK);
+               prescale = coal_intvl * priv->bus_freq_mhz;
+               if (prescale > EMAC_DM644X_EWINTCNT_MASK) {
+                       prescale = EMAC_DM644X_EWINTCNT_MASK;
+                       coal_intvl = prescale / priv->bus_freq_mhz;
+               }
+               emac_ctrl_write(EMAC_CTRL_EWINTTCNT, (int_ctrl | prescale));
+
+               break;
+       }
+
+       printk(KERN_INFO"Set coalesce to %d usecs.\n", coal_intvl);
+       priv->coal_intvl = coal_intvl;
+
+       return 0;
+
+}
+
+
 /**
  * ethtool_ops: DaVinci EMAC Ethtool structure
  *
@@ -701,6 +819,8 @@ static const struct ethtool_ops ethtool_ops = {
        .get_settings = emac_get_settings,
        .set_settings = emac_set_settings,
        .get_link = ethtool_op_get_link,
+       .get_coalesce = emac_get_coalesce,
+       .set_coalesce =  emac_set_coalesce,
 };
 
 /**
@@ -1182,8 +1302,8 @@ static int emac_net_tx_complete(struct emac_priv *priv,
        struct net_device *ndev = priv->ndev;
        u32 cnt;
 
-       if (unlikely(num_tokens && netif_queue_stopped(dev)))
-               netif_start_queue(dev);
+       if (unlikely(num_tokens && netif_queue_stopped(ndev)))
+               netif_start_queue(ndev);
        for (cnt = 0; cnt < num_tokens; cnt++) {
                struct sk_buff *skb = (struct sk_buff *)net_data_tokens[cnt];
                if (skb == NULL)
@@ -2148,7 +2268,7 @@ static int emac_poll(struct napi_struct *napi, int budget)
        struct net_device *ndev = priv->ndev;
        struct device *emac_dev = &ndev->dev;
        u32 status = 0;
-       u32 num_pkts = 0;
+       u32 num_tx_pkts = 0, num_rx_pkts = 0;
 
        /* Check interrupt vectors and call packet processing */
        status = emac_read(EMAC_MACINVECTOR);
@@ -2159,27 +2279,19 @@ static int emac_poll(struct napi_struct *napi, int budget)
                mask = EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC;
 
        if (status & mask) {
-               num_pkts = emac_tx_bdproc(priv, EMAC_DEF_TX_CH,
+               num_tx_pkts = emac_tx_bdproc(priv, EMAC_DEF_TX_CH,
                                          EMAC_DEF_TX_MAX_SERVICE);
        } /* TX processing */
 
-       if (num_pkts)
-               return budget;
-
        mask = EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC;
 
        if (priv->version == EMAC_VERSION_2)
                mask = EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC;
 
        if (status & mask) {
-               num_pkts = emac_rx_bdproc(priv, EMAC_DEF_RX_CH, budget);
+               num_rx_pkts = emac_rx_bdproc(priv, EMAC_DEF_RX_CH, budget);
        } /* RX processing */
 
-       if (num_pkts < budget) {
-               napi_complete(napi);
-               emac_int_enable(priv);
-       }
-
        mask = EMAC_DM644X_MAC_IN_VECTOR_HOST_INT;
        if (priv->version == EMAC_VERSION_2)
                mask = EMAC_DM646X_MAC_IN_VECTOR_HOST_INT;
@@ -2210,9 +2322,12 @@ static int emac_poll(struct napi_struct *napi, int budget)
                                dev_err(emac_dev, "RX Host error %s on ch=%d\n",
                                        &emac_rxhost_errcodes[cause][0], ch);
                }
-       } /* Host error processing */
+       } else if (num_rx_pkts < budget) {
+               napi_complete(napi);
+               emac_int_enable(priv);
+       }
 
-       return num_pkts;
+       return num_rx_pkts;
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2437,6 +2552,14 @@ static int emac_dev_open(struct net_device *ndev)
        /* Start/Enable EMAC hardware */
        emac_hw_enable(priv);
 
+       /* Enable Interrupt pacing if configured */
+       if (priv->coal_intvl != 0) {
+               struct ethtool_coalesce coal;
+
+               coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
+               emac_set_coalesce(ndev, &coal);
+       }
+
        /* find the first phy */
        priv->phydev = NULL;
        if (priv->phy_mask) {
@@ -2677,6 +2800,9 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
        priv->int_enable = pdata->interrupt_enable;
        priv->int_disable = pdata->interrupt_disable;
 
+       priv->coal_intvl = 0;
+       priv->bus_freq_mhz = (u32)(emac_bus_frequency / 1000000);
+
        emac_dev = &ndev->dev;
        /* Get EMAC platform data */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
index 4ea7141f525d513aa899b56e1f358c0845662c67..7c075756611ad9d20e752a99105ead383524d342 100644 (file)
@@ -854,7 +854,7 @@ static int __devinit dnet_probe(struct platform_device *pdev)
        dev = alloc_etherdev(sizeof(*bp));
        if (!dev) {
                dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n");
-               goto err_out;
+               goto err_out_release_mem;
        }
 
        /* TODO: Actually, we have some interesting features... */
@@ -911,7 +911,8 @@ static int __devinit dnet_probe(struct platform_device *pdev)
        if (err)
                dev_warn(&pdev->dev, "Cannot register PHY board fixup.\n");
 
-       if (dnet_mii_init(bp) != 0)
+       err = dnet_mii_init(bp);
+       if (err)
                goto err_out_unregister_netdev;
 
        dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n",
@@ -936,6 +937,8 @@ err_out_iounmap:
        iounmap(bp->regs);
 err_out_free_dev:
        free_netdev(dev);
+err_out_release_mem:
+       release_mem_region(mem_base, mem_size);
 err_out:
        return err;
 }
index 65298a6d9af7ece5589b4f42624f7ce484d75ff4..99288b95aead4460edc009434be71015730ba95f 100644 (file)
@@ -324,18 +324,20 @@ enum e1000_state_t {
 extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
 #define e_dbg(format, arg...) \
        netdev_dbg(e1000_get_hw_dev(hw), format, ## arg)
-#define e_err(format, arg...) \
-       netdev_err(adapter->netdev, format, ## arg)
-#define e_info(format, arg...) \
-       netdev_info(adapter->netdev, format, ## arg)
-#define e_warn(format, arg...) \
-       netdev_warn(adapter->netdev, format, ## arg)
-#define e_notice(format, arg...) \
-       netdev_notice(adapter->netdev, format, ## arg)
+#define e_err(msglvl, format, arg...) \
+       netif_err(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_info(msglvl, format, arg...) \
+       netif_info(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_warn(msglvl, format, arg...) \
+       netif_warn(adapter, msglvl, adapter->netdev, format, ## arg)
+#define e_notice(msglvl, format, arg...) \
+       netif_notice(adapter, msglvl, adapter->netdev, format, ## arg)
 #define e_dev_info(format, arg...) \
        dev_info(&adapter->pdev->dev, format, ## arg)
 #define e_dev_warn(format, arg...) \
        dev_warn(&adapter->pdev->dev, format, ## arg)
+#define e_dev_err(format, arg...) \
+       dev_err(&adapter->pdev->dev, format, ## arg)
 
 extern char e1000_driver_name[];
 extern const char e1000_driver_version[];
index d5ff029aa7b226f6ab60c4d96e46c231f19b9c16..f4d0922ec65b8ee6d7541b2ceee8e46cd85bb715 100644 (file)
@@ -346,7 +346,7 @@ static int e1000_set_tso(struct net_device *netdev, u32 data)
 
        netdev->features &= ~NETIF_F_TSO6;
 
-       e_info("TSO is %s\n", data ? "Enabled" : "Disabled");
+       e_info(probe, "TSO is %s\n", data ? "Enabled" : "Disabled");
        adapter->tso_force = true;
        return 0;
 }
@@ -714,9 +714,9 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg,
                writel(write & test[i], address);
                read = readl(address);
                if (read != (write & test[i] & mask)) {
-                       e_info("pattern test reg %04X failed: "
-                              "got 0x%08X expected 0x%08X\n",
-                              reg, read, (write & test[i] & mask));
+                       e_err(drv, "pattern test reg %04X failed: "
+                             "got 0x%08X expected 0x%08X\n",
+                             reg, read, (write & test[i] & mask));
                        *data = reg;
                        return true;
                }
@@ -734,7 +734,7 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg,
        writel(write & mask, address);
        read = readl(address);
        if ((read & mask) != (write & mask)) {
-               e_err("set/check reg %04X test failed: "
+               e_err(drv, "set/check reg %04X test failed: "
                      "got 0x%08X expected 0x%08X\n",
                      reg, (read & mask), (write & mask));
                *data = reg;
@@ -779,7 +779,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
        ew32(STATUS, toggle);
        after = er32(STATUS) & toggle;
        if (value != after) {
-               e_err("failed STATUS register test got: "
+               e_err(drv, "failed STATUS register test got: "
                      "0x%08X expected: 0x%08X\n", after, value);
                *data = 1;
                return 1;
@@ -894,7 +894,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
                *data = 1;
                return -1;
        }
-       e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared"));
+       e_info(hw, "testing %s interrupt\n", (shared_int ?
+              "shared" : "unshared"));
 
        /* Disable all the interrupts */
        ew32(IMC, 0xFFFFFFFF);
@@ -1561,7 +1562,7 @@ static void e1000_diag_test(struct net_device *netdev,
                u8 forced_speed_duplex = hw->forced_speed_duplex;
                u8 autoneg = hw->autoneg;
 
-               e_info("offline testing starting\n");
+               e_info(hw, "offline testing starting\n");
 
                /* Link test performed before hardware reset so autoneg doesn't
                 * interfere with test result */
@@ -1601,7 +1602,7 @@ static void e1000_diag_test(struct net_device *netdev,
                if (if_running)
                        dev_open(netdev);
        } else {
-               e_info("online testing starting\n");
+               e_info(hw, "online testing starting\n");
                /* Online tests */
                if (e1000_link_test(adapter, &data[4]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1694,8 +1695,8 @@ static void e1000_get_wol(struct net_device *netdev,
                wol->supported &= ~WAKE_UCAST;
 
                if (adapter->wol & E1000_WUFC_EX)
-                       e_err("Interface does not support "
-                       "directed (unicast) frame wake-up packets\n");
+                       e_err(drv, "Interface does not support directed "
+                             "(unicast) frame wake-up packets\n");
                break;
        default:
                break;
@@ -1726,8 +1727,8 @@ static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
        switch (hw->device_id) {
        case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
                if (wol->wolopts & WAKE_UCAST) {
-                       e_err("Interface does not support "
-                             "directed (unicast) frame wake-up packets\n");
+                       e_err(drv, "Interface does not support directed "
+                             "(unicast) frame wake-up packets\n");
                        return -EOPNOTSUPP;
                }
                break;
index 68a80893dce1daa492c23f6a7e6e0dd290645046..02833af8a0b1d8bbb2953cb353581ede85cb3b9b 100644 (file)
@@ -275,7 +275,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
        err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
                          netdev);
        if (err) {
-               e_err("Unable to allocate interrupt Error: %d\n", err);
+               e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
        }
 
        return err;
@@ -657,7 +657,7 @@ void e1000_reset(struct e1000_adapter *adapter)
                ew32(WUC, 0);
 
        if (e1000_init_hw(hw))
-               e_err("Hardware Error\n");
+               e_dev_err("Hardware Error\n");
        e1000_update_mng_vlan(adapter);
 
        /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
@@ -925,7 +925,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
        /* initialize eeprom parameters */
        if (e1000_init_eeprom_params(hw)) {
-               e_err("EEPROM initialization failed\n");
+               e_err(probe, "EEPROM initialization failed\n");
                goto err_eeprom;
        }
 
@@ -936,7 +936,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
        /* make sure the EEPROM is good */
        if (e1000_validate_eeprom_checksum(hw) < 0) {
-               e_err("The EEPROM Checksum Is Not Valid\n");
+               e_err(probe, "The EEPROM Checksum Is Not Valid\n");
                e1000_dump_eeprom(adapter);
                /*
                 * set MAC address to all zeroes to invalidate and temporary
@@ -950,14 +950,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
        } else {
                /* copy the MAC address out of the EEPROM */
                if (e1000_read_mac_addr(hw))
-                       e_err("EEPROM Read Error\n");
+                       e_err(probe, "EEPROM Read Error\n");
        }
        /* don't block initalization here due to bad MAC address */
        memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
        memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
 
        if (!is_valid_ether_addr(netdev->perm_addr))
-               e_err("Invalid MAC Address\n");
+               e_err(probe, "Invalid MAC Address\n");
 
        e1000_get_bus_info(hw);
 
@@ -1047,7 +1047,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
                goto err_register;
 
        /* print bus type/speed/width info */
-       e_info("(PCI%s:%dMHz:%d-bit) %pM\n",
+       e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
               ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
               ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
                (hw->bus_speed == e1000_bus_speed_120) ? 120 :
@@ -1059,7 +1059,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
        /* carrier off reporting is important to ethtool even BEFORE open */
        netif_carrier_off(netdev);
 
-       e_info("Intel(R) PRO/1000 Network Connection\n");
+       e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
 
        cards_found++;
        return 0;
@@ -1159,7 +1159,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
        /* identify the MAC */
 
        if (e1000_set_mac_type(hw)) {
-               e_err("Unknown MAC Type\n");
+               e_err(probe, "Unknown MAC Type\n");
                return -EIO;
        }
 
@@ -1192,7 +1192,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
        adapter->num_rx_queues = 1;
 
        if (e1000_alloc_queues(adapter)) {
-               e_err("Unable to allocate memory for queues\n");
+               e_err(probe, "Unable to allocate memory for queues\n");
                return -ENOMEM;
        }
 
@@ -1386,7 +1386,8 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
        size = sizeof(struct e1000_buffer) * txdr->count;
        txdr->buffer_info = vmalloc(size);
        if (!txdr->buffer_info) {
-               e_err("Unable to allocate memory for the Tx descriptor ring\n");
+               e_err(probe, "Unable to allocate memory for the Tx descriptor "
+                     "ring\n");
                return -ENOMEM;
        }
        memset(txdr->buffer_info, 0, size);
@@ -1401,7 +1402,8 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
        if (!txdr->desc) {
 setup_tx_desc_die:
                vfree(txdr->buffer_info);
-               e_err("Unable to allocate memory for the Tx descriptor ring\n");
+               e_err(probe, "Unable to allocate memory for the Tx descriptor "
+                     "ring\n");
                return -ENOMEM;
        }
 
@@ -1409,7 +1411,7 @@ setup_tx_desc_die:
        if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
                void *olddesc = txdr->desc;
                dma_addr_t olddma = txdr->dma;
-               e_err("txdr align check failed: %u bytes at %p\n",
+               e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
                      txdr->size, txdr->desc);
                /* Try again, without freeing the previous */
                txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
@@ -1427,7 +1429,7 @@ setup_tx_desc_die:
                                          txdr->dma);
                        dma_free_coherent(&pdev->dev, txdr->size, olddesc,
                                          olddma);
-                       e_err("Unable to allocate aligned memory "
+                       e_err(probe, "Unable to allocate aligned memory "
                              "for the transmit descriptor ring\n");
                        vfree(txdr->buffer_info);
                        return -ENOMEM;
@@ -1460,7 +1462,7 @@ int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
        for (i = 0; i < adapter->num_tx_queues; i++) {
                err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
                if (err) {
-                       e_err("Allocation for Tx Queue %u failed\n", i);
+                       e_err(probe, "Allocation for Tx Queue %u failed\n", i);
                        for (i-- ; i >= 0; i--)
                                e1000_free_tx_resources(adapter,
                                                        &adapter->tx_ring[i]);
@@ -1580,7 +1582,8 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
        size = sizeof(struct e1000_buffer) * rxdr->count;
        rxdr->buffer_info = vmalloc(size);
        if (!rxdr->buffer_info) {
-               e_err("Unable to allocate memory for the Rx descriptor ring\n");
+               e_err(probe, "Unable to allocate memory for the Rx descriptor "
+                     "ring\n");
                return -ENOMEM;
        }
        memset(rxdr->buffer_info, 0, size);
@@ -1596,7 +1599,8 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
                                        GFP_KERNEL);
 
        if (!rxdr->desc) {
-               e_err("Unable to allocate memory for the Rx descriptor ring\n");
+               e_err(probe, "Unable to allocate memory for the Rx descriptor "
+                     "ring\n");
 setup_rx_desc_die:
                vfree(rxdr->buffer_info);
                return -ENOMEM;
@@ -1606,7 +1610,7 @@ setup_rx_desc_die:
        if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
                void *olddesc = rxdr->desc;
                dma_addr_t olddma = rxdr->dma;
-               e_err("rxdr align check failed: %u bytes at %p\n",
+               e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
                      rxdr->size, rxdr->desc);
                /* Try again, without freeing the previous */
                rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
@@ -1615,8 +1619,8 @@ setup_rx_desc_die:
                if (!rxdr->desc) {
                        dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
                                          olddma);
-                       e_err("Unable to allocate memory for the Rx descriptor "
-                             "ring\n");
+                       e_err(probe, "Unable to allocate memory for the Rx "
+                             "descriptor ring\n");
                        goto setup_rx_desc_die;
                }
 
@@ -1626,8 +1630,8 @@ setup_rx_desc_die:
                                          rxdr->dma);
                        dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
                                          olddma);
-                       e_err("Unable to allocate aligned memory for the Rx "
-                             "descriptor ring\n");
+                       e_err(probe, "Unable to allocate aligned memory for "
+                             "the Rx descriptor ring\n");
                        goto setup_rx_desc_die;
                } else {
                        /* Free old allocation, new allocation was successful */
@@ -1659,7 +1663,7 @@ int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
        for (i = 0; i < adapter->num_rx_queues; i++) {
                err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
                if (err) {
-                       e_err("Allocation for Rx Queue %u failed\n", i);
+                       e_err(probe, "Allocation for Rx Queue %u failed\n", i);
                        for (i-- ; i >= 0; i--)
                                e1000_free_rx_resources(adapter,
                                                        &adapter->rx_ring[i]);
@@ -2110,7 +2114,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
        u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
 
        if (!mcarray) {
-               e_err("memory allocation failed\n");
+               e_err(probe, "memory allocation failed\n");
                return;
        }
 
@@ -2648,7 +2652,8 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
                break;
        default:
                if (unlikely(net_ratelimit()))
-                       e_warn("checksum_partial proto=%x!\n", skb->protocol);
+                       e_warn(drv, "checksum_partial proto=%x!\n",
+                              skb->protocol);
                break;
        }
 
@@ -2992,7 +2997,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                                /* fall through */
                                pull_size = min((unsigned int)4, skb->data_len);
                                if (!__pskb_pull_tail(skb, pull_size)) {
-                                       e_err("__pskb_pull_tail failed.\n");
+                                       e_err(drv, "__pskb_pull_tail "
+                                             "failed.\n");
                                        dev_kfree_skb_any(skb);
                                        return NETDEV_TX_OK;
                                }
@@ -3140,7 +3146,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 
        if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
            (max_frame > MAX_JUMBO_FRAME_SIZE)) {
-               e_err("Invalid MTU setting\n");
+               e_err(probe, "Invalid MTU setting\n");
                return -EINVAL;
        }
 
@@ -3148,7 +3154,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
        switch (hw->mac_type) {
        case e1000_undefined ... e1000_82542_rev2_1:
                if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
-                       e_err("Jumbo Frames not supported.\n");
+                       e_err(probe, "Jumbo Frames not supported.\n");
                        return -EINVAL;
                }
                break;
@@ -3500,7 +3506,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
                    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
 
                        /* detected Tx unit hang */
-                       e_err("Detected Tx Unit Hang\n"
+                       e_err(drv, "Detected Tx Unit Hang\n"
                              "  Tx Queue             <%lu>\n"
                              "  TDH                  <%x>\n"
                              "  TDT                  <%x>\n"
@@ -3749,7 +3755,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 
                /* eth type trans needs skb->data to point to something */
                if (!pskb_may_pull(skb, ETH_HLEN)) {
-                       e_err("pskb_may_pull failed.\n");
+                       e_err(drv, "pskb_may_pull failed.\n");
                        dev_kfree_skb(skb);
                        goto next_desc;
                }
@@ -3874,7 +3880,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 
                if (adapter->discarding) {
                        /* All receives must fit into a single buffer */
-                       e_info("Receive packet consumed multiple buffers\n");
+                       e_dbg("Receive packet consumed multiple buffers\n");
                        /* recycle */
                        buffer_info->skb = skb;
                        if (status & E1000_RXD_STAT_EOP)
@@ -3986,8 +3992,8 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
                /* Fix for errata 23, can't cross 64kB boundary */
                if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
                        struct sk_buff *oldskb = skb;
-                       e_err("skb align check failed: %u bytes at %p\n",
-                             bufsz, skb->data);
+                       e_err(rx_err, "skb align check failed: %u bytes at "
+                             "%p\n", bufsz, skb->data);
                        /* Try again, without freeing the previous */
                        skb = netdev_alloc_skb_ip_align(netdev, bufsz);
                        /* Failed allocation, critical failure */
@@ -4095,8 +4101,8 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
                /* Fix for errata 23, can't cross 64kB boundary */
                if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
                        struct sk_buff *oldskb = skb;
-                       e_err("skb align check failed: %u bytes at %p\n",
-                             bufsz, skb->data);
+                       e_err(rx_err, "skb align check failed: %u bytes at "
+                             "%p\n", bufsz, skb->data);
                        /* Try again, without freeing the previous */
                        skb = netdev_alloc_skb_ip_align(netdev, bufsz);
                        /* Failed allocation, critical failure */
@@ -4141,8 +4147,8 @@ map_skb:
                if (!e1000_check_64k_bound(adapter,
                                        (void *)(unsigned long)buffer_info->dma,
                                        adapter->rx_buffer_len)) {
-                       e_err("dma align check failed: %u bytes at %p\n",
-                             adapter->rx_buffer_len,
+                       e_err(rx_err, "dma align check failed: %u bytes at "
+                             "%p\n", adapter->rx_buffer_len,
                              (void *)(unsigned long)buffer_info->dma);
                        dev_kfree_skb(skb);
                        buffer_info->skb = NULL;
@@ -4355,7 +4361,7 @@ void e1000_pci_set_mwi(struct e1000_hw *hw)
        int ret_val = pci_set_mwi(adapter->pdev);
 
        if (ret_val)
-               e_err("Error in setting MWI\n");
+               e_err(probe, "Error in setting MWI\n");
 }
 
 void e1000_pci_clear_mwi(struct e1000_hw *hw)
@@ -4486,7 +4492,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
        /* Fiber NICs only allow 1000 gbps Full duplex */
        if ((hw->media_type == e1000_media_type_fiber) &&
                spddplx != (SPEED_1000 + DUPLEX_FULL)) {
-               e_err("Unsupported Speed/Duplex configuration\n");
+               e_err(probe, "Unsupported Speed/Duplex configuration\n");
                return -EINVAL;
        }
 
@@ -4509,7 +4515,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
                break;
        case SPEED_1000 + DUPLEX_HALF: /* not supported */
        default:
-               e_err("Unsupported Speed/Duplex configuration\n");
+               e_err(probe, "Unsupported Speed/Duplex configuration\n");
                return -EINVAL;
        }
        return 0;
index 9ee133f5034ed021b20c5e8e9f77fca600f77f84..f9a31c82f87108c3e9ef6974a6c9089a62cdb5fe 100644 (file)
@@ -348,6 +348,7 @@ struct e1000_adapter {
        u32 test_icr;
 
        u32 msg_enable;
+       unsigned int num_vectors;
        struct msix_entry *msix_entries;
        int int_mode;
        u32 eiac_mask;
index 0cd569a57f6d2546bfcf9bb0bb248ec00422a77a..66ed08f726fb9bcdd13225086d05f3dcf437da6a 100644 (file)
@@ -312,8 +312,8 @@ enum e1e_registers {
 #define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9    /* Kumeran InBand Parameters */
 #define E1000_KMRNCTRLSTA_DIAG_NELPBK  0x1000 /* Nearend Loopback mode */
 #define E1000_KMRNCTRLSTA_K1_CONFIG    0x7
-#define E1000_KMRNCTRLSTA_K1_ENABLE    0x140E
-#define E1000_KMRNCTRLSTA_HD_CTRL      0x0002
+#define E1000_KMRNCTRLSTA_K1_ENABLE    0x0002
+#define E1000_KMRNCTRLSTA_HD_CTRL      0x10   /* Kumeran HD Control */
 
 #define IFE_PHY_EXTENDED_STATUS_CONTROL        0x10
 #define IFE_PHY_SPECIAL_CONTROL                0x11 /* 100BaseTx PHY Special Control */
index 6aa795a6160b0e857f9cb0ecd5dd367a3f3ab9f4..9e9164a9d4893e4bc320287b1946ec1547518d0e 100644 (file)
@@ -1785,25 +1785,25 @@ void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
 void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
 {
        int err;
-       int numvecs, i;
-
+       int i;
 
        switch (adapter->int_mode) {
        case E1000E_INT_MODE_MSIX:
                if (adapter->flags & FLAG_HAS_MSIX) {
-                       numvecs = 3; /* RxQ0, TxQ0 and other */
-                       adapter->msix_entries = kcalloc(numvecs,
+                       adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
+                       adapter->msix_entries = kcalloc(adapter->num_vectors,
                                                      sizeof(struct msix_entry),
                                                      GFP_KERNEL);
                        if (adapter->msix_entries) {
-                               for (i = 0; i < numvecs; i++)
+                               for (i = 0; i < adapter->num_vectors; i++)
                                        adapter->msix_entries[i].entry = i;
 
                                err = pci_enable_msix(adapter->pdev,
                                                      adapter->msix_entries,
-                                                     numvecs);
-                               if (err == 0)
+                                                     adapter->num_vectors);
+                               if (err == 0) {
                                        return;
+                               }
                        }
                        /* MSI-X failed, so fall through and try MSI */
                        e_err("Failed to initialize MSI-X interrupts.  "
@@ -1825,6 +1825,9 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
                /* Don't do anything; this is the system default */
                break;
        }
+
+       /* store the number of vectors being used */
+       adapter->num_vectors = 1;
 }
 
 /**
@@ -1946,7 +1949,14 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
        if (adapter->msix_entries)
                ew32(EIAC_82574, 0);
        e1e_flush();
-       synchronize_irq(adapter->pdev->irq);
+
+       if (adapter->msix_entries) {
+               int i;
+               for (i = 0; i < adapter->num_vectors; i++)
+                       synchronize_irq(adapter->msix_entries[i].vector);
+       } else {
+               synchronize_irq(adapter->pdev->irq);
+       }
 }
 
 /**
@@ -3218,12 +3228,6 @@ int e1000e_up(struct e1000_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
 
-       /* DMA latency requirement to workaround early-receive/jumbo issue */
-       if (adapter->flags & FLAG_HAS_ERT)
-               adapter->netdev->pm_qos_req =
-                       pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
-                                      PM_QOS_DEFAULT_VALUE);
-
        /* hardware has been reset, we need to reload some things */
        e1000_configure(adapter);
 
@@ -3287,12 +3291,6 @@ void e1000e_down(struct e1000_adapter *adapter)
        e1000_clean_tx_ring(adapter);
        e1000_clean_rx_ring(adapter);
 
-       if (adapter->flags & FLAG_HAS_ERT) {
-               pm_qos_remove_request(
-                             adapter->netdev->pm_qos_req);
-               adapter->netdev->pm_qos_req = NULL;
-       }
-
        /*
         * TODO: for power management, we could drop the link and
         * pci_disable_device here.
@@ -3527,6 +3525,12 @@ static int e1000_open(struct net_device *netdev)
             E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
                e1000_update_mng_vlan(adapter);
 
+       /* DMA latency requirement to workaround early-receive/jumbo issue */
+       if (adapter->flags & FLAG_HAS_ERT)
+               adapter->netdev->pm_qos_req =
+                                   pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
+                                                      PM_QOS_DEFAULT_VALUE);
+
        /*
         * before we allocate an interrupt, we must be ready to handle it.
         * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
@@ -3631,6 +3635,11 @@ static int e1000_close(struct net_device *netdev)
        if (adapter->flags & FLAG_HAS_AMT)
                e1000_release_hw_control(adapter);
 
+       if (adapter->flags & FLAG_HAS_ERT) {
+               pm_qos_remove_request(adapter->netdev->pm_qos_req);
+               adapter->netdev->pm_qos_req = NULL;
+       }
+
        pm_runtime_put_sync(&pdev->dev);
 
        return 0;
@@ -5650,8 +5659,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
        if (err)
                goto err_sw_init;
 
-       err = -EIO;
-
        memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
        memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
        memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
index 38c282e6565bcb42d3c482cd4945bd74a81a38bb..6d653c459c1f7e817661225619ddb837ddb0622e 100644 (file)
@@ -632,7 +632,7 @@ static void ethoc_mdio_poll(struct net_device *dev)
 {
 }
 
-static int ethoc_mdio_probe(struct net_device *dev)
+static int __devinit ethoc_mdio_probe(struct net_device *dev)
 {
        struct ethoc *priv = netdev_priv(dev);
        struct phy_device *phy;
@@ -871,7 +871,7 @@ static const struct net_device_ops ethoc_netdev_ops = {
  * ethoc_probe() - initialize OpenCores ethernet MAC
  * pdev:       platform device
  */
-static int ethoc_probe(struct platform_device *pdev)
+static int __devinit ethoc_probe(struct platform_device *pdev)
 {
        struct net_device *netdev = NULL;
        struct resource *res = NULL;
@@ -1080,7 +1080,7 @@ out:
  * ethoc_remove() - shutdown OpenCores ethernet MAC
  * @pdev:      platform device
  */
-static int ethoc_remove(struct platform_device *pdev)
+static int __devexit ethoc_remove(struct platform_device *pdev)
 {
        struct net_device *netdev = platform_get_drvdata(pdev);
        struct ethoc *priv = netdev_priv(netdev);
@@ -1121,7 +1121,7 @@ static int ethoc_resume(struct platform_device *pdev)
 
 static struct platform_driver ethoc_driver = {
        .probe   = ethoc_probe,
-       .remove  = ethoc_remove,
+       .remove  = __devexit_p(ethoc_remove),
        .suspend = ethoc_suspend,
        .resume  = ethoc_resume,
        .driver  = {
index 391a553a3addd314b5defc7d6d0b8d41553054be..768b840aeb6b7b0bf2ceec87469bb0b4925b5d20 100644 (file)
@@ -118,6 +118,8 @@ static unsigned char        fec_mac_default[] = {
 #define FEC_ENET_MII   ((uint)0x00800000)      /* MII interrupt */
 #define FEC_ENET_EBERR ((uint)0x00400000)      /* SDMA bus error */
 
+#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
+
 /* The FEC stores dest/src/type, data, and checksum for receive packets.
  */
 #define PKT_MAXBUF_SIZE                1518
@@ -1213,8 +1215,7 @@ fec_restart(struct net_device *dev, int duplex)
        writel(0, fep->hwp + FEC_R_DES_ACTIVE);
 
        /* Enable interrupts we wish to service */
-       writel(FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII,
-                       fep->hwp + FEC_IMASK);
+       writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
 }
 
 static void
@@ -1233,8 +1234,8 @@ fec_stop(struct net_device *dev)
        /* Whack a reset.  We should wait for this. */
        writel(1, fep->hwp + FEC_ECNTRL);
        udelay(10);
-
        writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+       writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
 }
 
 static int __devinit
index 9ef6a9d5fbcb6e11f3e7975897d3391e49431b5f..4da05b1b445c033a3606bdd0d1cf05ca0675660b 100644 (file)
 #define DEV_HAS_MSI_X              0x0000080  /* device supports MSI-X */
 #define DEV_HAS_POWER_CNTRL        0x0000100  /* device supports power savings */
 #define DEV_HAS_STATISTICS_V1      0x0000200  /* device supports hw statistics version 1 */
-#define DEV_HAS_STATISTICS_V2      0x0000600  /* device supports hw statistics version 2 */
-#define DEV_HAS_STATISTICS_V3      0x0000e00  /* device supports hw statistics version 3 */
+#define DEV_HAS_STATISTICS_V2      0x0000400  /* device supports hw statistics version 2 */
+#define DEV_HAS_STATISTICS_V3      0x0000800  /* device supports hw statistics version 3 */
+#define DEV_HAS_STATISTICS_V12     0x0000600  /* device supports hw statistics version 1 and 2 */
+#define DEV_HAS_STATISTICS_V123    0x0000e00  /* device supports hw statistics version 1, 2, and 3 */
 #define DEV_HAS_TEST_EXTENDED      0x0001000  /* device supports extended diagnostic test */
 #define DEV_HAS_MGMT_UNIT          0x0002000  /* device supports management unit */
 #define DEV_HAS_CORRECT_MACADDR    0x0004000  /* device supports correct mac address order */
@@ -6067,111 +6069,111 @@ static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
        },
        {       /* MCP55 Ethernet Controller */
                PCI_DEVICE(0x10DE, 0x0372),
-               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
+               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
        },
        {       /* MCP55 Ethernet Controller */
                PCI_DEVICE(0x10DE, 0x0373),
-               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
+               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
        },
        {       /* MCP61 Ethernet Controller */
                PCI_DEVICE(0x10DE, 0x03E5),
-               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
+               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
        },
        {       /* MCP61 Ethernet Controller */
                PCI_DEVICE(0x10DE, 0x03E6),
-               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
+               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
        },
        {       /* MCP61 Ethernet Controller */
                PCI_DEVICE(0x10DE, 0x03EE),
-               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
+               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
        },
        {       /* MCP61 Ethernet Controller */
                PCI_DEVICE(0x10DE, 0x03EF),
-               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
+               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
        },
        {       /* MCP65 Ethernet Controller */
                PCI_DEVICE(0x10DE, 0x0450),
-               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
        },
        {       /* MCP65 Ethernet Controller */
                PCI_DEVICE(0x10DE, 0x0451),
-               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
        },
        {       /* MCP65 Ethernet Controller */
                PCI_DEVICE(0x10DE, 0x0452),
-               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
        },
        {       /* MCP65 Ethernet Controller */
                PCI_DEVICE(0x10DE, 0x0453),
-               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
        },
        {       /* MCP67 Ethernet Controller */
                PCI_DEVICE(0x10DE, 0x054C),
-               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
        },
        {       /* MCP67 Ethernet Controller */
                PCI_DEVICE(0x10DE, 0x054D),
-               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
        },
        {       /* MCP67 Ethernet Controller */
                PCI_DEVICE(0x10DE, 0x054E),
-               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
        },
        {       /* MCP67 Ethernet Controller */
                PCI_DEVICE(0x10DE, 0x054F),
-               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
        },
        {       /* MCP73 Ethernet Controller */
                PCI_DEVICE(0x10DE, 0x07DC),
-               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
        },
        {       /* MCP73 Ethernet Controller */
                PCI_DEVICE(0x10DE, 0x07DD),
-               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
        },
        {       /* MCP73 Ethernet Controller */
                PCI_DEVICE(0x10DE, 0x07DE),
-               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
        },
        {       /* MCP73 Ethernet Controller */
                PCI_DEVICE(0x10DE, 0x07DF),
-               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
+               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
        },
        {       /* MCP77 Ethernet Controller */
                PCI_DEVICE(0x10DE, 0x0760),
-               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
+               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
        },
        {       /* MCP77 Ethernet Controller */
                PCI_DEVICE(0x10DE, 0x0761),
-               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
+               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
        },
        {       /* MCP77 Ethernet Controller */
                PCI_DEVICE(0x10DE, 0x0762),
-               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
+               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
        },
        {       /* MCP77 Ethernet Controller */
                PCI_DEVICE(0x10DE, 0x0763),
-               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
+               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
        },
        {       /* MCP79 Ethernet Controller */
                PCI_DEVICE(0x10DE, 0x0AB0),
-               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
+               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
        },
        {       /* MCP79 Ethernet Controller */
                PCI_DEVICE(0x10DE, 0x0AB1),
-               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
+               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
        },
        {       /* MCP79 Ethernet Controller */
                PCI_DEVICE(0x10DE, 0x0AB2),
-               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
+               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
        },
        {       /* MCP79 Ethernet Controller */
                PCI_DEVICE(0x10DE, 0x0AB3),
-               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
+               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
        },
        {       /* MCP89 Ethernet Controller */
                PCI_DEVICE(0x10DE, 0x0D7D),
-               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX,
+               .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX,
        },
        {0,},
 };
index acbf0d003a6d9bb73ff4cdd92c74aa86f1358ca1..ce587f4c4203f0d4a7952a70ff664cb07c1e2c21 100644 (file)
@@ -720,9 +720,10 @@ static int __devinit hp100_probe1(struct net_device *dev, int ioaddr,
                /* Conversion to new PCI API :
                 * Pages are always aligned and zeroed, no need to it ourself.
                 * Doc says should be OK for EISA bus as well - Jean II */
-               if ((lp->page_vaddr_algn = pci_alloc_consistent(lp->pci_dev, MAX_RINGSIZE, &page_baddr)) == NULL) {
+               lp->page_vaddr_algn = pci_alloc_consistent(lp->pci_dev, MAX_RINGSIZE, &page_baddr);
+               if (!lp->page_vaddr_algn) {
                        err = -ENOMEM;
-                       goto out2;
+                       goto out_mem_ptr;
                }
                lp->whatever_offset = ((u_long) page_baddr) - ((u_long) lp->page_vaddr_algn);
 
@@ -798,6 +799,7 @@ out3:
                pci_free_consistent(lp->pci_dev, MAX_RINGSIZE + 0x0f,
                                    lp->page_vaddr_algn,
                                    virt_to_whatever(dev, lp->page_vaddr_algn));
+out_mem_ptr:
        if (mem_ptr_virt)
                iounmap(mem_ptr_virt);
 out2:
index 06251a9e9f1b65711e2bfaf6367119a11f68dd6d..187622f1c81611f3b5b269c7c730610460c03210 100644 (file)
@@ -63,6 +63,7 @@ static bool igb_sgmii_active_82575(struct e1000_hw *);
 static s32  igb_reset_init_script_82575(struct e1000_hw *);
 static s32  igb_read_mac_addr_82575(struct e1000_hw *);
 static s32  igb_set_pcie_completion_timeout(struct e1000_hw *hw);
+static s32  igb_reset_mdicnfg_82580(struct e1000_hw *hw);
 
 static const u16 e1000_82580_rxpbs_table[] =
        { 36, 72, 144, 1, 2, 4, 8, 16,
@@ -70,6 +71,35 @@ static const u16 e1000_82580_rxpbs_table[] =
 #define E1000_82580_RXPBS_TABLE_SIZE \
        (sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
 
+/**
+ *  igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
+ *  @hw: pointer to the HW structure
+ *
+ *  Called to determine if the I2C pins are being used for I2C or as an
+ *  external MDIO interface since the two options are mutually exclusive.
+ **/
+static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
+{
+       u32 reg = 0;
+       bool ext_mdio = false;
+
+       switch (hw->mac.type) {
+       case e1000_82575:
+       case e1000_82576:
+               reg = rd32(E1000_MDIC);
+               ext_mdio = !!(reg & E1000_MDIC_DEST);
+               break;
+       case e1000_82580:
+       case e1000_i350:
+               reg = rd32(E1000_MDICNFG);
+               ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
+               break;
+       default:
+               break;
+       }
+       return ext_mdio;
+}
+
 static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 {
        struct e1000_phy_info *phy = &hw->phy;
@@ -130,27 +160,15 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
        switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
        case E1000_CTRL_EXT_LINK_MODE_SGMII:
                dev_spec->sgmii_active = true;
-               ctrl_ext |= E1000_CTRL_I2C_ENA;
                break;
        case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
        case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
                hw->phy.media_type = e1000_media_type_internal_serdes;
-               ctrl_ext |= E1000_CTRL_I2C_ENA;
                break;
        default:
-               ctrl_ext &= ~E1000_CTRL_I2C_ENA;
                break;
        }
 
-       wr32(E1000_CTRL_EXT, ctrl_ext);
-
-       /*
-        * if using i2c make certain the MDICNFG register is cleared to prevent
-        * communications from being misrouted to the mdic registers
-        */
-       if ((ctrl_ext & E1000_CTRL_I2C_ENA) && (hw->mac.type == e1000_82580))
-               wr32(E1000_MDICNFG, 0);
-
        /* Set mta register count */
        mac->mta_reg_count = 128;
        /* Set rar entry count */
@@ -228,19 +246,29 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
        phy->autoneg_mask        = AUTONEG_ADVERTISE_SPEED_DEFAULT;
        phy->reset_delay_us      = 100;
 
+       ctrl_ext = rd32(E1000_CTRL_EXT);
+
        /* PHY function pointers */
        if (igb_sgmii_active_82575(hw)) {
-               phy->ops.reset              = igb_phy_hw_reset_sgmii_82575;
-               phy->ops.read_reg           = igb_read_phy_reg_sgmii_82575;
-               phy->ops.write_reg          = igb_write_phy_reg_sgmii_82575;
+               phy->ops.reset      = igb_phy_hw_reset_sgmii_82575;
+               ctrl_ext |= E1000_CTRL_I2C_ENA;
+       } else {
+               phy->ops.reset      = igb_phy_hw_reset;
+               ctrl_ext &= ~E1000_CTRL_I2C_ENA;
+       }
+
+       wr32(E1000_CTRL_EXT, ctrl_ext);
+       igb_reset_mdicnfg_82580(hw);
+
+       if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
+               phy->ops.read_reg   = igb_read_phy_reg_sgmii_82575;
+               phy->ops.write_reg  = igb_write_phy_reg_sgmii_82575;
        } else if (hw->mac.type >= e1000_82580) {
-               phy->ops.reset              = igb_phy_hw_reset;
-               phy->ops.read_reg           = igb_read_phy_reg_82580;
-               phy->ops.write_reg          = igb_write_phy_reg_82580;
+               phy->ops.read_reg   = igb_read_phy_reg_82580;
+               phy->ops.write_reg  = igb_write_phy_reg_82580;
        } else {
-               phy->ops.reset              = igb_phy_hw_reset;
-               phy->ops.read_reg           = igb_read_phy_reg_igp;
-               phy->ops.write_reg          = igb_write_phy_reg_igp;
+               phy->ops.read_reg   = igb_read_phy_reg_igp;
+               phy->ops.write_reg  = igb_write_phy_reg_igp;
        }
 
        /* set lan id */
@@ -400,6 +428,7 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
        s32  ret_val = 0;
        u16 phy_id;
        u32 ctrl_ext;
+       u32 mdic;
 
        /*
         * For SGMII PHYs, we try the list of possible addresses until
@@ -414,6 +443,29 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
                goto out;
        }
 
+       if (igb_sgmii_uses_mdio_82575(hw)) {
+               switch (hw->mac.type) {
+               case e1000_82575:
+               case e1000_82576:
+                       mdic = rd32(E1000_MDIC);
+                       mdic &= E1000_MDIC_PHY_MASK;
+                       phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
+                       break;
+               case e1000_82580:
+               case e1000_i350:
+                       mdic = rd32(E1000_MDICNFG);
+                       mdic &= E1000_MDICNFG_PHY_MASK;
+                       phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
+                       break;
+               default:
+                       ret_val = -E1000_ERR_PHY;
+                       goto out;
+                       break;
+               }
+               ret_val = igb_get_phy_id(hw);
+               goto out;
+       }
+
        /* Power on sgmii phy if it is disabled */
        ctrl_ext = rd32(E1000_CTRL_EXT);
        wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
@@ -1500,6 +1552,43 @@ out:
        return ret_val;
 }
 
+/**
+ *  igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
+ *  the values found in the EEPROM.  This addresses an issue in which these
+ *  bits are not restored from EEPROM after reset.
+ **/
+static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw)
+{
+       s32 ret_val = 0;
+       u32 mdicnfg;
+       u16 nvm_data;
+
+       if (hw->mac.type != e1000_82580)
+               goto out;
+       if (!igb_sgmii_active_82575(hw))
+               goto out;
+
+       ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
+                                  NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
+                                  &nvm_data);
+       if (ret_val) {
+               hw_dbg("NVM Read Error\n");
+               goto out;
+       }
+
+       mdicnfg = rd32(E1000_MDICNFG);
+       if (nvm_data & NVM_WORD24_EXT_MDIO)
+               mdicnfg |= E1000_MDICNFG_EXT_MDIO;
+       if (nvm_data & NVM_WORD24_COM_MDIO)
+               mdicnfg |= E1000_MDICNFG_COM_MDIO;
+       wr32(E1000_MDICNFG, mdicnfg);
+out:
+       return ret_val;
+}
+
 /**
  *  igb_reset_hw_82580 - Reset hardware
  *  @hw: pointer to the HW structure
@@ -1575,6 +1664,10 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
        wr32(E1000_IMC, 0xffffffff);
        icr = rd32(E1000_ICR);
 
+       ret_val = igb_reset_mdicnfg_82580(hw);
+       if (ret_val)
+               hw_dbg("Could not reset MDICNFG based on EEPROM\n");
+
        /* Install any alternate MAC address into RAR0 */
        ret_val = igb_check_alt_mac_addr(hw);
 
index 90bc29d7e182800b29ed7a507a92aa72daf3d936..bbd2ec308eb06b07963bfc5d658d73318968c406 100644 (file)
 
 #define E1000_TIMINCA_16NS_SHIFT 24
 
+#define E1000_MDICNFG_EXT_MDIO    0x80000000      /* MDI ext/int destination */
+#define E1000_MDICNFG_COM_MDIO    0x40000000      /* MDI shared w/ lan 0 */
+#define E1000_MDICNFG_PHY_MASK    0x03E00000
+#define E1000_MDICNFG_PHY_SHIFT   21
+
 /* PCI Express Control */
 #define E1000_GCR_CMPL_TMOUT_MASK       0x0000F000
 #define E1000_GCR_CMPL_TMOUT_10ms       0x00001000
 
 #define NVM_82580_LAN_FUNC_OFFSET(a) (a ? (0x40 + (0x40 * a)) : 0)
 
+/* Mask bits for fields in Word 0x24 of the NVM */
+#define NVM_WORD24_COM_MDIO         0x0008 /* MDIO interface shared */
+#define NVM_WORD24_EXT_MDIO         0x0004 /* MDIO accesses routed external */
+
 /* Mask bits for fields in Word 0x0f of the NVM */
 #define NVM_WORD0F_PAUSE_MASK       0x3000
 #define NVM_WORD0F_ASM_DIR          0x2000
 #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X    0x0800
 
 /* MDI Control */
+#define E1000_MDIC_DATA_MASK 0x0000FFFF
+#define E1000_MDIC_REG_MASK  0x001F0000
 #define E1000_MDIC_REG_SHIFT 16
+#define E1000_MDIC_PHY_MASK  0x03E00000
 #define E1000_MDIC_PHY_SHIFT 21
 #define E1000_MDIC_OP_WRITE  0x04000000
 #define E1000_MDIC_OP_READ   0x08000000
 #define E1000_MDIC_READY     0x10000000
+#define E1000_MDIC_INT_EN    0x20000000
 #define E1000_MDIC_ERROR     0x40000000
+#define E1000_MDIC_DEST      0x80000000
 
 /* SerDes Control */
 #define E1000_GEN_CTL_READY             0x80000000
index 94656179441dd43ce4f21c91c24d3ca719dcbfc4..df5dcd23e4fcb408384c0991057305a23e41a4f7 100644 (file)
@@ -1290,7 +1290,13 @@ static void igb_irq_disable(struct igb_adapter *adapter)
        wr32(E1000_IAM, 0);
        wr32(E1000_IMC, ~0);
        wrfl();
-       synchronize_irq(adapter->pdev->irq);
+       if (adapter->msix_entries) {
+               int i;
+               for (i = 0; i < adapter->num_q_vectors; i++)
+                       synchronize_irq(adapter->msix_entries[i].vector);
+       } else {
+               synchronize_irq(adapter->pdev->irq);
+       }
 }
 
 /**
@@ -1722,6 +1728,15 @@ static int __devinit igb_probe(struct pci_dev *pdev,
        u16 eeprom_apme_mask = IGB_EEPROM_APME;
        u32 part_num;
 
+       /* Catch broken hardware that put the wrong VF device ID in
+        * the PCIe SR-IOV capability.
+        */
+       if (pdev->is_virtfn) {
+               WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
+                    pci_name(pdev), pdev->vendor, pdev->device);
+               return -EINVAL;
+       }
+
        err = pci_enable_device_mem(pdev);
        if (err)
                return err;
index 5e2b2a8c56c6f81ade223d30f9e3bf0705ed3f6e..048595bc79ad393b18f7352a6da40551be00c494 100644 (file)
@@ -2751,7 +2751,7 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
                dev_info(&pdev->dev,
                         "PF still in reset state, assigning new address."
                         " Is the PF interface up?\n");
-               random_ether_addr(hw->mac.addr);
+               dev_hw_addr_random(adapter->netdev, hw->mac.addr);
        } else {
                err = hw->mac.ops.read_mac_addr(hw);
                if (err) {
index d67e48418e55c3a5fa085231c7fe462e491703d7..850ca1c5ee19cc024cb41aff9164b459ab999e46 100644 (file)
@@ -2848,9 +2848,7 @@ static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
        unsigned short ss_device = 0x0000;
        int ret = 0;
 
-       dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
-
-       while (dev != NULL) {
+       for_each_pci_dev(dev) {
                struct smsc_ircc_subsystem_configuration *conf;
 
                /*
@@ -2899,7 +2897,6 @@ static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
                                        ret = -ENODEV;
                        }
                }
-               dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
        }
 
        return ret;
index da54b38bb480593f27bf0006a820dda70c391276..dcebc82c6f4de3a4ae0134f802cbf5c7f8e8d302 100644 (file)
@@ -54,14 +54,14 @@ struct ixgbe_stats {
                                sizeof(((struct ixgbe_adapter *)0)->m), \
                                offsetof(struct ixgbe_adapter, m)
 #define IXGBE_NETDEV_STAT(m)   NETDEV_STATS, \
-                               sizeof(((struct net_device *)0)->m), \
-                               offsetof(struct net_device, m) - offsetof(struct net_device, stats)
+                               sizeof(((struct rtnl_link_stats64 *)0)->m), \
+                               offsetof(struct rtnl_link_stats64, m)
 
 static struct ixgbe_stats ixgbe_gstrings_stats[] = {
-       {"rx_packets", IXGBE_NETDEV_STAT(stats.rx_packets)},
-       {"tx_packets", IXGBE_NETDEV_STAT(stats.tx_packets)},
-       {"rx_bytes", IXGBE_NETDEV_STAT(stats.rx_bytes)},
-       {"tx_bytes", IXGBE_NETDEV_STAT(stats.tx_bytes)},
+       {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
+       {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
+       {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
+       {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
        {"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
        {"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
        {"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
@@ -69,27 +69,27 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
        {"lsc_int", IXGBE_STAT(lsc_int)},
        {"tx_busy", IXGBE_STAT(tx_busy)},
        {"non_eop_descs", IXGBE_STAT(non_eop_descs)},
-       {"rx_errors", IXGBE_NETDEV_STAT(stats.rx_errors)},
-       {"tx_errors", IXGBE_NETDEV_STAT(stats.tx_errors)},
-       {"rx_dropped", IXGBE_NETDEV_STAT(stats.rx_dropped)},
-       {"tx_dropped", IXGBE_NETDEV_STAT(stats.tx_dropped)},
-       {"multicast", IXGBE_NETDEV_STAT(stats.multicast)},
+       {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
+       {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
+       {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
+       {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
+       {"multicast", IXGBE_NETDEV_STAT(multicast)},
        {"broadcast", IXGBE_STAT(stats.bprc)},
        {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
-       {"collisions", IXGBE_NETDEV_STAT(stats.collisions)},
-       {"rx_over_errors", IXGBE_NETDEV_STAT(stats.rx_over_errors)},
-       {"rx_crc_errors", IXGBE_NETDEV_STAT(stats.rx_crc_errors)},
-       {"rx_frame_errors", IXGBE_NETDEV_STAT(stats.rx_frame_errors)},
+       {"collisions", IXGBE_NETDEV_STAT(collisions)},
+       {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
+       {"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
+       {"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
        {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
        {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
        {"fdir_match", IXGBE_STAT(stats.fdirmatch)},
        {"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
-       {"rx_fifo_errors", IXGBE_NETDEV_STAT(stats.rx_fifo_errors)},
-       {"rx_missed_errors", IXGBE_NETDEV_STAT(stats.rx_missed_errors)},
-       {"tx_aborted_errors", IXGBE_NETDEV_STAT(stats.tx_aborted_errors)},
-       {"tx_carrier_errors", IXGBE_NETDEV_STAT(stats.tx_carrier_errors)},
-       {"tx_fifo_errors", IXGBE_NETDEV_STAT(stats.tx_fifo_errors)},
-       {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(stats.tx_heartbeat_errors)},
+       {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
+       {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
+       {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
+       {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
+       {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
+       {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
        {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
        {"tx_restart_queue", IXGBE_STAT(restart_queue)},
        {"rx_long_length_errors", IXGBE_STAT(stats.roc)},
index 920375951454294f86f5cea0f38b0cfe00caa38a..7d6a415bcf885633999e10ed795df49876d6c47e 100644 (file)
@@ -4783,6 +4783,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 #ifdef CONFIG_IXGBE_DCB
                /* Default traffic class to use for FCoE */
                adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
+               adapter->fcoe.up = IXGBE_FCOE_DEFTC;
 #endif
 #endif /* IXGBE_FCOE */
        }
@@ -6147,21 +6148,26 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        int txq = smp_processor_id();
 
+#ifdef IXGBE_FCOE
+       if ((skb->protocol == htons(ETH_P_FCOE)) ||
+           (skb->protocol == htons(ETH_P_FIP))) {
+               if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+                       txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
+                       txq += adapter->ring_feature[RING_F_FCOE].mask;
+                       return txq;
+               } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+                       txq = adapter->fcoe.up;
+                       return txq;
+               }
+       }
+#endif
+
        if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
                while (unlikely(txq >= dev->real_num_tx_queues))
                        txq -= dev->real_num_tx_queues;
                return txq;
        }
 
-#ifdef IXGBE_FCOE
-       if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
-           ((skb->protocol == htons(ETH_P_FCOE)) ||
-            (skb->protocol == htons(ETH_P_FIP)))) {
-               txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
-               txq += adapter->ring_feature[RING_F_FCOE].mask;
-               return txq;
-       }
-#endif
        if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
                if (skb->priority == TC_PRIO_CONTROL)
                        txq = adapter->ring_feature[RING_F_DCB].indices-1;
@@ -6205,18 +6211,15 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
        tx_ring = adapter->tx_ring[skb->queue_mapping];
 
 #ifdef IXGBE_FCOE
-       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
-#ifdef CONFIG_IXGBE_DCB
-               /* for FCoE with DCB, we force the priority to what
-                * was specified by the switch */
-               if ((skb->protocol == htons(ETH_P_FCOE)) ||
-                   (skb->protocol == htons(ETH_P_FIP))) {
-                       tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
-                                     << IXGBE_TX_FLAGS_VLAN_SHIFT);
-                       tx_flags |= ((adapter->fcoe.up << 13)
-                                    << IXGBE_TX_FLAGS_VLAN_SHIFT);
-               }
-#endif
+       /* for FCoE with DCB, we force the priority to what
+        * was specified by the switch */
+       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
+           (skb->protocol == htons(ETH_P_FCOE) ||
+            skb->protocol == htons(ETH_P_FIP))) {
+               tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
+                             << IXGBE_TX_FLAGS_VLAN_SHIFT);
+               tx_flags |= ((adapter->fcoe.up << 13)
+                             << IXGBE_TX_FLAGS_VLAN_SHIFT);
                /* flag for FCoE offloads */
                if (skb->protocol == htons(ETH_P_FCOE))
                        tx_flags |= IXGBE_TX_FLAGS_FCOE;
@@ -6536,6 +6539,15 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 #endif
        u32 part_num, eec;
 
+       /* Catch broken hardware that put the wrong VF device ID in
+        * the PCIe SR-IOV capability.
+        */
+       if (pdev->is_virtfn) {
+               WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
+                    pci_name(pdev), pdev->vendor, pdev->device);
+               return -EINVAL;
+       }
+
        err = pci_enable_device_mem(pdev);
        if (err)
                return err;
@@ -6549,8 +6561,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                        err = dma_set_coherent_mask(&pdev->dev,
                                                    DMA_BIT_MASK(32));
                        if (err) {
-                               e_dev_err("No usable DMA configuration, "
-                                         "aborting\n");
+                               dev_err(&pdev->dev,
+                                       "No usable DMA configuration, aborting\n");
                                goto err_dma;
                        }
                }
@@ -6560,7 +6572,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
                                           IORESOURCE_MEM), ixgbe_driver_name);
        if (err) {
-               e_dev_err("pci_request_selected_regions failed 0x%x\n", err);
+               dev_err(&pdev->dev,
+                       "pci_request_selected_regions failed 0x%x\n", err);
                goto err_pci_reg;
        }
 
index af491352b5e00eb2de151ff63bb73b519897b679..3e291ccc629d0971ef9d9ec2f23f2b2b93b65b1b 100644 (file)
@@ -1463,18 +1463,10 @@ static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
-       struct net_device *v_netdev;
 
        /* add VID to filter table */
        if (hw->mac.ops.set_vfta)
                hw->mac.ops.set_vfta(hw, vid, 0, true);
-       /*
-        * Copy feature flags from netdev to the vlan netdev for this vid.
-        * This allows things like TSO to bubble down to our vlan device.
-        */
-       v_netdev = vlan_group_get_device(adapter->vlgrp, vid);
-       v_netdev->features |= adapter->netdev->features;
-       vlan_group_set_device(adapter->vlgrp, vid, v_netdev);
 }
 
 static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
@@ -2229,7 +2221,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
        if (err) {
                dev_info(&pdev->dev,
                         "PF still in reset state, assigning new address\n");
-               random_ether_addr(hw->mac.addr);
+               dev_hw_addr_random(adapter->netdev, hw->mac.addr);
        } else {
                err = hw->mac.ops.init_hw(hw);
                if (err) {
@@ -3402,7 +3394,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
        /* setup the private structure */
        err = ixgbevf_sw_init(adapter);
 
-#ifdef MAX_SKB_FRAGS
        netdev->features = NETIF_F_SG |
                           NETIF_F_IP_CSUM |
                           NETIF_F_HW_VLAN_TX |
@@ -3416,13 +3407,12 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
        netdev->vlan_features |= NETIF_F_TSO;
        netdev->vlan_features |= NETIF_F_TSO6;
        netdev->vlan_features |= NETIF_F_IP_CSUM;
+       netdev->vlan_features |= NETIF_F_IPV6_CSUM;
        netdev->vlan_features |= NETIF_F_SG;
 
        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;
 
-#endif /* MAX_SKB_FRAGS */
-
        /* The HW MAC address was set and/or determined in sw_init */
        memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
        memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
index 634dad1c8b48ea024a66804b88ed783b59da48c1..928b2b83cef509080c30eccd6c4d1c4cfe76fd58 100644 (file)
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/ks8842.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
 
 #define DRV_NAME "ks8842"
 
 /* Timberdale specific Registers */
-#define REG_TIMB_RST   0x1c
+#define REG_TIMB_RST           0x1c
+#define REG_TIMB_FIFO          0x20
+#define REG_TIMB_ISR           0x24
+#define REG_TIMB_IER           0x28
+#define REG_TIMB_IAR           0x2C
+#define REQ_TIMB_DMA_RESUME    0x30
 
 /* KS8842 registers */
 
 #define IRQ_RX_ERROR   0x0080
 #define ENABLED_IRQS   (IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \
                IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
+/* When running via timberdale in DMA mode, the RX interrupt should be
+   enabled in the KS8842, but not in the FPGA IP, since the IP handles
+   RX DMA internally.
+   TX interrupts are not needed; TX is handled by the FPGA and the driver is
+   notified via DMA callbacks.
+*/
+#define ENABLED_IRQS_DMA_IP    (IRQ_LINK_CHANGE | IRQ_RX_STOPPED | \
+       IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
+#define ENABLED_IRQS_DMA       (ENABLED_IRQS_DMA_IP | IRQ_RX)
 #define REG_ISR                0x02
 #define REG_RXSR       0x04
 #define RXSR_VALID     0x8000
 #define        MICREL_KS884X           0x01    /* 0=Timeberdale(FPGA), 1=Micrel */
 #define        KS884X_16BIT            0x02    /*  1=16bit, 0=32bit */
 
+#define DMA_BUFFER_SIZE                2048
+
+struct ks8842_tx_dma_ctl {
+       struct dma_chan *chan;
+       struct dma_async_tx_descriptor *adesc;
+       void *buf;
+       struct scatterlist sg;
+       int channel;
+};
+
+struct ks8842_rx_dma_ctl {
+       struct dma_chan *chan;
+       struct dma_async_tx_descriptor *adesc;
+       struct sk_buff  *skb;
+       struct scatterlist sg;
+       struct tasklet_struct tasklet;
+       int channel;
+};
+
+#define KS8842_USE_DMA(adapter) (((adapter)->dma_tx.channel != -1) && \
+        ((adapter)->dma_rx.channel != -1))
+
 struct ks8842_adapter {
        void __iomem    *hw_addr;
        int             irq;
@@ -127,8 +166,19 @@ struct ks8842_adapter {
        spinlock_t      lock; /* spinlock to be interrupt safe */
        struct work_struct timeout_work;
        struct net_device *netdev;
+       struct device *dev;
+       struct ks8842_tx_dma_ctl        dma_tx;
+       struct ks8842_rx_dma_ctl        dma_rx;
 };
 
+static void ks8842_dma_rx_cb(void *data);
+static void ks8842_dma_tx_cb(void *data);
+
+static inline void ks8842_resume_dma(struct ks8842_adapter *adapter)
+{
+       iowrite32(1, adapter->hw_addr + REQ_TIMB_DMA_RESUME);
+}
+
 static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank)
 {
        iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK);
@@ -282,10 +332,6 @@ static void ks8842_reset_hw(struct ks8842_adapter *adapter)
        /* restart port auto-negotiation */
        ks8842_enable_bits(adapter, 49, 1 << 13, REG_P1CR4);
 
-       if (!(adapter->conf_flags & MICREL_KS884X))
-               /* only advertise 10Mbps */
-               ks8842_clear_bits(adapter, 49, 3 << 2, REG_P1CR4);
-
        /* Enable the transmitter */
        ks8842_enable_tx(adapter);
 
@@ -296,8 +342,19 @@ static void ks8842_reset_hw(struct ks8842_adapter *adapter)
        ks8842_write16(adapter, 18, 0xffff, REG_ISR);
 
        /* enable interrupts */
-       ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
-
+       if (KS8842_USE_DMA(adapter)) {
+               /* When running in DMA mode the RX interrupt is not enabled in
+                  timberdale because RX data is received by DMA callbacks;
+                  it must still be enabled in the KS8842 because it indicates
+                  to timberdale when there is RX data for its DMA FIFOs */
+               iowrite16(ENABLED_IRQS_DMA_IP, adapter->hw_addr + REG_TIMB_IER);
+               ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
+       } else {
+               if (!(adapter->conf_flags & MICREL_KS884X))
+                       iowrite16(ENABLED_IRQS,
+                               adapter->hw_addr + REG_TIMB_IER);
+               ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
+       }
        /* enable the switch */
        ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE);
 }
@@ -370,6 +427,53 @@ static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter)
        return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff;
 }
 
+static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
+{
+       struct ks8842_adapter *adapter = netdev_priv(netdev);
+       struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx;
+       u8 *buf = ctl->buf;
+
+       if (ctl->adesc) {
+               netdev_dbg(netdev, "%s: TX ongoing\n", __func__);
+               /* transfer ongoing */
+               return NETDEV_TX_BUSY;
+       }
+
+       sg_dma_len(&ctl->sg) = skb->len + sizeof(u32);
+
+       /* copy data to the TX buffer */
+       /* the control word, enable IRQ, port 1 and the length */
+       *buf++ = 0x00;
+       *buf++ = 0x01; /* Port 1 */
+       *buf++ = skb->len & 0xff;
+       *buf++ = (skb->len >> 8) & 0xff;
+       skb_copy_from_linear_data(skb, buf, skb->len);
+
+       dma_sync_single_range_for_device(adapter->dev,
+               sg_dma_address(&ctl->sg), 0, sg_dma_len(&ctl->sg),
+               DMA_TO_DEVICE);
+
+       /* make sure the length is a multiple of 4 */
+       if (sg_dma_len(&ctl->sg) % 4)
+               sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;
+
+       ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
+               &ctl->sg, 1, DMA_TO_DEVICE,
+               DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
+       if (!ctl->adesc)
+               return NETDEV_TX_BUSY;
+
+       ctl->adesc->callback_param = netdev;
+       ctl->adesc->callback = ks8842_dma_tx_cb;
+       ctl->adesc->tx_submit(ctl->adesc);
+
+       netdev->stats.tx_bytes += skb->len;
+
+       dev_kfree_skb(skb);
+
+       return NETDEV_TX_OK;
+}
+
 static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
 {
        struct ks8842_adapter *adapter = netdev_priv(netdev);
@@ -421,6 +525,121 @@ static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
        return NETDEV_TX_OK;
 }
 
+static void ks8842_update_rx_err_counters(struct net_device *netdev, u32 status)
+{
+       netdev_dbg(netdev, "RX error, status: %x\n", status);
+
+       netdev->stats.rx_errors++;
+       if (status & RXSR_TOO_LONG)
+               netdev->stats.rx_length_errors++;
+       if (status & RXSR_CRC_ERROR)
+               netdev->stats.rx_crc_errors++;
+       if (status & RXSR_RUNT)
+               netdev->stats.rx_frame_errors++;
+}
+
+static void ks8842_update_rx_counters(struct net_device *netdev, u32 status,
+       int len)
+{
+       netdev_dbg(netdev, "RX packet, len: %d\n", len);
+
+       netdev->stats.rx_packets++;
+       netdev->stats.rx_bytes += len;
+       if (status & RXSR_MULTICAST)
+               netdev->stats.multicast++;
+}
+
+static int __ks8842_start_new_rx_dma(struct net_device *netdev)
+{
+       struct ks8842_adapter *adapter = netdev_priv(netdev);
+       struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
+       struct scatterlist *sg = &ctl->sg;
+       int err;
+
+       ctl->skb = netdev_alloc_skb(netdev, DMA_BUFFER_SIZE);
+       if (ctl->skb) {
+               sg_init_table(sg, 1);
+               sg_dma_address(sg) = dma_map_single(adapter->dev,
+                       ctl->skb->data, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
+               err = dma_mapping_error(adapter->dev, sg_dma_address(sg));
+               if (unlikely(err)) {
+                       sg_dma_address(sg) = 0;
+                       goto out;
+               }
+
+               sg_dma_len(sg) = DMA_BUFFER_SIZE;
+
+               ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
+                       sg, 1, DMA_FROM_DEVICE,
+                       DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
+
+               if (!ctl->adesc)
+                       goto out;
+
+               ctl->adesc->callback_param = netdev;
+               ctl->adesc->callback = ks8842_dma_rx_cb;
+               ctl->adesc->tx_submit(ctl->adesc);
+       } else {
+               err = -ENOMEM;
+               sg_dma_address(sg) = 0;
+               goto out;
+       }
+
+       return err;
+out:
+       if (sg_dma_address(sg))
+               dma_unmap_single(adapter->dev, sg_dma_address(sg),
+                       DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
+       sg_dma_address(sg) = 0;
+       if (ctl->skb)
+               dev_kfree_skb(ctl->skb);
+
+       ctl->skb = NULL;
+
+       printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err);
+       return err;
+}
+
+static void ks8842_rx_frame_dma_tasklet(unsigned long arg)
+{
+       struct net_device *netdev = (struct net_device *)arg;
+       struct ks8842_adapter *adapter = netdev_priv(netdev);
+       struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
+       struct sk_buff *skb = ctl->skb;
+       dma_addr_t addr = sg_dma_address(&ctl->sg);
+       u32 status;
+
+       ctl->adesc = NULL;
+
+       /* kick next transfer going */
+       __ks8842_start_new_rx_dma(netdev);
+
+       /* now handle the data we got */
+       dma_unmap_single(adapter->dev, addr, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
+
+       status = *((u32 *)skb->data);
+
+       netdev_dbg(netdev, "%s - rx_data: status: %x\n",
+               __func__, status & 0xffff);
+
+       /* check the status */
+       if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
+               int len = (status >> 16) & 0x7ff;
+
+               ks8842_update_rx_counters(netdev, status, len);
+
+               /* reserve 4 bytes which is the status word */
+               skb_reserve(skb, 4);
+               skb_put(skb, len);
+
+               skb->protocol = eth_type_trans(skb, netdev);
+               netif_rx(skb);
+       } else {
+               ks8842_update_rx_err_counters(netdev, status);
+               dev_kfree_skb(skb);
+       }
+}
+
 static void ks8842_rx_frame(struct net_device *netdev,
        struct ks8842_adapter *adapter)
 {
@@ -444,13 +663,9 @@ static void ks8842_rx_frame(struct net_device *netdev,
        if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
                struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len);
 
-               netdev_dbg(netdev, "%s, got package, len: %d\n", __func__, len);
                if (skb) {
 
-                       netdev->stats.rx_packets++;
-                       netdev->stats.rx_bytes += len;
-                       if (status & RXSR_MULTICAST)
-                               netdev->stats.multicast++;
+                       ks8842_update_rx_counters(netdev, status, len);
 
                        if (adapter->conf_flags & KS884X_16BIT) {
                                u16 *data16 = (u16 *)skb_put(skb, len);
@@ -476,16 +691,8 @@ static void ks8842_rx_frame(struct net_device *netdev,
                        netif_rx(skb);
                } else
                        netdev->stats.rx_dropped++;
-       } else {
-               netdev_dbg(netdev, "RX error, status: %x\n", status);
-               netdev->stats.rx_errors++;
-               if (status & RXSR_TOO_LONG)
-                       netdev->stats.rx_length_errors++;
-               if (status & RXSR_CRC_ERROR)
-                       netdev->stats.rx_crc_errors++;
-               if (status & RXSR_RUNT)
-                       netdev->stats.rx_frame_errors++;
-       }
+       } else
+               ks8842_update_rx_err_counters(netdev, status);
 
        /* set high watermark to 3K */
        ks8842_clear_bits(adapter, 0, 1 << 12, REG_QRFCR);
@@ -540,18 +747,30 @@ void ks8842_tasklet(unsigned long arg)
        isr = ks8842_read16(adapter, 18, REG_ISR);
        netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr);
 
+       /* when running in DMA mode, do not ack RX interrupts; they are handled
+          internally by timberdale, otherwise its DMA FIFOs would stop
+       */
+       if (KS8842_USE_DMA(adapter))
+               isr &= ~IRQ_RX;
+
        /* Ack */
        ks8842_write16(adapter, 18, isr, REG_ISR);
 
+       if (!(adapter->conf_flags & MICREL_KS884X))
+               /* Ack in the timberdale IP as well */
+               iowrite32(0x1, adapter->hw_addr + REG_TIMB_IAR);
+
        if (!netif_running(netdev))
                return;
 
        if (isr & IRQ_LINK_CHANGE)
                ks8842_update_link_status(netdev, adapter);
 
-       if (isr & (IRQ_RX | IRQ_RX_ERROR))
+       /* should not get IRQ_RX when running DMA mode */
+       if (isr & (IRQ_RX | IRQ_RX_ERROR) && !KS8842_USE_DMA(adapter))
                ks8842_handle_rx(netdev, adapter);
 
+       /* should only happen when in PIO mode */
        if (isr & IRQ_TX)
                ks8842_handle_tx(netdev, adapter);
 
@@ -570,8 +789,17 @@ void ks8842_tasklet(unsigned long arg)
 
        /* re-enable interrupts, put back the bank selection register */
        spin_lock_irqsave(&adapter->lock, flags);
-       ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
+       if (KS8842_USE_DMA(adapter))
+               ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
+       else
+               ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
        iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);
+
+       /* Make sure timberdale continues DMA operations, they are stopped while
+          we are handling the ks8842 because we might change bank */
+       if (KS8842_USE_DMA(adapter))
+               ks8842_resume_dma(adapter);
+
        spin_unlock_irqrestore(&adapter->lock, flags);
 }
 
@@ -587,8 +815,12 @@ static irqreturn_t ks8842_irq(int irq, void *devid)
        netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr);
 
        if (isr) {
-               /* disable IRQ */
-               ks8842_write16(adapter, 18, 0x00, REG_IER);
+               if (KS8842_USE_DMA(adapter))
+                       /* disable all but RX IRQ, since the FPGA relies on it*/
+                       ks8842_write16(adapter, 18, IRQ_RX, REG_IER);
+               else
+                       /* disable IRQ */
+                       ks8842_write16(adapter, 18, 0x00, REG_IER);
 
                /* schedule tasklet */
                tasklet_schedule(&adapter->tasklet);
@@ -598,9 +830,151 @@ static irqreturn_t ks8842_irq(int irq, void *devid)
 
        iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);
 
+       /* After an interrupt, tell timberdale to continue DMA operations.
+          DMA is disabled while we are handling the ks8842 because we might
+          change bank */
+       ks8842_resume_dma(adapter);
+
        return ret;
 }
 
+static void ks8842_dma_rx_cb(void *data)
+{
+       struct net_device       *netdev = data;
+       struct ks8842_adapter   *adapter = netdev_priv(netdev);
+
+       netdev_dbg(netdev, "RX DMA finished\n");
+       /* schedule tasklet */
+       if (adapter->dma_rx.adesc)
+               tasklet_schedule(&adapter->dma_rx.tasklet);
+}
+
+static void ks8842_dma_tx_cb(void *data)
+{
+       struct net_device               *netdev = data;
+       struct ks8842_adapter           *adapter = netdev_priv(netdev);
+       struct ks8842_tx_dma_ctl        *ctl = &adapter->dma_tx;
+
+       netdev_dbg(netdev, "TX DMA finished\n");
+
+       if (!ctl->adesc)
+               return;
+
+       netdev->stats.tx_packets++;
+       ctl->adesc = NULL;
+
+       if (netif_queue_stopped(netdev))
+               netif_wake_queue(netdev);
+}
+
+static void ks8842_stop_dma(struct ks8842_adapter *adapter)
+{
+       struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
+       struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
+
+       tx_ctl->adesc = NULL;
+       if (tx_ctl->chan)
+               tx_ctl->chan->device->device_control(tx_ctl->chan,
+                       DMA_TERMINATE_ALL, 0);
+
+       rx_ctl->adesc = NULL;
+       if (rx_ctl->chan)
+               rx_ctl->chan->device->device_control(rx_ctl->chan,
+                       DMA_TERMINATE_ALL, 0);
+
+       if (sg_dma_address(&rx_ctl->sg))
+               dma_unmap_single(adapter->dev, sg_dma_address(&rx_ctl->sg),
+                       DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
+       sg_dma_address(&rx_ctl->sg) = 0;
+
+       dev_kfree_skb(rx_ctl->skb);
+       rx_ctl->skb = NULL;
+}
+
+static void ks8842_dealloc_dma_bufs(struct ks8842_adapter *adapter)
+{
+       struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
+       struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
+
+       ks8842_stop_dma(adapter);
+
+       if (tx_ctl->chan)
+               dma_release_channel(tx_ctl->chan);
+       tx_ctl->chan = NULL;
+
+       if (rx_ctl->chan)
+               dma_release_channel(rx_ctl->chan);
+       rx_ctl->chan = NULL;
+
+       tasklet_kill(&rx_ctl->tasklet);
+
+       if (sg_dma_address(&tx_ctl->sg))
+               dma_unmap_single(adapter->dev, sg_dma_address(&tx_ctl->sg),
+                       DMA_BUFFER_SIZE, DMA_TO_DEVICE);
+       sg_dma_address(&tx_ctl->sg) = 0;
+
+       kfree(tx_ctl->buf);
+       tx_ctl->buf = NULL;
+}
+
+static bool ks8842_dma_filter_fn(struct dma_chan *chan, void *filter_param)
+{
+       return chan->chan_id == (long)filter_param;
+}
+
+static int ks8842_alloc_dma_bufs(struct net_device *netdev)
+{
+       struct ks8842_adapter *adapter = netdev_priv(netdev);
+       struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
+       struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
+       int err;
+
+       dma_cap_mask_t mask;
+
+       dma_cap_zero(mask);
+       dma_cap_set(DMA_SLAVE, mask);
+       dma_cap_set(DMA_PRIVATE, mask);
+
+       sg_init_table(&tx_ctl->sg, 1);
+
+       tx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn,
+                                          (void *)(long)tx_ctl->channel);
+       if (!tx_ctl->chan) {
+               err = -ENODEV;
+               goto err;
+       }
+
+       /* allocate DMA buffer */
+       tx_ctl->buf = kmalloc(DMA_BUFFER_SIZE, GFP_KERNEL);
+       if (!tx_ctl->buf) {
+               err = -ENOMEM;
+               goto err;
+       }
+
+       sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
+               tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
+       err = dma_mapping_error(adapter->dev,
+               sg_dma_address(&tx_ctl->sg));
+       if (err) {
+               sg_dma_address(&tx_ctl->sg) = 0;
+               goto err;
+       }
+
+       rx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn,
+                                          (void *)(long)rx_ctl->channel);
+       if (!rx_ctl->chan) {
+               err = -ENODEV;
+               goto err;
+       }
+
+       tasklet_init(&rx_ctl->tasklet, ks8842_rx_frame_dma_tasklet,
+               (unsigned long)netdev);
+
+       return 0;
+err:
+       ks8842_dealloc_dma_bufs(adapter);
+       return err;
+}
 
 /* Netdevice operations */
 
@@ -611,6 +985,25 @@ static int ks8842_open(struct net_device *netdev)
 
        netdev_dbg(netdev, "%s - entry\n", __func__);
 
+       if (KS8842_USE_DMA(adapter)) {
+               err = ks8842_alloc_dma_bufs(netdev);
+
+               if (!err) {
+                       /* start RX dma */
+                       err = __ks8842_start_new_rx_dma(netdev);
+                       if (err)
+                               ks8842_dealloc_dma_bufs(adapter);
+               }
+
+               if (err) {
+                       printk(KERN_WARNING DRV_NAME
+                               ": Failed to initiate DMA, running PIO\n");
+                       ks8842_dealloc_dma_bufs(adapter);
+                       adapter->dma_rx.channel = -1;
+                       adapter->dma_tx.channel = -1;
+               }
+       }
+
        /* reset the HW */
        ks8842_reset_hw(adapter);
 
@@ -636,6 +1029,9 @@ static int ks8842_close(struct net_device *netdev)
 
        cancel_work_sync(&adapter->timeout_work);
 
+       if (KS8842_USE_DMA(adapter))
+               ks8842_dealloc_dma_bufs(adapter);
+
        /* free the irq */
        free_irq(adapter->irq, netdev);
 
@@ -653,6 +1049,17 @@ static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb,
 
        netdev_dbg(netdev, "%s: entry\n", __func__);
 
+       if (KS8842_USE_DMA(adapter)) {
+               unsigned long flags;
+               ret = ks8842_tx_frame_dma(skb, netdev);
+               /* for now only allow one transfer at the time */
+               spin_lock_irqsave(&adapter->lock, flags);
+               if (adapter->dma_tx.adesc)
+                       netif_stop_queue(netdev);
+               spin_unlock_irqrestore(&adapter->lock, flags);
+               return ret;
+       }
+
        ret = ks8842_tx_frame(skb, netdev);
 
        if (ks8842_tx_fifo_space(adapter) <  netdev->mtu + 8)
@@ -688,6 +1095,10 @@ static void ks8842_tx_timeout_work(struct work_struct *work)
        netdev_dbg(netdev, "%s: entry\n", __func__);
 
        spin_lock_irqsave(&adapter->lock, flags);
+
+       if (KS8842_USE_DMA(adapter))
+               ks8842_stop_dma(adapter);
+
        /* disable interrupts */
        ks8842_write16(adapter, 18, 0, REG_IER);
        ks8842_write16(adapter, 18, 0xFFFF, REG_ISR);
@@ -701,6 +1112,9 @@ static void ks8842_tx_timeout_work(struct work_struct *work)
        ks8842_write_mac_addr(adapter, netdev->dev_addr);
 
        ks8842_update_link_status(netdev, adapter);
+
+       if (KS8842_USE_DMA(adapter))
+               __ks8842_start_new_rx_dma(netdev);
 }
 
 static void ks8842_tx_timeout(struct net_device *netdev)
@@ -760,6 +1174,19 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
                goto err_get_irq;
        }
 
+       adapter->dev = (pdev->dev.parent) ? pdev->dev.parent : &pdev->dev;
+
+       /* DMA is only supported when accessed via timberdale */
+       if (!(adapter->conf_flags & MICREL_KS884X) && pdata &&
+               (pdata->tx_dma_channel != -1) &&
+               (pdata->rx_dma_channel != -1)) {
+               adapter->dma_rx.channel = pdata->rx_dma_channel;
+               adapter->dma_tx.channel = pdata->tx_dma_channel;
+       } else {
+               adapter->dma_rx.channel = -1;
+               adapter->dma_tx.channel = -1;
+       }
+
        tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev);
        spin_lock_init(&adapter->lock);
 
index b3c010b8565826193f55795580b355e6f55b0b7d..8b32cc107f0f51c02a6116bb24fc4c2cd3bbdc2a 100644 (file)
@@ -6894,13 +6894,12 @@ static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port)
        i = j = num = got_num = 0;
        while (j < MAC_ADDR_LEN) {
                if (macaddr[i]) {
+                       int digit;
+
                        got_num = 1;
-                       if ('0' <= macaddr[i] && macaddr[i] <= '9')
-                               num = num * 16 + macaddr[i] - '0';
-                       else if ('A' <= macaddr[i] && macaddr[i] <= 'F')
-                               num = num * 16 + 10 + macaddr[i] - 'A';
-                       else if ('a' <= macaddr[i] && macaddr[i] <= 'f')
-                               num = num * 16 + 10 + macaddr[i] - 'a';
+                       digit = hex_to_bin(macaddr[i]);
+                       if (digit >= 0)
+                               num = num * 16 + digit;
                        else if (':' == macaddr[i])
                                got_num = 2;
                        else
index 1b28aaec0a5ab587ca130289fdeb6730e5cf4e74..0ef0eb0db94564dda7d0c7f28c7581db799e0e7a 100644 (file)
@@ -158,7 +158,8 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
        const struct macvlan_dev *vlan;
        const struct macvlan_dev *src;
        struct net_device *dev;
-       unsigned int len;
+       unsigned int len = 0;
+       int ret = NET_RX_DROP;
 
        port = macvlan_port_get_rcu(skb->dev);
        if (is_multicast_ether_addr(eth->h_dest)) {
@@ -195,14 +196,16 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
        }
        len = skb->len + ETH_HLEN;
        skb = skb_share_check(skb, GFP_ATOMIC);
-       macvlan_count_rx(vlan, len, skb != NULL, 0);
        if (!skb)
-               return NULL;
+               goto out;
 
        skb->dev = dev;
        skb->pkt_type = PACKET_HOST;
 
-       vlan->receive(skb);
+       ret = vlan->receive(skb);
+
+out:
+       macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0);
        return NULL;
 }
 
@@ -515,7 +518,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
        .ndo_validate_addr      = eth_validate_addr,
 };
 
-static void macvlan_setup(struct net_device *dev)
+void macvlan_common_setup(struct net_device *dev)
 {
        ether_setup(dev);
 
@@ -524,6 +527,12 @@ static void macvlan_setup(struct net_device *dev)
        dev->destructor         = free_netdev;
        dev->header_ops         = &macvlan_hard_header_ops,
        dev->ethtool_ops        = &macvlan_ethtool_ops;
+}
+EXPORT_SYMBOL_GPL(macvlan_common_setup);
+
+static void macvlan_setup(struct net_device *dev)
+{
+       macvlan_common_setup(dev);
        dev->tx_queue_len       = 0;
 }
 
@@ -735,7 +744,6 @@ int macvlan_link_register(struct rtnl_link_ops *ops)
        /* common fields */
        ops->priv_size          = sizeof(struct macvlan_dev);
        ops->get_tx_queues      = macvlan_get_tx_queues;
-       ops->setup              = macvlan_setup;
        ops->validate           = macvlan_validate;
        ops->maxtype            = IFLA_MACVLAN_MAX;
        ops->policy             = macvlan_policy;
@@ -749,6 +757,7 @@ EXPORT_SYMBOL_GPL(macvlan_link_register);
 
 static struct rtnl_link_ops macvlan_link_ops = {
        .kind           = "macvlan",
+       .setup          = macvlan_setup,
        .newlink        = macvlan_newlink,
        .dellink        = macvlan_dellink,
 };
index 2b4d59b58b2cf0a30d648977df77cc4afdc442f9..3b1c54a9c6ef12bc255f0af833ef6de2788da219 100644 (file)
@@ -180,11 +180,18 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
 {
        struct macvtap_queue *q = macvtap_get_queue(dev, skb);
        if (!q)
-               return -ENOLINK;
+               goto drop;
+
+       if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
+               goto drop;
 
        skb_queue_tail(&q->sk.sk_receive_queue, skb);
        wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
-       return 0;
+       return NET_RX_SUCCESS;
+
+drop:
+       kfree_skb(skb);
+       return NET_RX_DROP;
 }
 
 /*
@@ -235,8 +242,15 @@ static void macvtap_dellink(struct net_device *dev,
        macvlan_dellink(dev, head);
 }
 
+static void macvtap_setup(struct net_device *dev)
+{
+       macvlan_common_setup(dev);
+       dev->tx_queue_len = TUN_READQ_SIZE;
+}
+
 static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
        .kind           = "macvtap",
+       .setup          = macvtap_setup,
        .newlink        = macvtap_newlink,
        .dellink        = macvtap_dellink,
 };
index 2fcdb1e1b99d068ab4791752c53f52a42f14f97a..2d488abcf62d7c789a84583dd1ad7564c00ba80c 100644 (file)
@@ -2675,7 +2675,8 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
         * Detect hardware parameters.
         */
        msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
-       msp->tx_csum_limit = pd->tx_csum_limit ? pd->tx_csum_limit : 9 * 1024;
+       msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
+                                       pd->tx_csum_limit : 9 * 1024;
        infer_hw_params(msp);
 
        platform_set_drvdata(pdev, msp);
index 78b74e83ce5df7f242fe579676f7623c8d84e6f3..721a090a01bcce6ad4a2c0244e62bdd3334f3029 100644 (file)
 #define MII_M1111_COPPER               0
 #define MII_M1111_FIBER                        1
 
+#define MII_88E1121_PHY_MSCR_PAGE      2
+#define MII_88E1121_PHY_MSCR_REG       21
+#define MII_88E1121_PHY_MSCR_RX_DELAY  BIT(5)
+#define MII_88E1121_PHY_MSCR_TX_DELAY  BIT(4)
+#define MII_88E1121_PHY_MSCR_DELAY_MASK        (~(0x3 << 4))
+
+#define MII_88EC048_PHY_MSCR1_REG      16
+#define MII_88EC048_PHY_MSCR1_PAD_ODD  BIT(6)
+
 #define MII_88E1121_PHY_LED_CTRL       16
 #define MII_88E1121_PHY_LED_PAGE       3
 #define MII_88E1121_PHY_LED_DEF                0x0030
@@ -180,7 +189,30 @@ static int marvell_config_aneg(struct phy_device *phydev)
 
 static int m88e1121_config_aneg(struct phy_device *phydev)
 {
-       int err, temp;
+       int err, oldpage, mscr;
+
+       oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE);
+
+       err = phy_write(phydev, MII_88E1121_PHY_PAGE,
+                       MII_88E1121_PHY_MSCR_PAGE);
+       if (err < 0)
+               return err;
+       mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG) &
+               MII_88E1121_PHY_MSCR_DELAY_MASK;
+
+       if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+               mscr |= (MII_88E1121_PHY_MSCR_RX_DELAY |
+                        MII_88E1121_PHY_MSCR_TX_DELAY);
+       else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+               mscr |= MII_88E1121_PHY_MSCR_RX_DELAY;
+       else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+               mscr |= MII_88E1121_PHY_MSCR_TX_DELAY;
+
+       err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr);
+       if (err < 0)
+               return err;
+
+       phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);
 
        err = phy_write(phydev, MII_BMCR, BMCR_RESET);
        if (err < 0)
@@ -191,17 +223,42 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
        if (err < 0)
                return err;
 
-       temp = phy_read(phydev, MII_88E1121_PHY_PAGE);
+       oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE);
 
        phy_write(phydev, MII_88E1121_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
        phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF);
-       phy_write(phydev, MII_88E1121_PHY_PAGE, temp);
+       phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);
 
        err = genphy_config_aneg(phydev);
 
        return err;
 }
 
+static int m88ec048_config_aneg(struct phy_device *phydev)
+{
+       int err, oldpage, mscr;
+
+       oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE);
+
+       err = phy_write(phydev, MII_88E1121_PHY_PAGE,
+                       MII_88E1121_PHY_MSCR_PAGE);
+       if (err < 0)
+               return err;
+
+       mscr = phy_read(phydev, MII_88EC048_PHY_MSCR1_REG);
+       mscr |= MII_88EC048_PHY_MSCR1_PAD_ODD;
+
+       err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr);
+       if (err < 0)
+               return err;
+
+       err = phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);
+       if (err < 0)
+               return err;
+
+       return m88e1121_config_aneg(phydev);
+}
+
 static int m88e1111_config_init(struct phy_device *phydev)
 {
        int err;
@@ -592,6 +649,19 @@ static struct phy_driver marvell_drivers[] = {
                .did_interrupt = &m88e1121_did_interrupt,
                .driver = { .owner = THIS_MODULE },
        },
+       {
+               .phy_id = 0x01410e90,
+               .phy_id_mask = 0xfffffff0,
+               .name = "Marvell 88EC048",
+               .features = PHY_GBIT_FEATURES,
+               .flags = PHY_HAS_INTERRUPT,
+               .config_aneg = &m88ec048_config_aneg,
+               .read_status = &marvell_read_status,
+               .ack_interrupt = &marvell_ack_interrupt,
+               .config_intr = &marvell_config_intr,
+               .did_interrupt = &m88e1121_did_interrupt,
+               .driver = { .owner = THIS_MODULE },
+       },
        {
                .phy_id = 0x01410cd0,
                .phy_id_mask = 0xfffffff0,
@@ -657,6 +727,7 @@ static struct mdio_device_id marvell_tbl[] = {
        { 0x01410cb0, 0xfffffff0 },
        { 0x01410cd0, 0xfffffff0 },
        { 0x01410e30, 0xfffffff0 },
+       { 0x01410e90, 0xfffffff0 },
        { }
 };
 
index 54ebb65ada1863ecdc7ff97b15587d6516b1e3c7..6168a130f33fc1a18548427a73918d572db19107 100644 (file)
@@ -5,6 +5,8 @@
  * See LICENSE.qla3xxx for copyright and licensing details.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/types.h>
 
 #include "qla3xxx.h"
 
-#define DRV_NAME       "qla3xxx"
-#define DRV_STRING     "QLogic ISP3XXX Network Driver"
+#define DRV_NAME       "qla3xxx"
+#define DRV_STRING     "QLogic ISP3XXX Network Driver"
 #define DRV_VERSION    "v2.03.00-k5"
-#define PFX            DRV_NAME " "
 
 static const char ql3xxx_driver_name[] = DRV_NAME;
 static const char ql3xxx_driver_version[] = DRV_VERSION;
 
+#define TIMED_OUT_MSG                                                  \
+"Timed out waiting for management port to get free before issuing command\n"
+
 MODULE_AUTHOR("QLogic Corporation");
 MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
 MODULE_LICENSE("GPL");
@@ -73,24 +77,24 @@ MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);
 /*
  *  These are the known PHY's which are used
  */
-typedef enum {
+enum PHY_DEVICE_TYPE {
    PHY_TYPE_UNKNOWN   = 0,
    PHY_VITESSE_VSC8211,
    PHY_AGERE_ET1011C,
    MAX_PHY_DEV_TYPES
-} PHY_DEVICE_et;
-
-typedef struct {
-       PHY_DEVICE_et phyDevice;
-       u32             phyIdOUI;
-       u16             phyIdModel;
-       char            *name;
-} PHY_DEVICE_INFO_t;
-
-static const PHY_DEVICE_INFO_t PHY_DEVICES[] =
-       {{PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
-        {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
-        {PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
+};
+
+struct PHY_DEVICE_INFO {
+       const enum PHY_DEVICE_TYPE      phyDevice;
+       const u32               phyIdOUI;
+       const u16               phyIdModel;
+       const char              *name;
+};
+
+static const struct PHY_DEVICE_INFO PHY_DEVICES[] = {
+       {PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
+       {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
+       {PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
 };
 
 
@@ -100,7 +104,8 @@ static const PHY_DEVICE_INFO_t PHY_DEVICES[] =
 static int ql_sem_spinlock(struct ql3_adapter *qdev,
                            u32 sem_mask, u32 sem_bits)
 {
-       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
        u32 value;
        unsigned int seconds = 3;
 
@@ -111,20 +116,22 @@ static int ql_sem_spinlock(struct ql3_adapter *qdev,
                if ((value & (sem_mask >> 16)) == sem_bits)
                        return 0;
                ssleep(1);
-       } while(--seconds);
+       } while (--seconds);
        return -1;
 }
 
 static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
 {
-       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
        writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
        readl(&port_regs->CommonRegs.semaphoreReg);
 }
 
 static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
 {
-       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
        u32 value;
 
        writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
@@ -139,32 +146,28 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
 {
        int i = 0;
 
-       while (1) {
-               if (!ql_sem_lock(qdev,
-                                QL_DRVR_SEM_MASK,
-                                (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
-                                 * 2) << 1)) {
-                       if (i < 10) {
-                               ssleep(1);
-                               i++;
-                       } else {
-                               printk(KERN_ERR PFX "%s: Timed out waiting for "
-                                      "driver lock...\n",
-                                      qdev->ndev->name);
-                               return 0;
-                       }
-               } else {
-                       printk(KERN_DEBUG PFX
-                              "%s: driver lock acquired.\n",
-                              qdev->ndev->name);
+       while (i < 10) {
+               if (i)
+                       ssleep(1);
+
+               if (ql_sem_lock(qdev,
+                               QL_DRVR_SEM_MASK,
+                               (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
+                                * 2) << 1)) {
+                       netdev_printk(KERN_DEBUG, qdev->ndev,
+                                     "driver lock acquired\n");
                        return 1;
                }
        }
+
+       netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
+       return 0;
 }
 
 static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
 {
-       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
 
        writel(((ISP_CONTROL_NP_MASK << 16) | page),
                        &port_regs->CommonRegs.ispControlStatus);
@@ -172,8 +175,7 @@ static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
        qdev->current_page = page;
 }
 
-static u32 ql_read_common_reg_l(struct ql3_adapter *qdev,
-                             u32 __iomem * reg)
+static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
 {
        u32 value;
        unsigned long hw_flags;
@@ -185,8 +187,7 @@ static u32 ql_read_common_reg_l(struct ql3_adapter *qdev,
        return value;
 }
 
-static u32 ql_read_common_reg(struct ql3_adapter *qdev,
-                             u32 __iomem * reg)
+static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
 {
        return readl(reg);
 }
@@ -199,7 +200,7 @@ static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 
        if (qdev->current_page != 0)
-               ql_set_register_page(qdev,0);
+               ql_set_register_page(qdev, 0);
        value = readl(reg);
 
        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
@@ -209,7 +210,7 @@ static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
 static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
 {
        if (qdev->current_page != 0)
-               ql_set_register_page(qdev,0);
+               ql_set_register_page(qdev, 0);
        return readl(reg);
 }
 
@@ -243,7 +244,7 @@ static void ql_write_page0_reg(struct ql3_adapter *qdev,
                               u32 __iomem *reg, u32 value)
 {
        if (qdev->current_page != 0)
-               ql_set_register_page(qdev,0);
+               ql_set_register_page(qdev, 0);
        writel(value, reg);
        readl(reg);
 }
@@ -255,7 +256,7 @@ static void ql_write_page1_reg(struct ql3_adapter *qdev,
                               u32 __iomem *reg, u32 value)
 {
        if (qdev->current_page != 1)
-               ql_set_register_page(qdev,1);
+               ql_set_register_page(qdev, 1);
        writel(value, reg);
        readl(reg);
 }
@@ -267,14 +268,15 @@ static void ql_write_page2_reg(struct ql3_adapter *qdev,
                               u32 __iomem *reg, u32 value)
 {
        if (qdev->current_page != 2)
-               ql_set_register_page(qdev,2);
+               ql_set_register_page(qdev, 2);
        writel(value, reg);
        readl(reg);
 }
 
 static void ql_disable_interrupts(struct ql3_adapter *qdev)
 {
-       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
 
        ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
                            (ISP_IMR_ENABLE_INT << 16));
@@ -283,7 +285,8 @@ static void ql_disable_interrupts(struct ql3_adapter *qdev)
 
 static void ql_enable_interrupts(struct ql3_adapter *qdev)
 {
-       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
 
        ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
                            ((0xff << 16) | ISP_IMR_ENABLE_INT));
@@ -308,8 +311,7 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
                lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
                                                   qdev->lrg_buffer_len);
                if (unlikely(!lrg_buf_cb->skb)) {
-                       printk(KERN_ERR PFX "%s: failed netdev_alloc_skb().\n",
-                              qdev->ndev->name);
+                       netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n");
                        qdev->lrg_buf_skb_check++;
                } else {
                        /*
@@ -323,9 +325,10 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
                                             QL_HEADER_SPACE,
                                             PCI_DMA_FROMDEVICE);
                        err = pci_dma_mapping_error(qdev->pdev, map);
-                       if(err) {
-                               printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
-                                      qdev->ndev->name, err);
+                       if (err) {
+                               netdev_err(qdev->ndev,
+                                          "PCI mapping failed with error: %d\n",
+                                          err);
                                dev_kfree_skb(lrg_buf_cb->skb);
                                lrg_buf_cb->skb = NULL;
 
@@ -350,10 +353,11 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
 static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
                                                           *qdev)
 {
-       struct ql_rcv_buf_cb *lrg_buf_cb;
+       struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
 
-       if ((lrg_buf_cb = qdev->lrg_buf_free_head) != NULL) {
-               if ((qdev->lrg_buf_free_head = lrg_buf_cb->next) == NULL)
+       if (lrg_buf_cb != NULL) {
+               qdev->lrg_buf_free_head = lrg_buf_cb->next;
+               if (qdev->lrg_buf_free_head == NULL)
                        qdev->lrg_buf_free_tail = NULL;
                qdev->lrg_buf_free_count--;
        }
@@ -374,13 +378,13 @@ static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
 static void fm93c56a_select(struct ql3_adapter *qdev)
 {
        struct ql3xxx_port_registers __iomem *port_regs =
-                       qdev->mem_map_registers;
+                       qdev->mem_map_registers;
+       u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
 
        qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
-       ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
-                           ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
-       ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
-                           ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
+       ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
+       ql_write_nvram_reg(qdev, spir,
+                          ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
 }
 
 /*
@@ -393,51 +397,40 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
        u32 dataBit;
        u32 previousBit;
        struct ql3xxx_port_registers __iomem *port_regs =
-                       qdev->mem_map_registers;
+                       qdev->mem_map_registers;
+       u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
 
        /* Clock in a zero, then do the start bit */
-       ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
-                           ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
-                           AUBURN_EEPROM_DO_1);
-       ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
-                           ISP_NVRAM_MASK | qdev->
-                           eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
-                           AUBURN_EEPROM_CLK_RISE);
-       ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
-                           ISP_NVRAM_MASK | qdev->
-                           eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
-                           AUBURN_EEPROM_CLK_FALL);
+       ql_write_nvram_reg(qdev, spir,
+                          (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+                           AUBURN_EEPROM_DO_1));
+       ql_write_nvram_reg(qdev, spir,
+                          (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+                           AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE));
+       ql_write_nvram_reg(qdev, spir,
+                          (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+                           AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL));
 
        mask = 1 << (FM93C56A_CMD_BITS - 1);
        /* Force the previous data bit to be different */
        previousBit = 0xffff;
        for (i = 0; i < FM93C56A_CMD_BITS; i++) {
-               dataBit =
-                   (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0;
+               dataBit = (cmd & mask)
+                       ? AUBURN_EEPROM_DO_1
+                       : AUBURN_EEPROM_DO_0;
                if (previousBit != dataBit) {
-                       /*
-                        * If the bit changed, then change the DO state to
-                        * match
-                        */
-                       ql_write_nvram_reg(qdev,
-                                           &port_regs->CommonRegs.
-                                           serialPortInterfaceReg,
-                                           ISP_NVRAM_MASK | qdev->
-                                           eeprom_cmd_data | dataBit);
+                       /* If the bit changed, change the DO state to match */
+                       ql_write_nvram_reg(qdev, spir,
+                                          (ISP_NVRAM_MASK |
+                                           qdev->eeprom_cmd_data | dataBit));
                        previousBit = dataBit;
                }
-               ql_write_nvram_reg(qdev,
-                                   &port_regs->CommonRegs.
-                                   serialPortInterfaceReg,
-                                   ISP_NVRAM_MASK | qdev->
-                                   eeprom_cmd_data | dataBit |
-                                   AUBURN_EEPROM_CLK_RISE);
-               ql_write_nvram_reg(qdev,
-                                   &port_regs->CommonRegs.
-                                   serialPortInterfaceReg,
-                                   ISP_NVRAM_MASK | qdev->
-                                   eeprom_cmd_data | dataBit |
-                                   AUBURN_EEPROM_CLK_FALL);
+               ql_write_nvram_reg(qdev, spir,
+                                  (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+                                   dataBit | AUBURN_EEPROM_CLK_RISE));
+               ql_write_nvram_reg(qdev, spir,
+                                  (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+                                   dataBit | AUBURN_EEPROM_CLK_FALL));
                cmd = cmd << 1;
        }
 
@@ -445,33 +438,24 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
        /* Force the previous data bit to be different */
        previousBit = 0xffff;
        for (i = 0; i < addrBits; i++) {
-               dataBit =
-                   (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 :
-                   AUBURN_EEPROM_DO_0;
+               dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1
+                       : AUBURN_EEPROM_DO_0;
                if (previousBit != dataBit) {
                        /*
                         * If the bit changed, then change the DO state to
                         * match
                         */
-                       ql_write_nvram_reg(qdev,
-                                           &port_regs->CommonRegs.
-                                           serialPortInterfaceReg,
-                                           ISP_NVRAM_MASK | qdev->
-                                           eeprom_cmd_data | dataBit);
+                       ql_write_nvram_reg(qdev, spir,
+                                          (ISP_NVRAM_MASK |
+                                           qdev->eeprom_cmd_data | dataBit));
                        previousBit = dataBit;
                }
-               ql_write_nvram_reg(qdev,
-                                   &port_regs->CommonRegs.
-                                   serialPortInterfaceReg,
-                                   ISP_NVRAM_MASK | qdev->
-                                   eeprom_cmd_data | dataBit |
-                                   AUBURN_EEPROM_CLK_RISE);
-               ql_write_nvram_reg(qdev,
-                                   &port_regs->CommonRegs.
-                                   serialPortInterfaceReg,
-                                   ISP_NVRAM_MASK | qdev->
-                                   eeprom_cmd_data | dataBit |
-                                   AUBURN_EEPROM_CLK_FALL);
+               ql_write_nvram_reg(qdev, spir,
+                                  (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+                                   dataBit | AUBURN_EEPROM_CLK_RISE));
+               ql_write_nvram_reg(qdev, spir,
+                                  (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+                                   dataBit | AUBURN_EEPROM_CLK_FALL));
                eepromAddr = eepromAddr << 1;
        }
 }
@@ -482,10 +466,11 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
 static void fm93c56a_deselect(struct ql3_adapter *qdev)
 {
        struct ql3xxx_port_registers __iomem *port_regs =
-                       qdev->mem_map_registers;
+                       qdev->mem_map_registers;
+       u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+
        qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
-       ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
-                           ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
+       ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
 }
 
 /*
@@ -497,29 +482,23 @@ static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
        u32 data = 0;
        u32 dataBit;
        struct ql3xxx_port_registers __iomem *port_regs =
-                       qdev->mem_map_registers;
+                       qdev->mem_map_registers;
+       u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
 
        /* Read the data bits */
        /* The first bit is a dummy.  Clock right over it. */
        for (i = 0; i < dataBits; i++) {
-               ql_write_nvram_reg(qdev,
-                                   &port_regs->CommonRegs.
-                                   serialPortInterfaceReg,
-                                   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
-                                   AUBURN_EEPROM_CLK_RISE);
-               ql_write_nvram_reg(qdev,
-                                   &port_regs->CommonRegs.
-                                   serialPortInterfaceReg,
-                                   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
-                                   AUBURN_EEPROM_CLK_FALL);
-               dataBit =
-                   (ql_read_common_reg
-                    (qdev,
-                     &port_regs->CommonRegs.
-                     serialPortInterfaceReg) & AUBURN_EEPROM_DI_1) ? 1 : 0;
+               ql_write_nvram_reg(qdev, spir,
+                                  ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+                                  AUBURN_EEPROM_CLK_RISE);
+               ql_write_nvram_reg(qdev, spir,
+                                  ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+                                  AUBURN_EEPROM_CLK_FALL);
+               dataBit = (ql_read_common_reg(qdev, spir) &
+                          AUBURN_EEPROM_DI_1) ? 1 : 0;
                data = (data << 1) | dataBit;
        }
-       *value = (u16) data;
+       *value = (u16)data;
 }
 
 /*
@@ -551,13 +530,12 @@ static int ql_get_nvram_params(struct ql3_adapter *qdev)
 
        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 
-       pEEPROMData = (u16 *) & qdev->nvram_data;
+       pEEPROMData = (u16 *)&qdev->nvram_data;
        qdev->eeprom_cmd_data = 0;
-       if(ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
+       if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
                        (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
                         2) << 10)) {
-               printk(KERN_ERR PFX"%s: Failed ql_sem_spinlock().\n",
-                       __func__);
+               pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
                return -1;
        }
@@ -570,8 +548,8 @@ static int ql_get_nvram_params(struct ql3_adapter *qdev)
        ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);
 
        if (checksum != 0) {
-               printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n",
-                      qdev->ndev->name, checksum);
+               netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
+                          checksum);
                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
                return -1;
        }
@@ -587,7 +565,7 @@ static const u32 PHYAddr[2] = {
 static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
 {
        struct ql3xxx_port_registers __iomem *port_regs =
-                       qdev->mem_map_registers;
+                       qdev->mem_map_registers;
        u32 temp;
        int count = 1000;
 
@@ -604,7 +582,7 @@ static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
 static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
 {
        struct ql3xxx_port_registers __iomem *port_regs =
-                       qdev->mem_map_registers;
+                       qdev->mem_map_registers;
        u32 scanControl;
 
        if (qdev->numPorts > 1) {
@@ -632,7 +610,7 @@ static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
 {
        u8 ret;
        struct ql3xxx_port_registers __iomem *port_regs =
-                                       qdev->mem_map_registers;
+                                       qdev->mem_map_registers;
 
        /* See if scan mode is enabled before we turn it off */
        if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
@@ -662,17 +640,13 @@ static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
                               u16 regAddr, u16 value, u32 phyAddr)
 {
        struct ql3xxx_port_registers __iomem *port_regs =
-                       qdev->mem_map_registers;
+                       qdev->mem_map_registers;
        u8 scanWasEnabled;
 
        scanWasEnabled = ql_mii_disable_scan_mode(qdev);
 
        if (ql_wait_for_mii_ready(qdev)) {
-               if (netif_msg_link(qdev))
-                       printk(KERN_WARNING PFX
-                              "%s Timed out waiting for management port to "
-                              "get free before issuing command.\n",
-                              qdev->ndev->name);
+               netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
                return -1;
        }
 
@@ -683,11 +657,7 @@ static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
 
        /* Wait for write to complete 9/10/04 SJP */
        if (ql_wait_for_mii_ready(qdev)) {
-               if (netif_msg_link(qdev))
-                       printk(KERN_WARNING PFX
-                              "%s: Timed out waiting for management port to "
-                              "get free before issuing command.\n",
-                              qdev->ndev->name);
+               netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
                return -1;
        }
 
@@ -698,21 +668,17 @@ static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
 }
 
 static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
-                             u16 * value, u32 phyAddr)
+                             u16 *value, u32 phyAddr)
 {
        struct ql3xxx_port_registers __iomem *port_regs =
-                       qdev->mem_map_registers;
+                       qdev->mem_map_registers;
        u8 scanWasEnabled;
        u32 temp;
 
        scanWasEnabled = ql_mii_disable_scan_mode(qdev);
 
        if (ql_wait_for_mii_ready(qdev)) {
-               if (netif_msg_link(qdev))
-                       printk(KERN_WARNING PFX
-                              "%s: Timed out waiting for management port to "
-                              "get free before issuing command.\n",
-                              qdev->ndev->name);
+               netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
                return -1;
        }
 
@@ -727,11 +693,7 @@ static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
 
        /* Wait for the read to complete */
        if (ql_wait_for_mii_ready(qdev)) {
-               if (netif_msg_link(qdev))
-                       printk(KERN_WARNING PFX
-                              "%s: Timed out waiting for management port to "
-                              "get free after issuing command.\n",
-                              qdev->ndev->name);
+               netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
                return -1;
        }
 
@@ -747,16 +709,12 @@ static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
 static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
 {
        struct ql3xxx_port_registers __iomem *port_regs =
-                       qdev->mem_map_registers;
+                       qdev->mem_map_registers;
 
        ql_mii_disable_scan_mode(qdev);
 
        if (ql_wait_for_mii_ready(qdev)) {
-               if (netif_msg_link(qdev))
-                       printk(KERN_WARNING PFX
-                              "%s: Timed out waiting for management port to "
-                              "get free before issuing command.\n",
-                              qdev->ndev->name);
+               netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
                return -1;
        }
 
@@ -767,11 +725,7 @@ static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
 
        /* Wait for write to complete. */
        if (ql_wait_for_mii_ready(qdev)) {
-               if (netif_msg_link(qdev))
-                       printk(KERN_WARNING PFX
-                              "%s: Timed out waiting for management port to "
-                              "get free before issuing command.\n",
-                              qdev->ndev->name);
+               netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
                return -1;
        }
 
@@ -784,16 +738,12 @@ static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
 {
        u32 temp;
        struct ql3xxx_port_registers __iomem *port_regs =
-                       qdev->mem_map_registers;
+                       qdev->mem_map_registers;
 
        ql_mii_disable_scan_mode(qdev);
 
        if (ql_wait_for_mii_ready(qdev)) {
-               if (netif_msg_link(qdev))
-                       printk(KERN_WARNING PFX
-                              "%s: Timed out waiting for management port to "
-                              "get free before issuing command.\n",
-                              qdev->ndev->name);
+               netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
                return -1;
        }
 
@@ -808,11 +758,7 @@ static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
 
        /* Wait for the read to complete */
        if (ql_wait_for_mii_ready(qdev)) {
-               if (netif_msg_link(qdev))
-                       printk(KERN_WARNING PFX
-                              "%s: Timed out waiting for management port to "
-                              "get free before issuing command.\n",
-                              qdev->ndev->name);
+               netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
                return -1;
        }
 
@@ -898,7 +844,7 @@ static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
 
 static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
 {
-       printk(KERN_INFO "%s: enabling Agere specific PHY\n", qdev->ndev->name);
+       netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
        /* power down device bit 11 = 1 */
        ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
        /* enable diagnostic mode bit 2 = 1 */
@@ -918,7 +864,8 @@ static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
        /* point to hidden reg 0x2806 */
        ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
        /* Write new PHYAD w/bit 5 set */
-       ql_mii_write_reg_ex(qdev, 0x11, 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
+       ql_mii_write_reg_ex(qdev, 0x11,
+                           0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
        /*
         * Disable diagnostic mode bit 2 = 0
         * Power up device bit 11 = 0
@@ -929,21 +876,19 @@ static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
        ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
 }
 
-static PHY_DEVICE_et getPhyType (struct ql3_adapter *qdev,
-                                u16 phyIdReg0, u16 phyIdReg1)
+static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
+                                      u16 phyIdReg0, u16 phyIdReg1)
 {
-       PHY_DEVICE_et result = PHY_TYPE_UNKNOWN;
+       enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN;
        u32   oui;
        u16   model;
        int i;
 
-       if (phyIdReg0 == 0xffff) {
+       if (phyIdReg0 == 0xffff)
                return result;
-       }
 
-       if (phyIdReg1 == 0xffff) {
+       if (phyIdReg1 == 0xffff)
                return result;
-       }
 
        /* oui is split between two registers */
        oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);
@@ -951,15 +896,13 @@ static PHY_DEVICE_et getPhyType (struct ql3_adapter *qdev,
        model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;
 
        /* Scan table for this PHY */
-       for(i = 0; i < MAX_PHY_DEV_TYPES; i++) {
-               if ((oui == PHY_DEVICES[i].phyIdOUI) && (model == PHY_DEVICES[i].phyIdModel))
-               {
+       for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
+               if ((oui == PHY_DEVICES[i].phyIdOUI) &&
+                   (model == PHY_DEVICES[i].phyIdModel)) {
+                       netdev_info(qdev->ndev, "Phy: %s\n",
+                                   PHY_DEVICES[i].name);
                        result = PHY_DEVICES[i].phyDevice;
-
-                       printk(KERN_INFO "%s: Phy: %s\n",
-                               qdev->ndev->name, PHY_DEVICES[i].name);
-
-                       break;
+                       break;
                }
        }
 
@@ -970,9 +913,8 @@ static int ql_phy_get_speed(struct ql3_adapter *qdev)
 {
        u16 reg;
 
-       switch(qdev->phyType) {
-       case PHY_AGERE_ET1011C:
-       {
+       switch (qdev->phyType) {
+       case PHY_AGERE_ET1011C: {
                if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
                        return 0;
 
@@ -980,20 +922,20 @@ static int ql_phy_get_speed(struct ql3_adapter *qdev)
                break;
        }
        default:
-       if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
-               return 0;
+               if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
+                       return 0;
 
-       reg = (((reg & 0x18) >> 3) & 3);
+               reg = (((reg & 0x18) >> 3) & 3);
        }
 
-       switch(reg) {
-               case 2:
+       switch (reg) {
+       case 2:
                return SPEED_1000;
-               case 1:
+       case 1:
                return SPEED_100;
-               case 0:
+       case 0:
                return SPEED_10;
-               default:
+       default:
                return -1;
        }
 }
@@ -1002,17 +944,15 @@ static int ql_is_full_dup(struct ql3_adapter *qdev)
 {
        u16 reg;
 
-       switch(qdev->phyType) {
-       case PHY_AGERE_ET1011C:
-       {
+       switch (qdev->phyType) {
+       case PHY_AGERE_ET1011C: {
                if (ql_mii_read_reg(qdev, 0x1A, &reg))
                        return 0;
 
                return ((reg & 0x0080) && (reg & 0x1000)) != 0;
        }
        case PHY_VITESSE_VSC8211:
-       default:
-       {
+       default: {
                if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
                        return 0;
                return (reg & PHY_AUX_DUPLEX_STAT) != 0;
@@ -1040,17 +980,15 @@ static int PHY_Setup(struct ql3_adapter *qdev)
 
        /*  Determine the PHY we are using by reading the ID's */
        err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
-       if(err != 0) {
-               printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG\n",
-                      qdev->ndev->name);
-                return err;
+       if (err != 0) {
+               netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
+               return err;
        }
 
        err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
-       if(err != 0) {
-               printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG\n",
-                      qdev->ndev->name);
-                return err;
+       if (err != 0) {
+               netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
+               return err;
        }
 
        /*  Check if we have a Agere PHY */
@@ -1058,24 +996,22 @@ static int PHY_Setup(struct ql3_adapter *qdev)
 
                /* Determine which MII address we should be using
                   determined by the index of the card */
-               if (qdev->mac_index == 0) {
+               if (qdev->mac_index == 0)
                        miiAddr = MII_AGERE_ADDR_1;
-               } else {
+               else
                        miiAddr = MII_AGERE_ADDR_2;
-               }
 
-               err =ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
-               if(err != 0) {
-                       printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n",
-                              qdev->ndev->name);
+               err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
+               if (err != 0) {
+                       netdev_err(qdev->ndev,
+                                  "Could not read from reg PHY_ID_0_REG after Agere detected\n");
                        return err;
                }
 
                err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
-               if(err != 0) {
-                       printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n",
-                              qdev->ndev->name);
-                       return err;
+               if (err != 0) {
+                       netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n");
+                       return err;
                }
 
                /*  We need to remember to initialize the Agere PHY */
@@ -1090,7 +1026,7 @@ static int PHY_Setup(struct ql3_adapter *qdev)
                /* need this here so address gets changed */
                phyAgereSpecificInit(qdev, miiAddr);
        } else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
-               printk(KERN_ERR "%s: PHY is unknown\n", qdev->ndev->name);
+               netdev_err(qdev->ndev, "PHY is unknown\n");
                return -EIO;
        }
 
@@ -1103,7 +1039,7 @@ static int PHY_Setup(struct ql3_adapter *qdev)
 static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
 {
        struct ql3xxx_port_registers __iomem *port_regs =
-                       qdev->mem_map_registers;
+                       qdev->mem_map_registers;
        u32 value;
 
        if (enable)
@@ -1123,7 +1059,7 @@ static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
 static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
 {
        struct ql3xxx_port_registers __iomem *port_regs =
-                       qdev->mem_map_registers;
+                       qdev->mem_map_registers;
        u32 value;
 
        if (enable)
@@ -1143,7 +1079,7 @@ static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
 static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
 {
        struct ql3xxx_port_registers __iomem *port_regs =
-                       qdev->mem_map_registers;
+                       qdev->mem_map_registers;
        u32 value;
 
        if (enable)
@@ -1163,7 +1099,7 @@ static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
 static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
 {
        struct ql3xxx_port_registers __iomem *port_regs =
-                       qdev->mem_map_registers;
+                       qdev->mem_map_registers;
        u32 value;
 
        if (enable)
@@ -1183,7 +1119,7 @@ static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
 static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
 {
        struct ql3xxx_port_registers __iomem *port_regs =
-                       qdev->mem_map_registers;
+                       qdev->mem_map_registers;
        u32 value;
 
        if (enable)
@@ -1205,7 +1141,7 @@ static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
 static int ql_is_fiber(struct ql3_adapter *qdev)
 {
        struct ql3xxx_port_registers __iomem *port_regs =
-                       qdev->mem_map_registers;
+                       qdev->mem_map_registers;
        u32 bitToCheck = 0;
        u32 temp;
 
@@ -1235,7 +1171,7 @@ static int ql_is_auto_cfg(struct ql3_adapter *qdev)
 static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
 {
        struct ql3xxx_port_registers __iomem *port_regs =
-                       qdev->mem_map_registers;
+                       qdev->mem_map_registers;
        u32 bitToCheck = 0;
        u32 temp;
 
@@ -1250,18 +1186,11 @@ static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
 
        temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
        if (temp & bitToCheck) {
-               if (netif_msg_link(qdev))
-                       printk(KERN_INFO PFX
-                              "%s: Auto-Negotiate complete.\n",
-                              qdev->ndev->name);
+               netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
                return 1;
-       } else {
-               if (netif_msg_link(qdev))
-                       printk(KERN_WARNING PFX
-                              "%s: Auto-Negotiate incomplete.\n",
-                              qdev->ndev->name);
-               return 0;
        }
+       netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
+       return 0;
 }
 
 /*
@@ -1278,7 +1207,7 @@ static int ql_is_neg_pause(struct ql3_adapter *qdev)
 static int ql_auto_neg_error(struct ql3_adapter *qdev)
 {
        struct ql3xxx_port_registers __iomem *port_regs =
-                       qdev->mem_map_registers;
+                       qdev->mem_map_registers;
        u32 bitToCheck = 0;
        u32 temp;
 
@@ -1316,7 +1245,7 @@ static int ql_is_link_full_dup(struct ql3_adapter *qdev)
 static int ql_link_down_detect(struct ql3_adapter *qdev)
 {
        struct ql3xxx_port_registers __iomem *port_regs =
-                       qdev->mem_map_registers;
+                       qdev->mem_map_registers;
        u32 bitToCheck = 0;
        u32 temp;
 
@@ -1340,7 +1269,7 @@ static int ql_link_down_detect(struct ql3_adapter *qdev)
 static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
 {
        struct ql3xxx_port_registers __iomem *port_regs =
-                       qdev->mem_map_registers;
+                       qdev->mem_map_registers;
 
        switch (qdev->mac_index) {
        case 0:
@@ -1370,7 +1299,7 @@ static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
 static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
 {
        struct ql3xxx_port_registers __iomem *port_regs =
-                       qdev->mem_map_registers;
+                       qdev->mem_map_registers;
        u32 bitToCheck = 0;
        u32 temp;
 
@@ -1387,16 +1316,13 @@ static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
 
        temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
        if (temp & bitToCheck) {
-               if (netif_msg_link(qdev))
-                       printk(KERN_DEBUG PFX
-                              "%s: is not link master.\n", qdev->ndev->name);
+               netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+                            "not link master\n");
                return 0;
-       } else {
-               if (netif_msg_link(qdev))
-                       printk(KERN_DEBUG PFX
-                              "%s: is link master.\n", qdev->ndev->name);
-               return 1;
        }
+
+       netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
+       return 1;
 }
 
 static void ql_phy_reset_ex(struct ql3_adapter *qdev)
@@ -1410,19 +1336,20 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
        u16 reg;
        u16 portConfiguration;
 
-       if(qdev->phyType == PHY_AGERE_ET1011C) {
-               /* turn off external loopback */
+       if (qdev->phyType == PHY_AGERE_ET1011C)
                ql_mii_write_reg(qdev, 0x13, 0x0000);
-       }
+                                       /* turn off external loopback */
 
-       if(qdev->mac_index == 0)
-               portConfiguration = qdev->nvram_data.macCfg_port0.portConfiguration;
+       if (qdev->mac_index == 0)
+               portConfiguration =
+                       qdev->nvram_data.macCfg_port0.portConfiguration;
        else
-               portConfiguration = qdev->nvram_data.macCfg_port1.portConfiguration;
+               portConfiguration =
+                       qdev->nvram_data.macCfg_port1.portConfiguration;
 
        /*  Some HBA's in the field are set to 0 and they need to
            be reinterpreted with a default value */
-       if(portConfiguration == 0)
+       if (portConfiguration == 0)
                portConfiguration = PORT_CONFIG_DEFAULT;
 
        /* Set the 1000 advertisements */
@@ -1430,8 +1357,8 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
                           PHYAddr[qdev->mac_index]);
        reg &= ~PHY_GIG_ALL_PARAMS;
 
-       if(portConfiguration & PORT_CONFIG_1000MB_SPEED) {
-               if(portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
+       if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
+               if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
                        reg |= PHY_GIG_ADV_1000F;
                else
                        reg |= PHY_GIG_ADV_1000H;
@@ -1445,29 +1372,27 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
                           PHYAddr[qdev->mac_index]);
        reg &= ~PHY_NEG_ALL_PARAMS;
 
-       if(portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
+       if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
                reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;
 
-       if(portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
-               if(portConfiguration & PORT_CONFIG_100MB_SPEED)
+       if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
+               if (portConfiguration & PORT_CONFIG_100MB_SPEED)
                        reg |= PHY_NEG_ADV_100F;
 
-               if(portConfiguration & PORT_CONFIG_10MB_SPEED)
+               if (portConfiguration & PORT_CONFIG_10MB_SPEED)
                        reg |= PHY_NEG_ADV_10F;
        }
 
-       if(portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
-               if(portConfiguration & PORT_CONFIG_100MB_SPEED)
+       if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
+               if (portConfiguration & PORT_CONFIG_100MB_SPEED)
                        reg |= PHY_NEG_ADV_100H;
 
-               if(portConfiguration & PORT_CONFIG_10MB_SPEED)
+               if (portConfiguration & PORT_CONFIG_10MB_SPEED)
                        reg |= PHY_NEG_ADV_10H;
        }
 
-       if(portConfiguration &
-          PORT_CONFIG_1000MB_SPEED) {
+       if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
                reg |= 1;
-       }
 
        ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
                            PHYAddr[qdev->mac_index]);
@@ -1492,7 +1417,7 @@ static void ql_phy_init_ex(struct ql3_adapter *qdev)
 static u32 ql_get_link_state(struct ql3_adapter *qdev)
 {
        struct ql3xxx_port_registers __iomem *port_regs =
-                       qdev->mem_map_registers;
+                       qdev->mem_map_registers;
        u32 bitToCheck = 0;
        u32 temp, linkState;
 
@@ -1504,22 +1429,22 @@ static u32 ql_get_link_state(struct ql3_adapter *qdev)
                bitToCheck = PORT_STATUS_UP1;
                break;
        }
+
        temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
-       if (temp & bitToCheck) {
+       if (temp & bitToCheck)
                linkState = LS_UP;
-       } else {
+       else
                linkState = LS_DOWN;
-       }
+
        return linkState;
 }
 
 static int ql_port_start(struct ql3_adapter *qdev)
 {
-       if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+       if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
                (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
                         2) << 7)) {
-               printk(KERN_ERR "%s: Could not get hw lock for GIO\n",
-                      qdev->ndev->name);
+               netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
                return -1;
        }
 
@@ -1537,19 +1462,16 @@ static int ql_port_start(struct ql3_adapter *qdev)
 static int ql_finish_auto_neg(struct ql3_adapter *qdev)
 {
 
-       if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+       if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
                (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
                         2) << 7))
                return -1;
 
        if (!ql_auto_neg_error(qdev)) {
-               if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
+               if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
                        /* configure the MAC */
-                       if (netif_msg_link(qdev))
-                               printk(KERN_DEBUG PFX
-                                      "%s: Configuring link.\n",
-                                      qdev->ndev->
-                                      name);
+                       netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+                                    "Configuring link\n");
                        ql_mac_cfg_soft_reset(qdev, 1);
                        ql_mac_cfg_gig(qdev,
                                       (ql_get_link_speed
@@ -1564,43 +1486,32 @@ static int ql_finish_auto_neg(struct ql3_adapter *qdev)
                        ql_mac_cfg_soft_reset(qdev, 0);
 
                        /* enable the MAC */
-                       if (netif_msg_link(qdev))
-                               printk(KERN_DEBUG PFX
-                                      "%s: Enabling mac.\n",
-                                      qdev->ndev->
-                                              name);
+                       netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+                                    "Enabling mac\n");
                        ql_mac_enable(qdev, 1);
                }
 
                qdev->port_link_state = LS_UP;
                netif_start_queue(qdev->ndev);
                netif_carrier_on(qdev->ndev);
-               if (netif_msg_link(qdev))
-                       printk(KERN_INFO PFX
-                              "%s: Link is up at %d Mbps, %s duplex.\n",
-                              qdev->ndev->name,
-                              ql_get_link_speed(qdev),
-                              ql_is_link_full_dup(qdev)
-                              ? "full" : "half");
+               netif_info(qdev, link, qdev->ndev,
+                          "Link is up at %d Mbps, %s duplex\n",
+                          ql_get_link_speed(qdev),
+                          ql_is_link_full_dup(qdev) ? "full" : "half");
 
        } else {        /* Remote error detected */
 
-               if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
-                       if (netif_msg_link(qdev))
-                               printk(KERN_DEBUG PFX
-                                      "%s: Remote error detected. "
-                                      "Calling ql_port_start().\n",
-                                      qdev->ndev->
-                                      name);
+               if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
+                       netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+                                    "Remote error detected. Calling ql_port_start()\n");
                        /*
                         * ql_port_start() is shared code and needs
                         * to lock the PHY on it's own.
                         */
                        ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
-                       if(ql_port_start(qdev)) {/* Restart port */
+                       if (ql_port_start(qdev))        /* Restart port */
                                return -1;
-                       } else
-                               return 0;
+                       return 0;
                }
        }
        ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
@@ -1619,33 +1530,28 @@ static void ql_link_state_machine_work(struct work_struct *work)
 
        curr_link_state = ql_get_link_state(qdev);
 
-       if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) {
-               if (netif_msg_link(qdev))
-                       printk(KERN_INFO PFX
-                              "%s: Reset in progress, skip processing link "
-                              "state.\n", qdev->ndev->name);
+       if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
+               netif_info(qdev, link, qdev->ndev,
+                          "Reset in progress, skip processing link state\n");
 
                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 
                /* Restart timer on 2 second interval. */
-               mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);\
+               mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
 
                return;
        }
 
        switch (qdev->port_link_state) {
        default:
-               if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
+               if (test_bit(QL_LINK_MASTER, &qdev->flags))
                        ql_port_start(qdev);
-               }
                qdev->port_link_state = LS_DOWN;
                /* Fall Through */
 
        case LS_DOWN:
                if (curr_link_state == LS_UP) {
-                       if (netif_msg_link(qdev))
-                               printk(KERN_INFO PFX "%s: Link is up.\n",
-                                      qdev->ndev->name);
+                       netif_info(qdev, link, qdev->ndev, "Link is up\n");
                        if (ql_is_auto_neg_complete(qdev))
                                ql_finish_auto_neg(qdev);
 
@@ -1662,9 +1568,7 @@ static void ql_link_state_machine_work(struct work_struct *work)
                 * back up
                 */
                if (curr_link_state == LS_DOWN) {
-                       if (netif_msg_link(qdev))
-                               printk(KERN_INFO PFX "%s: Link is down.\n",
-                                      qdev->ndev->name);
+                       netif_info(qdev, link, qdev->ndev, "Link is down\n");
                        qdev->port_link_state = LS_DOWN;
                }
                if (ql_link_down_detect(qdev))
@@ -1683,9 +1587,9 @@ static void ql_link_state_machine_work(struct work_struct *work)
 static void ql_get_phy_owner(struct ql3_adapter *qdev)
 {
        if (ql_this_adapter_controls_port(qdev))
-               set_bit(QL_LINK_MASTER,&qdev->flags);
+               set_bit(QL_LINK_MASTER, &qdev->flags);
        else
-               clear_bit(QL_LINK_MASTER,&qdev->flags);
+               clear_bit(QL_LINK_MASTER, &qdev->flags);
 }
 
 /*
@@ -1695,7 +1599,7 @@ static void ql_init_scan_mode(struct ql3_adapter *qdev)
 {
        ql_mii_enable_scan_mode(qdev);
 
-       if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
+       if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
                if (ql_this_adapter_controls_port(qdev))
                        ql_petbi_init_ex(qdev);
        } else {
@@ -1705,18 +1609,18 @@ static void ql_init_scan_mode(struct ql3_adapter *qdev)
 }
 
 /*
- * MII_Setup needs to be called before taking the PHY out of reset so that the
- * management interface clock speed can be set properly.  It would be better if
- * we had a way to disable MDC until after the PHY is out of reset, but we
- * don't have that capability.
+ * MII_Setup needs to be called before taking the PHY out of reset
+ * so that the management interface clock speed can be set properly.
+ * It would be better if we had a way to disable MDC until after the
+ * PHY is out of reset, but we don't have that capability.
  */
 static int ql_mii_setup(struct ql3_adapter *qdev)
 {
        u32 reg;
        struct ql3xxx_port_registers __iomem *port_regs =
-                       qdev->mem_map_registers;
+                       qdev->mem_map_registers;
 
-       if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+       if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
                        (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
                         2) << 7))
                return -1;
@@ -1735,24 +1639,24 @@ static int ql_mii_setup(struct ql3_adapter *qdev)
        return 0;
 }
 
+#define SUPPORTED_OPTICAL_MODES        (SUPPORTED_1000baseT_Full |     \
+                                SUPPORTED_FIBRE |              \
+                                SUPPORTED_Autoneg)
+#define SUPPORTED_TP_MODES     (SUPPORTED_10baseT_Half |       \
+                                SUPPORTED_10baseT_Full |       \
+                                SUPPORTED_100baseT_Half |      \
+                                SUPPORTED_100baseT_Full |      \
+                                SUPPORTED_1000baseT_Half |     \
+                                SUPPORTED_1000baseT_Full |     \
+                                SUPPORTED_Autoneg |            \
+                                SUPPORTED_TP)
+
 static u32 ql_supported_modes(struct ql3_adapter *qdev)
 {
-       u32 supported;
+       if (test_bit(QL_LINK_OPTICAL, &qdev->flags))
+               return SUPPORTED_OPTICAL_MODES;
 
-       if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
-               supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
-                   | SUPPORTED_Autoneg;
-       } else {
-               supported = SUPPORTED_10baseT_Half
-                   | SUPPORTED_10baseT_Full
-                   | SUPPORTED_100baseT_Half
-                   | SUPPORTED_100baseT_Full
-                   | SUPPORTED_1000baseT_Half
-                   | SUPPORTED_1000baseT_Full
-                   | SUPPORTED_Autoneg | SUPPORTED_TP;
-       }
-
-       return supported;
+       return SUPPORTED_TP_MODES;
 }
 
 static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
@@ -1760,9 +1664,9 @@ static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
        int status;
        unsigned long hw_flags;
        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
-       if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
-               (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
-                        2) << 7)) {
+       if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+                           (QL_RESOURCE_BITS_BASE_CODE |
+                            (qdev->mac_index) * 2) << 7)) {
                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
                return 0;
        }
@@ -1777,9 +1681,9 @@ static u32 ql_get_speed(struct ql3_adapter *qdev)
        u32 status;
        unsigned long hw_flags;
        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
-       if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
-               (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
-                        2) << 7)) {
+       if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+                           (QL_RESOURCE_BITS_BASE_CODE |
+                            (qdev->mac_index) * 2) << 7)) {
                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
                return 0;
        }
@@ -1794,9 +1698,9 @@ static int ql_get_full_dup(struct ql3_adapter *qdev)
        int status;
        unsigned long hw_flags;
        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
-       if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
-               (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
-                        2) << 7)) {
+       if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+                           (QL_RESOURCE_BITS_BASE_CODE |
+                            (qdev->mac_index) * 2) << 7)) {
                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
                return 0;
        }
@@ -1806,7 +1710,6 @@ static int ql_get_full_dup(struct ql3_adapter *qdev)
        return status;
 }
 
-
 static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
 {
        struct ql3_adapter *qdev = netdev_priv(ndev);
@@ -1814,7 +1717,7 @@ static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
        ecmd->transceiver = XCVR_INTERNAL;
        ecmd->supported = ql_supported_modes(qdev);
 
-       if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
+       if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
                ecmd->port = PORT_FIBRE;
        } else {
                ecmd->port = PORT_TP;
@@ -1855,10 +1758,11 @@ static void ql_get_pauseparam(struct net_device *ndev,
                              struct ethtool_pauseparam *pause)
 {
        struct ql3_adapter *qdev = netdev_priv(ndev);
-       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
 
        u32 reg;
-       if(qdev->mac_index == 0)
+       if (qdev->mac_index == 0)
                reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
        else
                reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);
@@ -1885,12 +1789,12 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
 
        while (lrg_buf_cb) {
                if (!lrg_buf_cb->skb) {
-                       lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
-                                                          qdev->lrg_buffer_len);
+                       lrg_buf_cb->skb =
+                               netdev_alloc_skb(qdev->ndev,
+                                                qdev->lrg_buffer_len);
                        if (unlikely(!lrg_buf_cb->skb)) {
-                               printk(KERN_DEBUG PFX
-                                      "%s: Failed netdev_alloc_skb().\n",
-                                      qdev->ndev->name);
+                               netdev_printk(KERN_DEBUG, qdev->ndev,
+                                             "Failed netdev_alloc_skb()\n");
                                break;
                        } else {
                                /*
@@ -1905,9 +1809,10 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
                                                     PCI_DMA_FROMDEVICE);
 
                                err = pci_dma_mapping_error(qdev->pdev, map);
-                               if(err) {
-                                       printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
-                                              qdev->ndev->name, err);
+                               if (err) {
+                                       netdev_err(qdev->ndev,
+                                                  "PCI mapping failed with error: %d\n",
+                                                  err);
                                        dev_kfree_skb(lrg_buf_cb->skb);
                                        lrg_buf_cb->skb = NULL;
                                        break;
@@ -1915,9 +1820,9 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
 
 
                                lrg_buf_cb->buf_phy_addr_low =
-                                   cpu_to_le32(LS_64BITS(map));
+                                       cpu_to_le32(LS_64BITS(map));
                                lrg_buf_cb->buf_phy_addr_high =
-                                   cpu_to_le32(MS_64BITS(map));
+                                       cpu_to_le32(MS_64BITS(map));
                                dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
                                dma_unmap_len_set(lrg_buf_cb, maplen,
                                                  qdev->lrg_buffer_len -
@@ -1937,7 +1842,9 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
  */
 static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
 {
-       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
+
        if (qdev->small_buf_release_cnt >= 16) {
                while (qdev->small_buf_release_cnt >= 16) {
                        qdev->small_buf_q_producer_index++;
@@ -1961,7 +1868,8 @@ static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
        struct bufq_addr_element *lrg_buf_q_ele;
        int i;
        struct ql_rcv_buf_cb *lrg_buf_cb;
-       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
 
        if ((qdev->lrg_buf_free_count >= 8) &&
            (qdev->lrg_buf_release_cnt >= 16)) {
@@ -1989,7 +1897,8 @@ static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
 
                        qdev->lrg_buf_q_producer_index++;
 
-                       if (qdev->lrg_buf_q_producer_index == qdev->num_lbufq_entries)
+                       if (qdev->lrg_buf_q_producer_index ==
+                           qdev->num_lbufq_entries)
                                qdev->lrg_buf_q_producer_index = 0;
 
                        if (qdev->lrg_buf_q_producer_index ==
@@ -2011,23 +1920,26 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
        int i;
        int retval = 0;
 
-       if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
-               printk(KERN_WARNING "Frame short but, frame was padded and sent.\n");
+       if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
+               netdev_warn(qdev->ndev,
+                           "Frame too short but it was padded and sent\n");
        }
 
        tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
 
        /*  Check the transmit response flags for any errors */
-       if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
-               printk(KERN_ERR "Frame too short to be legal, frame not sent.\n");
+       if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
+               netdev_err(qdev->ndev,
+                          "Frame too short to be legal, frame not sent\n");
 
                qdev->ndev->stats.tx_errors++;
                retval = -EIO;
                goto frame_not_sent;
        }
 
-       if(tx_cb->seg_count == 0) {
-               printk(KERN_ERR "tx_cb->seg_count == 0: %d\n", mac_rsp->transaction_id);
+       if (tx_cb->seg_count == 0) {
+               netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
+                          mac_rsp->transaction_id);
 
                qdev->ndev->stats.tx_errors++;
                retval = -EIO;
@@ -2073,7 +1985,7 @@ static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
        qdev->lrg_buf_release_cnt++;
        if (++qdev->lrg_buf_index == qdev->num_large_buffers)
                qdev->lrg_buf_index = 0;
-       return(lrg_buf_cb);
+       return lrg_buf_cb;
 }
 
 /*
@@ -2177,12 +2089,11 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
                if (checksum &
                        (IB_IP_IOCB_RSP_3032_ICE |
                         IB_IP_IOCB_RSP_3032_CE)) {
-                       printk(KERN_ERR
-                              "%s: Bad checksum for this %s packet, checksum = %x.\n",
-                              __func__,
-                              ((checksum &
-                               IB_IP_IOCB_RSP_3032_TCP) ? "TCP" :
-                               "UDP"),checksum);
+                       netdev_err(ndev,
+                                  "%s: Bad checksum for this %s packet, checksum = %x\n",
+                                  __func__,
+                                  ((checksum & IB_IP_IOCB_RSP_3032_TCP) ?
+                                   "TCP" : "UDP"), checksum);
                } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
                                (checksum & IB_IP_IOCB_RSP_3032_UDP &&
                                !(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
@@ -2215,8 +2126,8 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
                net_rsp = qdev->rsp_current;
                rmb();
                /*
-                * Fix 4032 chipe undocumented "feature" where bit-8 is set if the
-                * inbound completion is for a VLAN.
+                * Fix 4032 chip's undocumented "feature" where bit-8 is set
+                * if the inbound completion is for a VLAN.
                 */
                if (qdev->device_id == QL3032_DEVICE_ID)
                        net_rsp->opcode &= 0x7f;
@@ -2242,22 +2153,18 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
                                                 net_rsp);
                        (*rx_cleaned)++;
                        break;
-               default:
-                       {
-                               u32 *tmp = (u32 *) net_rsp;
-                               printk(KERN_ERR PFX
-                                      "%s: Hit default case, not "
-                                      "handled!\n"
-                                      "        dropping the packet, opcode = "
-                                      "%x.\n",
-                                      ndev->name, net_rsp->opcode);
-                               printk(KERN_ERR PFX
-                                      "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
-                                      (unsigned long int)tmp[0],
-                                      (unsigned long int)tmp[1],
-                                      (unsigned long int)tmp[2],
-                                      (unsigned long int)tmp[3]);
-                       }
+               default: {
+                       u32 *tmp = (u32 *)net_rsp;
+                       netdev_err(ndev,
+                                  "Hit default case, not handled!\n"
+                                  "    dropping the packet, opcode = %x\n"
+                                  "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
+                                  net_rsp->opcode,
+                                  (unsigned long int)tmp[0],
+                                  (unsigned long int)tmp[1],
+                                  (unsigned long int)tmp[2],
+                                  (unsigned long int)tmp[3]);
+               }
                }
 
                qdev->rsp_consumer_index++;
@@ -2280,7 +2187,8 @@ static int ql_poll(struct napi_struct *napi, int budget)
        struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
        int rx_cleaned = 0, tx_cleaned = 0;
        unsigned long hw_flags;
-       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
 
        ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget);
 
@@ -2303,15 +2211,14 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
 
        struct net_device *ndev = dev_id;
        struct ql3_adapter *qdev = netdev_priv(ndev);
-       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
        u32 value;
        int handled = 1;
        u32 var;
 
-       port_regs = qdev->mem_map_registers;
-
-       value =
-           ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
+       value = ql_read_common_reg_l(qdev,
+                                    &port_regs->CommonRegs.ispControlStatus);
 
        if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
                spin_lock(&qdev->adapter_lock);
@@ -2319,7 +2226,7 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
                netif_carrier_off(qdev->ndev);
                ql_disable_interrupts(qdev);
                qdev->port_link_state = LS_DOWN;
-               set_bit(QL_RESET_ACTIVE,&qdev->flags) ;
+               set_bit(QL_RESET_ACTIVE, &qdev->flags);
 
                if (value & ISP_CONTROL_FE) {
                        /*
@@ -2328,69 +2235,53 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
                        var =
                            ql_read_page0_reg_l(qdev,
                                              &port_regs->PortFatalErrStatus);
-                       printk(KERN_WARNING PFX
-                              "%s: Resetting chip. PortFatalErrStatus "
-                              "register = 0x%x\n", ndev->name, var);
-                       set_bit(QL_RESET_START,&qdev->flags) ;
+                       netdev_warn(ndev,
+                                   "Resetting chip. PortFatalErrStatus register = 0x%x\n",
+                                   var);
+                       set_bit(QL_RESET_START, &qdev->flags);
                } else {
                        /*
                         * Soft Reset Requested.
                         */
-                       set_bit(QL_RESET_PER_SCSI,&qdev->flags) ;
-                       printk(KERN_ERR PFX
-                              "%s: Another function issued a reset to the "
-                              "chip. ISR value = %x.\n", ndev->name, value);
+                       set_bit(QL_RESET_PER_SCSI, &qdev->flags);
+                       netdev_err(ndev,
+                                  "Another function issued a reset to the chip. ISR value = %x\n",
+                                  value);
                }
                queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
                spin_unlock(&qdev->adapter_lock);
        } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
                ql_disable_interrupts(qdev);
-               if (likely(napi_schedule_prep(&qdev->napi))) {
+               if (likely(napi_schedule_prep(&qdev->napi)))
                        __napi_schedule(&qdev->napi);
-               }
-       } else {
+       } else
                return IRQ_NONE;
-       }
 
        return IRQ_RETVAL(handled);
 }
 
 /*
- * Get the total number of segments needed for the
- * given number of fragments.  This is necessary because
- * outbound address lists (OAL) will be used when more than
- * two frags are given.  Each address list has 5 addr/len
- * pairs.  The 5th pair in each AOL is used to  point to
- * the next AOL if more frags are coming.
- * That is why the frags:segment count  ratio is not linear.
+ * Get the total number of segments needed for the given number of fragments.
+ * This is necessary because outbound address lists (OAL) will be used when
+ * more than two frags are given.  Each address list has 5 addr/len pairs.
+ * The 5th pair in each OAL is used to  point to the next OAL if more frags
+ * are coming.  That is why the frags:segment count ratio is not linear.
  */
-static int ql_get_seg_count(struct ql3_adapter *qdev,
-                           unsigned short frags)
+static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags)
 {
        if (qdev->device_id == QL3022_DEVICE_ID)
                return 1;
 
-       switch(frags) {
-       case 0: return 1;       /* just the skb->data seg */
-       case 1: return 2;       /* skb->data + 1 frag */
-       case 2: return 3;       /* skb->data + 2 frags */
-       case 3: return 5;       /* skb->data + 1 frag + 1 AOL containting 2 frags */
-       case 4: return 6;
-       case 5: return 7;
-       case 6: return 8;
-       case 7: return 10;
-       case 8: return 11;
-       case 9: return 12;
-       case 10: return 13;
-       case 11: return 15;
-       case 12: return 16;
-       case 13: return 17;
-       case 14: return 18;
-       case 15: return 20;
-       case 16: return 21;
-       case 17: return 22;
-       case 18: return 23;
-       }
+       if (frags <= 2)
+               return frags + 1;
+       else if (frags <= 6)
+               return frags + 2;
+       else if (frags <= 10)
+               return frags + 3;
+       else if (frags <= 14)
+               return frags + 4;
+       else if (frags <= 18)
+               return frags + 5;
        return -1;
 }
 
@@ -2413,8 +2304,8 @@ static void ql_hw_csum_setup(const struct sk_buff *skb,
 }
 
 /*
- * Map the buffers for this transmit.  This will return
- * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
+ * Map the buffers for this transmit.
+ * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
  */
 static int ql_send_map(struct ql3_adapter *qdev,
                                struct ob_mac_iocb_req *mac_iocb_ptr,
@@ -2437,9 +2328,9 @@ static int ql_send_map(struct ql3_adapter *qdev,
        map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
 
        err = pci_dma_mapping_error(qdev->pdev, map);
-       if(err) {
-               printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
-                      qdev->ndev->name, err);
+       if (err) {
+               netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",
+                          err);
 
                return NETDEV_TX_BUSY;
        }
@@ -2455,65 +2346,67 @@ static int ql_send_map(struct ql3_adapter *qdev,
        if (seg_cnt == 1) {
                /* Terminate the last segment. */
                oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
-       } else {
-               oal = tx_cb->oal;
-               for (completed_segs=0; completed_segs<frag_cnt; completed_segs++,seg++) {
-                       skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
-                       oal_entry++;
-                       if ((seg == 2 && seg_cnt > 3) ||        /* Check for continuation */
-                           (seg == 7 && seg_cnt > 8) ||        /* requirements. It's strange */
-                           (seg == 12 && seg_cnt > 13) ||      /* but necessary. */
-                           (seg == 17 && seg_cnt > 18)) {
-                               /* Continuation entry points to outbound address list. */
-                               map = pci_map_single(qdev->pdev, oal,
-                                                    sizeof(struct oal),
-                                                    PCI_DMA_TODEVICE);
-
-                               err = pci_dma_mapping_error(qdev->pdev, map);
-                               if(err) {
-
-                                       printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n",
-                                              qdev->ndev->name, err);
-                                       goto map_error;
-                               }
-
-                               oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
-                               oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
-                               oal_entry->len =
-                                   cpu_to_le32(sizeof(struct oal) |
-                                               OAL_CONT_ENTRY);
-                               dma_unmap_addr_set(&tx_cb->map[seg], mapaddr,
-                                                  map);
-                               dma_unmap_len_set(&tx_cb->map[seg], maplen,
-                                                 sizeof(struct oal));
-                               oal_entry = (struct oal_entry *)oal;
-                               oal++;
-                               seg++;
-                       }
-
-                       map =
-                           pci_map_page(qdev->pdev, frag->page,
-                                        frag->page_offset, frag->size,
-                                        PCI_DMA_TODEVICE);
+               return NETDEV_TX_OK;
+       }
+       oal = tx_cb->oal;
+       for (completed_segs = 0;
+            completed_segs < frag_cnt;
+            completed_segs++, seg++) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
+               oal_entry++;
+               /*
+                * Check for continuation requirements.
+                * It's strange but necessary.
+                * Continuation entry points to outbound address list.
+                */
+               if ((seg == 2 && seg_cnt > 3) ||
+                   (seg == 7 && seg_cnt > 8) ||
+                   (seg == 12 && seg_cnt > 13) ||
+                   (seg == 17 && seg_cnt > 18)) {
+                       map = pci_map_single(qdev->pdev, oal,
+                                            sizeof(struct oal),
+                                            PCI_DMA_TODEVICE);
 
                        err = pci_dma_mapping_error(qdev->pdev, map);
-                       if(err) {
-                               printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
-                                      qdev->ndev->name, err);
+                       if (err) {
+                               netdev_err(qdev->ndev,
+                                          "PCI mapping outbound address list with error: %d\n",
+                                          err);
                                goto map_error;
                        }
 
                        oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
                        oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
-                       oal_entry->len = cpu_to_le32(frag->size);
+                       oal_entry->len = cpu_to_le32(sizeof(struct oal) |
+                                                    OAL_CONT_ENTRY);
                        dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
                        dma_unmap_len_set(&tx_cb->map[seg], maplen,
-                                         frag->size);
+                                         sizeof(struct oal));
+                       oal_entry = (struct oal_entry *)oal;
+                       oal++;
+                       seg++;
+               }
+
+               map = pci_map_page(qdev->pdev, frag->page,
+                                  frag->page_offset, frag->size,
+                                  PCI_DMA_TODEVICE);
+
+               err = pci_dma_mapping_error(qdev->pdev, map);
+               if (err) {
+                       netdev_err(qdev->ndev,
+                                  "PCI mapping frags failed with error: %d\n",
+                                  err);
+                       goto map_error;
                }
-               /* Terminate the last segment. */
-               oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
-       }
 
+               oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
+               oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
+               oal_entry->len = cpu_to_le32(frag->size);
+               dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+               dma_unmap_len_set(&tx_cb->map[seg], maplen, frag->size);
+       }
+       /* Terminate the last segment. */
+       oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
        return NETDEV_TX_OK;
 
 map_error:
@@ -2525,13 +2418,18 @@ map_error:
        seg = 1;
        oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
        oal = tx_cb->oal;
-       for (i=0; i<completed_segs; i++,seg++) {
+       for (i = 0; i < completed_segs; i++, seg++) {
                oal_entry++;
 
-               if((seg == 2 && seg_cnt > 3) ||        /* Check for continuation */
-                  (seg == 7 && seg_cnt > 8) ||        /* requirements. It's strange */
-                  (seg == 12 && seg_cnt > 13) ||      /* but necessary. */
-                  (seg == 17 && seg_cnt > 18)) {
+               /*
+                * Check for continuation requirements.
+                * It's strange but necessary.
+                */
+
+               if ((seg == 2 && seg_cnt > 3) ||
+                   (seg == 7 && seg_cnt > 8) ||
+                   (seg == 12 && seg_cnt > 13) ||
+                   (seg == 17 && seg_cnt > 18)) {
                        pci_unmap_single(qdev->pdev,
                                dma_unmap_addr(&tx_cb->map[seg], mapaddr),
                                dma_unmap_len(&tx_cb->map[seg], maplen),
@@ -2570,19 +2468,20 @@ static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
                               struct net_device *ndev)
 {
        struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
-       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+       struct ql3xxx_port_registers __iomem *port_regs =
+                       qdev->mem_map_registers;
        struct ql_tx_buf_cb *tx_cb;
        u32 tot_len = skb->len;
        struct ob_mac_iocb_req *mac_iocb_ptr;
 
-       if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
+       if (unlikely(atomic_read(&qdev->tx_count) < 2))
                return NETDEV_TX_BUSY;
-       }
 
-       tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
-       if((tx_cb->seg_count = ql_get_seg_count(qdev,
-                                               (skb_shinfo(skb)->nr_frags))) == -1) {
-               printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
+       tx_cb = &qdev->tx_buf[qdev->req_producer_index];
+       tx_cb->seg_count = ql_get_seg_count(qdev,
+                                            skb_shinfo(skb)->nr_frags);
+       if (tx_cb->seg_count == -1) {
+               netdev_err(ndev, "%s: invalid segment count!\n", __func__);
                return NETDEV_TX_OK;
        }
 
@@ -2598,8 +2497,8 @@ static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
            skb->ip_summed == CHECKSUM_PARTIAL)
                ql_hw_csum_setup(skb, mac_iocb_ptr);
 
-       if(ql_send_map(qdev,mac_iocb_ptr,tx_cb,skb) != NETDEV_TX_OK) {
-               printk(KERN_ERR PFX"%s: Could not map the segments!\n",__func__);
+       if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
+               netdev_err(ndev, "%s: Could not map the segments!\n", __func__);
                return NETDEV_TX_BUSY;
        }
 
@@ -2612,9 +2511,9 @@ static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
                            &port_regs->CommonRegs.reqQProducerIndex,
                            qdev->req_producer_index);
 
-       if (netif_msg_tx_queued(qdev))
-               printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
-                      ndev->name, qdev->req_producer_index, skb->len);
+       netif_printk(qdev, tx_queued, KERN_DEBUG, ndev,
+                    "tx queued, slot %d, len %d\n",
+                    qdev->req_producer_index, skb->len);
 
        atomic_dec(&qdev->tx_count);
        return NETDEV_TX_OK;
@@ -2632,8 +2531,7 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
 
        if ((qdev->req_q_virt_addr == NULL) ||
            LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
-               printk(KERN_ERR PFX "%s: reqQ failed.\n",
-                      qdev->ndev->name);
+               netdev_err(qdev->ndev, "reqQ failed\n");
                return -ENOMEM;
        }
 
@@ -2646,25 +2544,22 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
 
        if ((qdev->rsp_q_virt_addr == NULL) ||
            LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
-               printk(KERN_ERR PFX
-                      "%s: rspQ allocation failed\n",
-                      qdev->ndev->name);
+               netdev_err(qdev->ndev, "rspQ allocation failed\n");
                pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
                                    qdev->req_q_virt_addr,
                                    qdev->req_q_phy_addr);
                return -ENOMEM;
        }
 
-       set_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags);
+       set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
 
        return 0;
 }
 
 static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
 {
-       if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags)) {
-               printk(KERN_INFO PFX
-                      "%s: Already done.\n", qdev->ndev->name);
+       if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) {
+               netdev_info(qdev->ndev, "Already done\n");
                return;
        }
 
@@ -2680,34 +2575,34 @@ static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
 
        qdev->rsp_q_virt_addr = NULL;
 
-       clear_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags);
+       clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
 }
 
 static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
 {
        /* Create Large Buffer Queue */
        qdev->lrg_buf_q_size =
-           qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
+               qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
        if (qdev->lrg_buf_q_size < PAGE_SIZE)
                qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
        else
                qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
 
-       qdev->lrg_buf = kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),GFP_KERNEL);
+       qdev->lrg_buf =
+               kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),
+                       GFP_KERNEL);
        if (qdev->lrg_buf == NULL) {
-               printk(KERN_ERR PFX
-                      "%s: qdev->lrg_buf alloc failed.\n", qdev->ndev->name);
+               netdev_err(qdev->ndev, "qdev->lrg_buf alloc failed\n");
                return -ENOMEM;
        }
 
        qdev->lrg_buf_q_alloc_virt_addr =
-           pci_alloc_consistent(qdev->pdev,
-                                qdev->lrg_buf_q_alloc_size,
-                                &qdev->lrg_buf_q_alloc_phy_addr);
+               pci_alloc_consistent(qdev->pdev,
+                                    qdev->lrg_buf_q_alloc_size,
+                                    &qdev->lrg_buf_q_alloc_phy_addr);
 
        if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
-               printk(KERN_ERR PFX
-                      "%s: lBufQ failed\n", qdev->ndev->name);
+               netdev_err(qdev->ndev, "lBufQ failed\n");
                return -ENOMEM;
        }
        qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
@@ -2715,21 +2610,19 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
 
        /* Create Small Buffer Queue */
        qdev->small_buf_q_size =
-           NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
+               NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
        if (qdev->small_buf_q_size < PAGE_SIZE)
                qdev->small_buf_q_alloc_size = PAGE_SIZE;
        else
                qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
 
        qdev->small_buf_q_alloc_virt_addr =
-           pci_alloc_consistent(qdev->pdev,
-                                qdev->small_buf_q_alloc_size,
-                                &qdev->small_buf_q_alloc_phy_addr);
+               pci_alloc_consistent(qdev->pdev,
+                                    qdev->small_buf_q_alloc_size,
+                                    &qdev->small_buf_q_alloc_phy_addr);
 
        if (qdev->small_buf_q_alloc_virt_addr == NULL) {
-               printk(KERN_ERR PFX
-                      "%s: Small Buffer Queue allocation failed.\n",
-                      qdev->ndev->name);
+               netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
                pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
                                    qdev->lrg_buf_q_alloc_virt_addr,
                                    qdev->lrg_buf_q_alloc_phy_addr);
@@ -2738,18 +2631,17 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
 
        qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
        qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
-       set_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags);
+       set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
        return 0;
 }
 
 static void ql_free_buffer_queues(struct ql3_adapter *qdev)
 {
-       if (!test_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags)) {
-               printk(KERN_INFO PFX
-                      "%s: Already done.\n", qdev->ndev->name);
+       if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
+               netdev_info(qdev->ndev, "Already done\n");
                return;
        }
-       if(qdev->lrg_buf) kfree(qdev->lrg_buf);
+       kfree(qdev->lrg_buf);
        pci_free_consistent(qdev->pdev,
                            qdev->lrg_buf_q_alloc_size,
                            qdev->lrg_buf_q_alloc_virt_addr,
@@ -2764,7 +2656,7 @@ static void ql_free_buffer_queues(struct ql3_adapter *qdev)
 
        qdev->small_buf_q_virt_addr = NULL;
 
-       clear_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags);
+       clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
 }
 
 static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
@@ -2774,18 +2666,16 @@ static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
 
        /* Currently we allocate on one of memory and use it for smallbuffers */
        qdev->small_buf_total_size =
-           (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
-            QL_SMALL_BUFFER_SIZE);
+               (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
+                QL_SMALL_BUFFER_SIZE);
 
        qdev->small_buf_virt_addr =
-           pci_alloc_consistent(qdev->pdev,
-                                qdev->small_buf_total_size,
-                                &qdev->small_buf_phy_addr);
+               pci_alloc_consistent(qdev->pdev,
+                                    qdev->small_buf_total_size,
+                                    &qdev->small_buf_phy_addr);
 
        if (qdev->small_buf_virt_addr == NULL) {
-               printk(KERN_ERR PFX
-                      "%s: Failed to get small buffer memory.\n",
-                      qdev->ndev->name);
+               netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
                return -ENOMEM;
        }
 
@@ -2804,15 +2694,14 @@ static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
                small_buf_q_entry++;
        }
        qdev->small_buf_index = 0;
-       set_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags);
+       set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags);
        return 0;
 }
 
 static void ql_free_small_buffers(struct ql3_adapter *qdev)
 {
-       if (!test_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags)) {
-               printk(KERN_INFO PFX
-                      "%s: Already done.\n", qdev->ndev->name);
+       if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) {
+               netdev_info(qdev->ndev, "Already done\n");
                return;
        }
        if (qdev->small_buf_virt_addr != NULL) {
@@ -2874,11 +2763,9 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
                                       qdev->lrg_buffer_len);
                if (unlikely(!skb)) {
                        /* Better luck next round */
-                       printk(KERN_ERR PFX
-                              "%s: large buff alloc failed, "
-                              "for %d bytes at index %d.\n",
-                              qdev->ndev->name,
-                              qdev->lrg_buffer_len * 2, i);
+                       netdev_err(qdev->ndev,
+                                  "large buff alloc failed for %d bytes at index %d\n",
+                                  qdev->lrg_buffer_len * 2, i);
                        ql_free_large_buffers(qdev);
                        return -ENOMEM;
                } else {
@@ -2899,9 +2786,10 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
                                             PCI_DMA_FROMDEVICE);
 
                        err = pci_dma_mapping_error(qdev->pdev, map);
-                       if(err) {
-                               printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
-                                      qdev->ndev->name, err);
+                       if (err) {
+                               netdev_err(qdev->ndev,
+                                          "PCI mapping failed with error: %d\n",
+                                          err);
                                ql_free_large_buffers(qdev);
                                return -ENOMEM;
                        }
@@ -2926,10 +2814,8 @@ static void ql_free_send_free_list(struct ql3_adapter *qdev)
 
        tx_cb = &qdev->tx_buf[0];
        for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
-               if (tx_cb->oal) {
-                       kfree(tx_cb->oal);
-                       tx_cb->oal = NULL;
-               }
+               kfree(tx_cb->oal);
+               tx_cb->oal = NULL;
                tx_cb++;
        }
 }
@@ -2938,8 +2824,7 @@ static int ql_create_send_free_list(struct ql3_adapter *qdev)
 {
        struct ql_tx_buf_cb *tx_cb;
        int i;
-       struct ob_mac_iocb_req *req_q_curr =
-                                       qdev->req_q_virt_addr;
+       struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr;
 
        /* Create free list of transmit buffers */
        for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
@@ -2960,23 +2845,22 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
        if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
                qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
                qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
-       }
-       else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
+       } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
                /*
                 * Bigger buffers, so less of them.
                 */
                qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
                qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
        } else {
-               printk(KERN_ERR PFX
-                      "%s: Invalid mtu size.  Only 1500 and 9000 are accepted.\n",
-                      qdev->ndev->name);
+               netdev_err(qdev->ndev, "Invalid mtu size: %d.  Only %d and %d are accepted.\n",
+                          qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE);
                return -ENOMEM;
        }
-       qdev->num_large_buffers = qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
+       qdev->num_large_buffers =
+               qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
        qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
        qdev->max_frame_size =
-           (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
+               (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
 
        /*
         * First allocate a page of shared memory and use it for shadow
@@ -2984,51 +2868,44 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
         * Network Completion Queue Producer Index Register
         */
        qdev->shadow_reg_virt_addr =
-           pci_alloc_consistent(qdev->pdev,
-                                PAGE_SIZE, &qdev->shadow_reg_phy_addr);
+               pci_alloc_consistent(qdev->pdev,
+                                    PAGE_SIZE, &qdev->shadow_reg_phy_addr);
 
        if (qdev->shadow_reg_virt_addr != NULL) {
                qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr;
                qdev->req_consumer_index_phy_addr_high =
-                   MS_64BITS(qdev->shadow_reg_phy_addr);
+                       MS_64BITS(qdev->shadow_reg_phy_addr);
                qdev->req_consumer_index_phy_addr_low =
-                   LS_64BITS(qdev->shadow_reg_phy_addr);
+                       LS_64BITS(qdev->shadow_reg_phy_addr);
 
                qdev->prsp_producer_index =
-                   (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
+                       (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
                qdev->rsp_producer_index_phy_addr_high =
-                   qdev->req_consumer_index_phy_addr_high;
+                       qdev->req_consumer_index_phy_addr_high;
                qdev->rsp_producer_index_phy_addr_low =
-                   qdev->req_consumer_index_phy_addr_low + 8;
+                       qdev->req_consumer_index_phy_addr_low + 8;
        } else {
-               printk(KERN_ERR PFX
-                      "%s: shadowReg Alloc failed.\n", qdev->ndev->name);
+               netdev_err(qdev->ndev, "shadowReg Alloc failed\n");
                return -ENOMEM;
        }
 
        if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
-               printk(KERN_ERR PFX
-                      "%s: ql_alloc_net_req_rsp_queues failed.\n",
-                      qdev->ndev->name);
+               netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n");
                goto err_req_rsp;
        }
 
        if (ql_alloc_buffer_queues(qdev) != 0) {
-               printk(KERN_ERR PFX
-                      "%s: ql_alloc_buffer_queues failed.\n",
-                      qdev->ndev->name);
+               netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n");
                goto err_buffer_queues;
        }
 
        if (ql_alloc_small_buffers(qdev) != 0) {
-               printk(KERN_ERR PFX
-                      "%s: ql_alloc_small_buffers failed\n", qdev->ndev->name);
+               netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n");
                goto err_small_buffers;
        }
 
        if (ql_alloc_large_buffers(qdev) != 0) {
-               printk(KERN_ERR PFX
-                      "%s: ql_alloc_large_buffers failed\n", qdev->ndev->name);
+               netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n");
                goto err_small_buffers;
        }
 
@@ -3076,7 +2953,7 @@ static int ql_init_misc_registers(struct ql3_adapter *qdev)
        struct ql3xxx_local_ram_registers __iomem *local_ram =
            (void __iomem *)qdev->mem_map_registers;
 
-       if(ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
+       if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
                        (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
                         2) << 4))
                return -1;
@@ -3132,18 +3009,20 @@ static int ql_init_misc_registers(struct ql3_adapter *qdev)
 static int ql_adapter_initialize(struct ql3_adapter *qdev)
 {
        u32 value;
-       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
+       u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
        struct ql3xxx_host_memory_registers __iomem *hmem_regs =
-                                               (void __iomem *)port_regs;
+               (void __iomem *)port_regs;
        u32 delay = 10;
        int status = 0;
        unsigned long hw_flags = 0;
 
-       if(ql_mii_setup(qdev))
+       if (ql_mii_setup(qdev))
                return -1;
 
        /* Bring out PHY out of reset */
-       ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+       ql_write_common_reg(qdev, spir,
                            (ISP_SERIAL_PORT_IF_WE |
                             (ISP_SERIAL_PORT_IF_WE << 16)));
        /* Give the PHY time to come out of reset. */
@@ -3152,13 +3031,13 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
        netif_carrier_off(qdev->ndev);
 
        /* V2 chip fix for ARS-39168. */
-       ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+       ql_write_common_reg(qdev, spir,
                            (ISP_SERIAL_PORT_IF_SDE |
                             (ISP_SERIAL_PORT_IF_SDE << 16)));
 
        /* Request Queue Registers */
-       *((u32 *) (qdev->preq_consumer_index)) = 0;
-       atomic_set(&qdev->tx_count,NUM_REQ_Q_ENTRIES);
+       *((u32 *)(qdev->preq_consumer_index)) = 0;
+       atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
        qdev->req_producer_index = 0;
 
        ql_write_page1_reg(qdev,
@@ -3208,7 +3087,9 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
                           &hmem_regs->rxLargeQBaseAddrLow,
                           LS_64BITS(qdev->lrg_buf_q_phy_addr));
 
-       ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, qdev->num_lbufq_entries);
+       ql_write_page1_reg(qdev,
+                          &hmem_regs->rxLargeQLength,
+                          qdev->num_lbufq_entries);
 
        ql_write_page1_reg(qdev,
                           &hmem_regs->rxLargeBufferLength,
@@ -3258,7 +3139,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
        if ((value & PORT_STATUS_IC) == 0) {
 
                /* Chip has not been configured yet, so let it rip. */
-               if(ql_init_misc_registers(qdev)) {
+               if (ql_init_misc_registers(qdev)) {
                        status = -1;
                        goto out;
                }
@@ -3268,7 +3149,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
 
                value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
 
-               if(ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
+               if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
                                (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
                                 * 2) << 13)) {
                        status = -1;
@@ -3291,7 +3172,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
                                           &port_regs->mac0MaxFrameLengthReg,
                                           qdev->max_frame_size);
 
-       if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+       if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
                        (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
                         2) << 7)) {
                status = -1;
@@ -3353,8 +3234,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
        } while (--delay);
 
        if (delay == 0) {
-               printk(KERN_ERR PFX
-                      "%s: Hw Initialization timeout.\n", qdev->ndev->name);
+               netdev_err(qdev->ndev, "Hw Initialization timeout\n");
                status = -1;
                goto out;
        }
@@ -3385,7 +3265,8 @@ out:
  */
 static int ql_adapter_reset(struct ql3_adapter *qdev)
 {
-       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
        int status = 0;
        u16 value;
        int max_wait_time;
@@ -3396,17 +3277,14 @@ static int ql_adapter_reset(struct ql3_adapter *qdev)
        /*
         * Issue soft reset to chip.
         */
-       printk(KERN_DEBUG PFX
-              "%s: Issue soft reset to chip.\n",
-              qdev->ndev->name);
+       netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n");
        ql_write_common_reg(qdev,
                            &port_regs->CommonRegs.ispControlStatus,
                            ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
 
        /* Wait 3 seconds for reset to complete. */
-       printk(KERN_DEBUG PFX
-              "%s: Wait 10 milliseconds for reset to complete.\n",
-              qdev->ndev->name);
+       netdev_printk(KERN_DEBUG, qdev->ndev,
+                     "Wait 10 milliseconds for reset to complete\n");
 
        /* Wait until the firmware tells us the Soft Reset is done */
        max_wait_time = 5;
@@ -3427,8 +3305,8 @@ static int ql_adapter_reset(struct ql3_adapter *qdev)
        value =
            ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
        if (value & ISP_CONTROL_RI) {
-               printk(KERN_DEBUG PFX
-                      "ql_adapter_reset: clearing RI after reset.\n");
+               netdev_printk(KERN_DEBUG, qdev->ndev,
+                             "clearing RI after reset\n");
                ql_write_common_reg(qdev,
                                    &port_regs->CommonRegs.
                                    ispControlStatus,
@@ -3448,13 +3326,11 @@ static int ql_adapter_reset(struct ql3_adapter *qdev)
                 */
                max_wait_time = 5;
                do {
-                       value =
-                           ql_read_common_reg(qdev,
-                                              &port_regs->CommonRegs.
-                                              ispControlStatus);
-                       if ((value & ISP_CONTROL_FSR) == 0) {
+                       value = ql_read_common_reg(qdev,
+                                                  &port_regs->CommonRegs.
+                                                  ispControlStatus);
+                       if ((value & ISP_CONTROL_FSR) == 0)
                                break;
-                       }
                        ssleep(1);
                } while ((--max_wait_time));
        }
@@ -3468,7 +3344,8 @@ static int ql_adapter_reset(struct ql3_adapter *qdev)
 
 static void ql_set_mac_info(struct ql3_adapter *qdev)
 {
-       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
        u32 value, port_status;
        u8 func_number;
 
@@ -3484,9 +3361,9 @@ static void ql_set_mac_info(struct ql3_adapter *qdev)
                qdev->mb_bit_mask = FN0_MA_BITS_MASK;
                qdev->PHYAddr = PORT0_PHY_ADDRESS;
                if (port_status & PORT_STATUS_SM0)
-                       set_bit(QL_LINK_OPTICAL,&qdev->flags);
+                       set_bit(QL_LINK_OPTICAL, &qdev->flags);
                else
-                       clear_bit(QL_LINK_OPTICAL,&qdev->flags);
+                       clear_bit(QL_LINK_OPTICAL, &qdev->flags);
                break;
 
        case ISP_CONTROL_FN1_NET:
@@ -3495,17 +3372,17 @@ static void ql_set_mac_info(struct ql3_adapter *qdev)
                qdev->mb_bit_mask = FN1_MA_BITS_MASK;
                qdev->PHYAddr = PORT1_PHY_ADDRESS;
                if (port_status & PORT_STATUS_SM1)
-                       set_bit(QL_LINK_OPTICAL,&qdev->flags);
+                       set_bit(QL_LINK_OPTICAL, &qdev->flags);
                else
-                       clear_bit(QL_LINK_OPTICAL,&qdev->flags);
+                       clear_bit(QL_LINK_OPTICAL, &qdev->flags);
                break;
 
        case ISP_CONTROL_FN0_SCSI:
        case ISP_CONTROL_FN1_SCSI:
        default:
-               printk(KERN_DEBUG PFX
-                      "%s: Invalid function number, ispControlStatus = 0x%x\n",
-                      qdev->ndev->name,value);
+               netdev_printk(KERN_DEBUG, qdev->ndev,
+                             "Invalid function number, ispControlStatus = 0x%x\n",
+                             value);
                break;
        }
        qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
@@ -3516,32 +3393,26 @@ static void ql_display_dev_info(struct net_device *ndev)
        struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
        struct pci_dev *pdev = qdev->pdev;
 
-       printk(KERN_INFO PFX
-              "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n",
-              DRV_NAME, qdev->index, qdev->chip_rev_id,
-              (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022",
-              qdev->pci_slot);
-       printk(KERN_INFO PFX
-              "%s Interface.\n",
-              test_bit(QL_LINK_OPTICAL,&qdev->flags) ? "OPTICAL" : "COPPER");
+       netdev_info(ndev,
+                   "%s Adapter %d RevisionID %d found %s on PCI slot %d\n",
+                   DRV_NAME, qdev->index, qdev->chip_rev_id,
+                   qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022",
+                   qdev->pci_slot);
+       netdev_info(ndev, "%s Interface\n",
+               test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");
 
        /*
         * Print PCI bus width/type.
         */
-       printk(KERN_INFO PFX
-              "Bus interface is %s %s.\n",
-              ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
-              ((qdev->pci_x) ? "PCI-X" : "PCI"));
+       netdev_info(ndev, "Bus interface is %s %s\n",
+                   ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
+                   ((qdev->pci_x) ? "PCI-X" : "PCI"));
 
-       printk(KERN_INFO PFX
-              "mem  IO base address adjusted = 0x%p\n",
-              qdev->mem_map_registers);
-       printk(KERN_INFO PFX "Interrupt number = %d\n", pdev->irq);
+       netdev_info(ndev, "mem  IO base address adjusted = 0x%p\n",
+                   qdev->mem_map_registers);
+       netdev_info(ndev, "Interrupt number = %d\n", pdev->irq);
 
-       if (netif_msg_probe(qdev))
-               printk(KERN_INFO PFX
-                      "%s: MAC address %pM\n",
-                      ndev->name, ndev->dev_addr);
+       netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr);
 }
 
 static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
@@ -3552,17 +3423,16 @@ static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
        netif_stop_queue(ndev);
        netif_carrier_off(ndev);
 
-       clear_bit(QL_ADAPTER_UP,&qdev->flags);
-       clear_bit(QL_LINK_MASTER,&qdev->flags);
+       clear_bit(QL_ADAPTER_UP, &qdev->flags);
+       clear_bit(QL_LINK_MASTER, &qdev->flags);
 
        ql_disable_interrupts(qdev);
 
        free_irq(qdev->pdev->irq, ndev);
 
-       if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {
-               printk(KERN_INFO PFX
-                      "%s: calling pci_disable_msi().\n", qdev->ndev->name);
-               clear_bit(QL_MSI_ENABLED,&qdev->flags);
+       if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
+               netdev_info(qdev->ndev, "calling pci_disable_msi()\n");
+               clear_bit(QL_MSI_ENABLED, &qdev->flags);
                pci_disable_msi(qdev->pdev);
        }
 
@@ -3576,17 +3446,16 @@ static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
 
                spin_lock_irqsave(&qdev->hw_lock, hw_flags);
                if (ql_wait_for_drvr_lock(qdev)) {
-                       if ((soft_reset = ql_adapter_reset(qdev))) {
-                               printk(KERN_ERR PFX
-                                      "%s: ql_adapter_reset(%d) FAILED!\n",
-                                      ndev->name, qdev->index);
+                       soft_reset = ql_adapter_reset(qdev);
+                       if (soft_reset) {
+                               netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n",
+                                          qdev->index);
                        }
-                       printk(KERN_ERR PFX
-                               "%s: Releaseing driver lock via chip reset.\n",ndev->name);
+                       netdev_err(ndev,
+                                  "Releasing driver lock via chip reset\n");
                } else {
-                       printk(KERN_ERR PFX
-                              "%s: Could not acquire driver lock to do "
-                              "reset!\n", ndev->name);
+                       netdev_err(ndev,
+                                  "Could not acquire driver lock to do reset!\n");
                        retval = -1;
                }
                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
@@ -3603,56 +3472,50 @@ static int ql_adapter_up(struct ql3_adapter *qdev)
        unsigned long hw_flags;
 
        if (ql_alloc_mem_resources(qdev)) {
-               printk(KERN_ERR PFX
-                      "%s Unable to  allocate buffers.\n", ndev->name);
+               netdev_err(ndev, "Unable to  allocate buffers\n");
                return -ENOMEM;
        }
 
        if (qdev->msi) {
                if (pci_enable_msi(qdev->pdev)) {
-                       printk(KERN_ERR PFX
-                              "%s: User requested MSI, but MSI failed to "
-                              "initialize.  Continuing without MSI.\n",
-                              qdev->ndev->name);
+                       netdev_err(ndev,
+                                  "User requested MSI, but MSI failed to initialize.  Continuing without MSI.\n");
                        qdev->msi = 0;
                } else {
-                       printk(KERN_INFO PFX "%s: MSI Enabled...\n", qdev->ndev->name);
-                       set_bit(QL_MSI_ENABLED,&qdev->flags);
+                       netdev_info(ndev, "MSI Enabled...\n");
+                       set_bit(QL_MSI_ENABLED, &qdev->flags);
                        irq_flags &= ~IRQF_SHARED;
                }
        }
 
-       if ((err = request_irq(qdev->pdev->irq,
-                              ql3xxx_isr,
-                              irq_flags, ndev->name, ndev))) {
-               printk(KERN_ERR PFX
-                      "%s: Failed to reserve interrupt %d already in use.\n",
-                      ndev->name, qdev->pdev->irq);
+       err = request_irq(qdev->pdev->irq, ql3xxx_isr,
+                         irq_flags, ndev->name, ndev);
+       if (err) {
+               netdev_err(ndev,
+                          "Failed to reserve interrupt %d - already in use\n",
+                          qdev->pdev->irq);
                goto err_irq;
        }
 
        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 
-       if ((err = ql_wait_for_drvr_lock(qdev))) {
-               if ((err = ql_adapter_initialize(qdev))) {
-                       printk(KERN_ERR PFX
-                              "%s: Unable to initialize adapter.\n",
-                              ndev->name);
+       err = ql_wait_for_drvr_lock(qdev);
+       if (err) {
+               err = ql_adapter_initialize(qdev);
+               if (err) {
+                       netdev_err(ndev, "Unable to initialize adapter\n");
                        goto err_init;
                }
-               printk(KERN_ERR PFX
-                               "%s: Releaseing driver lock.\n",ndev->name);
+               netdev_err(ndev, "Releasing driver lock\n");
                ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
        } else {
-               printk(KERN_ERR PFX
-                      "%s: Could not acquire driver lock.\n",
-                      ndev->name);
+               netdev_err(ndev, "Could not acquire driver lock\n");
                goto err_lock;
        }
 
        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 
-       set_bit(QL_ADAPTER_UP,&qdev->flags);
+       set_bit(QL_ADAPTER_UP, &qdev->flags);
 
        mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
 
@@ -3666,11 +3529,9 @@ err_lock:
        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
        free_irq(qdev->pdev->irq, ndev);
 err_irq:
-       if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {
-               printk(KERN_INFO PFX
-                      "%s: calling pci_disable_msi().\n",
-                      qdev->ndev->name);
-               clear_bit(QL_MSI_ENABLED,&qdev->flags);
+       if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
+               netdev_info(ndev, "calling pci_disable_msi()\n");
+               clear_bit(QL_MSI_ENABLED, &qdev->flags);
                pci_disable_msi(qdev->pdev);
        }
        return err;
@@ -3678,10 +3539,9 @@ err_irq:
 
 static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
 {
-       if( ql_adapter_down(qdev,reset) || ql_adapter_up(qdev)) {
-               printk(KERN_ERR PFX
-                               "%s: Driver up/down cycle failed, "
-                               "closing device\n",qdev->ndev->name);
+       if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
+               netdev_err(qdev->ndev,
+                          "Driver up/down cycle failed, closing device\n");
                rtnl_lock();
                dev_close(qdev->ndev);
                rtnl_unlock();
@@ -3698,24 +3558,24 @@ static int ql3xxx_close(struct net_device *ndev)
         * Wait for device to recover from a reset.
         * (Rarely happens, but possible.)
         */
-       while (!test_bit(QL_ADAPTER_UP,&qdev->flags))
+       while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
                msleep(50);
 
-       ql_adapter_down(qdev,QL_DO_RESET);
+       ql_adapter_down(qdev, QL_DO_RESET);
        return 0;
 }
 
 static int ql3xxx_open(struct net_device *ndev)
 {
        struct ql3_adapter *qdev = netdev_priv(ndev);
-       return (ql_adapter_up(qdev));
+       return ql_adapter_up(qdev);
 }
 
 static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
 {
        struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
        struct ql3xxx_port_registers __iomem *port_regs =
-                       qdev->mem_map_registers;
+                       qdev->mem_map_registers;
        struct sockaddr *addr = p;
        unsigned long hw_flags;
 
@@ -3750,7 +3610,7 @@ static void ql3xxx_tx_timeout(struct net_device *ndev)
 {
        struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
 
-       printk(KERN_ERR PFX "%s: Resetting...\n", ndev->name);
+       netdev_err(ndev, "Resetting...\n");
        /*
         * Stop the queues, we've got a problem.
         */
@@ -3770,11 +3630,12 @@ static void ql_reset_work(struct work_struct *work)
        u32 value;
        struct ql_tx_buf_cb *tx_cb;
        int max_wait_time, i;
-       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
        unsigned long hw_flags;
 
-       if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START),&qdev->flags)) {
-               clear_bit(QL_LINK_MASTER,&qdev->flags);
+       if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) {
+               clear_bit(QL_LINK_MASTER, &qdev->flags);
 
                /*
                 * Loop through the active list and return the skb.
@@ -3783,17 +3644,19 @@ static void ql_reset_work(struct work_struct *work)
                        int j;
                        tx_cb = &qdev->tx_buf[i];
                        if (tx_cb->skb) {
-                               printk(KERN_DEBUG PFX
-                                      "%s: Freeing lost SKB.\n",
-                                      qdev->ndev->name);
+                               netdev_printk(KERN_DEBUG, ndev,
+                                             "Freeing lost SKB\n");
                                pci_unmap_single(qdev->pdev,
-                                        dma_unmap_addr(&tx_cb->map[0], mapaddr),
+                                        dma_unmap_addr(&tx_cb->map[0],
+                                                       mapaddr),
                                         dma_unmap_len(&tx_cb->map[0], maplen),
                                         PCI_DMA_TODEVICE);
-                               for(j=1;j<tx_cb->seg_count;j++) {
+                               for (j = 1; j < tx_cb->seg_count; j++) {
                                        pci_unmap_page(qdev->pdev,
-                                              dma_unmap_addr(&tx_cb->map[j],mapaddr),
-                                              dma_unmap_len(&tx_cb->map[j],maplen),
+                                              dma_unmap_addr(&tx_cb->map[j],
+                                                             mapaddr),
+                                              dma_unmap_len(&tx_cb->map[j],
+                                                            maplen),
                                               PCI_DMA_TODEVICE);
                                }
                                dev_kfree_skb(tx_cb->skb);
@@ -3801,8 +3664,7 @@ static void ql_reset_work(struct work_struct *work)
                        }
                }
 
-               printk(KERN_ERR PFX
-                      "%s: Clearing NRI after reset.\n", qdev->ndev->name);
+               netdev_err(ndev, "Clearing NRI after reset\n");
                spin_lock_irqsave(&qdev->hw_lock, hw_flags);
                ql_write_common_reg(qdev,
                                    &port_regs->CommonRegs.
@@ -3818,16 +3680,14 @@ static void ql_reset_work(struct work_struct *work)
 
                                                   ispControlStatus);
                        if ((value & ISP_CONTROL_SR) == 0) {
-                               printk(KERN_DEBUG PFX
-                                      "%s: reset completed.\n",
-                                      qdev->ndev->name);
+                               netdev_printk(KERN_DEBUG, ndev,
+                                             "reset completed\n");
                                break;
                        }
 
                        if (value & ISP_CONTROL_RI) {
-                               printk(KERN_DEBUG PFX
-                                      "%s: clearing NRI after reset.\n",
-                                      qdev->ndev->name);
+                               netdev_printk(KERN_DEBUG, ndev,
+                                             "clearing NRI after reset\n");
                                ql_write_common_reg(qdev,
                                                    &port_regs->
                                                    CommonRegs.
@@ -3848,21 +3708,19 @@ static void ql_reset_work(struct work_struct *work)
                         * Set the reset flags and clear the board again.
                         * Nothing else to do...
                         */
-                       printk(KERN_ERR PFX
-                              "%s: Timed out waiting for reset to "
-                              "complete.\n", ndev->name);
-                       printk(KERN_ERR PFX
-                              "%s: Do a reset.\n", ndev->name);
-                       clear_bit(QL_RESET_PER_SCSI,&qdev->flags);
-                       clear_bit(QL_RESET_START,&qdev->flags);
-                       ql_cycle_adapter(qdev,QL_DO_RESET);
+                       netdev_err(ndev,
+                                  "Timed out waiting for reset to complete\n");
+                       netdev_err(ndev, "Do a reset\n");
+                       clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
+                       clear_bit(QL_RESET_START, &qdev->flags);
+                       ql_cycle_adapter(qdev, QL_DO_RESET);
                        return;
                }
 
-               clear_bit(QL_RESET_ACTIVE,&qdev->flags);
-               clear_bit(QL_RESET_PER_SCSI,&qdev->flags);
-               clear_bit(QL_RESET_START,&qdev->flags);
-               ql_cycle_adapter(qdev,QL_NO_RESET);
+               clear_bit(QL_RESET_ACTIVE, &qdev->flags);
+               clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
+               clear_bit(QL_RESET_START, &qdev->flags);
+               ql_cycle_adapter(qdev, QL_NO_RESET);
        }
 }
 
@@ -3876,7 +3734,8 @@ static void ql_tx_timeout_work(struct work_struct *work)
 
 static void ql_get_board_info(struct ql3_adapter *qdev)
 {
-       struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+       struct ql3xxx_port_registers __iomem *port_regs =
+               qdev->mem_map_registers;
        u32 value;
 
        value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
@@ -3915,20 +3774,18 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
 {
        struct net_device *ndev = NULL;
        struct ql3_adapter *qdev = NULL;
-       static int cards_found = 0;
+       static int cards_found;
        int uninitialized_var(pci_using_dac), err;
 
        err = pci_enable_device(pdev);
        if (err) {
-               printk(KERN_ERR PFX "%s cannot enable PCI device\n",
-                      pci_name(pdev));
+               pr_err("%s cannot enable PCI device\n", pci_name(pdev));
                goto err_out;
        }
 
        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
-               printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
-                      pci_name(pdev));
+               pr_err("%s cannot obtain PCI resources\n", pci_name(pdev));
                goto err_out_disable_pdev;
        }
 
@@ -3943,15 +3800,13 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
        }
 
        if (err) {
-               printk(KERN_ERR PFX "%s no usable DMA configuration\n",
-                      pci_name(pdev));
+               pr_err("%s no usable DMA configuration\n", pci_name(pdev));
                goto err_out_free_regions;
        }
 
        ndev = alloc_etherdev(sizeof(struct ql3_adapter));
        if (!ndev) {
-               printk(KERN_ERR PFX "%s could not alloc etherdev\n",
-                      pci_name(pdev));
+               pr_err("%s could not alloc etherdev\n", pci_name(pdev));
                err = -ENOMEM;
                goto err_out_free_regions;
        }
@@ -3978,8 +3833,7 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
 
        qdev->mem_map_registers = pci_ioremap_bar(pdev, 1);
        if (!qdev->mem_map_registers) {
-               printk(KERN_ERR PFX "%s: cannot map device registers\n",
-                      pci_name(pdev));
+               pr_err("%s: cannot map device registers\n", pci_name(pdev));
                err = -EIO;
                goto err_out_free_ndev;
        }
@@ -3998,9 +3852,8 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
 
        /* make sure the EEPROM is good */
        if (ql_get_nvram_params(qdev)) {
-               printk(KERN_ALERT PFX
-                      "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n",
-                      qdev->index);
+               pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n",
+                        __func__, qdev->index);
                err = -EIO;
                goto err_out_iounmap;
        }
@@ -4026,14 +3879,12 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
         * Set the Maximum Memory Read Byte Count value. We do this to handle
         * jumbo frames.
         */
-       if (qdev->pci_x) {
+       if (qdev->pci_x)
                pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);
-       }
 
        err = register_netdev(ndev);
        if (err) {
-               printk(KERN_ERR PFX "%s: cannot register net device\n",
-                      pci_name(pdev));
+               pr_err("%s: cannot register net device\n", pci_name(pdev));
                goto err_out_iounmap;
        }
 
@@ -4052,10 +3903,10 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
        qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
        qdev->adapter_timer.data = (unsigned long)qdev;
 
-       if(!cards_found) {
-               printk(KERN_ALERT PFX "%s\n", DRV_STRING);
-               printk(KERN_ALERT PFX "Driver name: %s, Version: %s.\n",
-                  DRV_NAME, DRV_VERSION);
+       if (!cards_found) {
+               pr_alert("%s\n", DRV_STRING);
+               pr_alert("Driver name: %s, Version: %s\n",
+                        DRV_NAME, DRV_VERSION);
        }
        ql_display_dev_info(ndev);
 
index e1894775e5aa21f26e5255917849b2394524e423..970389331bbc4cb5c91da180fd04d282db872ac5 100644 (file)
@@ -1074,8 +1074,8 @@ struct qlcnic_eswitch {
 /* Return codes for Error handling */
 #define QL_STATUS_INVALID_PARAM        -1
 
-#define MAX_BW                 10000
-#define MIN_BW                 100
+#define MAX_BW                 100
+#define MIN_BW                 1
 #define MAX_VLAN_ID            4095
 #define MIN_VLAN_ID            2
 #define MAX_TX_QUEUES          1
@@ -1083,8 +1083,7 @@ struct qlcnic_eswitch {
 #define DEFAULT_MAC_LEARN      1
 
 #define IS_VALID_VLAN(vlan)    (vlan >= MIN_VLAN_ID && vlan <= MAX_VLAN_ID)
-#define IS_VALID_BW(bw)                (bw >= MIN_BW && bw <= MAX_BW \
-                                                       && (bw % 100) == 0)
+#define IS_VALID_BW(bw)                (bw >= MIN_BW && bw <= MAX_BW)
 #define IS_VALID_TX_QUEUES(que)        (que > 0 && que <= MAX_TX_QUEUES)
 #define IS_VALID_RX_QUEUES(que)        (que > 0 && que <= MAX_RX_QUEUES)
 #define IS_VALID_MODE(mode)    (mode == 0 || mode == 1)
@@ -1302,8 +1301,6 @@ struct qlcnic_nic_template {
        int (*get_mac_addr) (struct qlcnic_adapter *, u8*);
        int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
        int (*config_led) (struct qlcnic_adapter *, u32, u32);
-       int (*set_ilb_mode) (struct qlcnic_adapter *);
-       void (*clear_ilb_mode) (struct qlcnic_adapter *);
        int (*start_firmware) (struct qlcnic_adapter *);
 };
 
index 7d6558e33dca46cd663cd3939da60b3468a72809..9328d59e21e0c6c2ff8011b19eb30f5f041fed5d 100644 (file)
@@ -678,6 +678,12 @@ static int qlcnic_loopback_test(struct net_device *netdev)
        int max_sds_rings = adapter->max_sds_rings;
        int ret;
 
+       if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
+               dev_warn(&adapter->pdev->dev, "Loopback test not supported"
+                               "for non privilege function\n");
+               return 0;
+       }
+
        if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
                return -EIO;
 
@@ -685,13 +691,13 @@ static int qlcnic_loopback_test(struct net_device *netdev)
        if (ret)
                goto clear_it;
 
-       ret = adapter->nic_ops->set_ilb_mode(adapter);
+       ret = qlcnic_set_ilb_mode(adapter);
        if (ret)
                goto done;
 
        ret = qlcnic_do_ilb_test(adapter);
 
-       adapter->nic_ops->clear_ilb_mode(adapter);
+       qlcnic_clear_ilb_mode(adapter);
 
 done:
        qlcnic_diag_free_res(netdev, max_sds_rings);
index f1f7acfbf412a661405e32ef37cacf5995afe737..b9615bd745ea5d9175e6d6153d5c924084d550d2 100644 (file)
@@ -107,8 +107,6 @@ static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
 static int qlcnic_start_firmware(struct qlcnic_adapter *);
 
 static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
-static void qlcnicvf_clear_ilb_mode(struct qlcnic_adapter *);
-static int qlcnicvf_set_ilb_mode(struct qlcnic_adapter *);
 static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
 static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
 static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
@@ -381,8 +379,6 @@ static struct qlcnic_nic_template qlcnic_ops = {
        .get_mac_addr = qlcnic_get_mac_address,
        .config_bridged_mode = qlcnic_config_bridged_mode,
        .config_led = qlcnic_config_led,
-       .set_ilb_mode = qlcnic_set_ilb_mode,
-       .clear_ilb_mode = qlcnic_clear_ilb_mode,
        .start_firmware = qlcnic_start_firmware
 };
 
@@ -390,8 +386,6 @@ static struct qlcnic_nic_template qlcnic_vf_ops = {
        .get_mac_addr = qlcnic_get_mac_address,
        .config_bridged_mode = qlcnicvf_config_bridged_mode,
        .config_led = qlcnicvf_config_led,
-       .set_ilb_mode = qlcnicvf_set_ilb_mode,
-       .clear_ilb_mode = qlcnicvf_clear_ilb_mode,
        .start_firmware = qlcnicvf_start_firmware
 };
 
@@ -1182,6 +1176,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
        ret = qlcnic_fw_create_ctx(adapter);
        if (ret) {
                qlcnic_detach(adapter);
+               netif_device_attach(netdev);
                return ret;
        }
 
@@ -2841,18 +2836,6 @@ qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
        return -EOPNOTSUPP;
 }
 
-static int
-qlcnicvf_set_ilb_mode(struct qlcnic_adapter *adapter)
-{
-       return -EOPNOTSUPP;
-}
-
-static void
-qlcnicvf_clear_ilb_mode(struct qlcnic_adapter *adapter)
-{
-       return;
-}
-
 static ssize_t
 qlcnic_store_bridged_mode(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
index 06b2188f6368e79f6dbc3e5b9f11ed2010aca7bb..a478786840a65e8f35a5a825263926fd0f7078d2 100644 (file)
@@ -18,8 +18,6 @@
 #define DRV_STRING     "QLogic 10 Gigabit PCI-E Ethernet Driver "
 #define DRV_VERSION    "v1.00.00.25.00.00-01"
 
-#define PFX "qlge: "
-
 #define WQ_ADDR_ALIGN  0x3     /* 4 byte alignment */
 
 #define QLGE_VENDOR_ID    0x1077
index 548e9010b20bc67af02ac3f89c7be12e51c948db..4747492935ef8bf190cc575fe7146e3072417623 100644 (file)
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/slab.h>
 
 #include "qlge.h"
@@ -446,7 +448,7 @@ static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf)
                                        MAC_ADDR_TYPE_CAM_MAC, i, value);
                if (status) {
                        netif_err(qdev, drv, qdev->ndev,
-                                 "Failed read of mac index register.\n");
+                                 "Failed read of mac index register\n");
                        goto err;
                }
                *buf++ = value[0];      /* lower MAC address */
@@ -458,7 +460,7 @@ static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf)
                                        MAC_ADDR_TYPE_MULTI_MAC, i, value);
                if (status) {
                        netif_err(qdev, drv, qdev->ndev,
-                                 "Failed read of mac index register.\n");
+                                 "Failed read of mac index register\n");
                        goto err;
                }
                *buf++ = value[0];      /* lower Mcast address */
@@ -482,7 +484,7 @@ static int ql_get_routing_entries(struct ql_adapter *qdev, u32 * buf)
                status = ql_get_routing_reg(qdev, i, &value);
                if (status) {
                        netif_err(qdev, drv, qdev->ndev,
-                                 "Failed read of routing index register.\n");
+                                 "Failed read of routing index register\n");
                        goto err;
                } else {
                        *buf++ = value;
@@ -668,7 +670,7 @@ static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
                        max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
                        break;
                default:
-                       printk(KERN_ERR"Bad type!!! 0x%08x\n", type);
+                       pr_err("Bad type!!! 0x%08x\n", type);
                        max_index = 0;
                        max_offset = 0;
                        break;
@@ -738,7 +740,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
        int i;
 
        if (!mpi_coredump) {
-               netif_err(qdev, drv, qdev->ndev, "No memory available.\n");
+               netif_err(qdev, drv, qdev->ndev, "No memory available\n");
                return -ENOMEM;
        }
 
@@ -1234,7 +1236,7 @@ static void ql_get_core_dump(struct ql_adapter *qdev)
 
        if (!netif_running(qdev->ndev)) {
                netif_err(qdev, ifup, qdev->ndev,
-                         "Force Coredump can only be done from interface that is up.\n");
+                         "Force Coredump can only be done from interface that is up\n");
                return;
        }
        ql_queue_fw_error(qdev);
@@ -1334,7 +1336,7 @@ void ql_mpi_core_to_log(struct work_struct *work)
                     "Core is dumping to log file!\n");
 
        for (i = 0; i < count; i += 8) {
-               printk(KERN_ERR "%.08x: %.08x %.08x %.08x %.08x %.08x "
+               pr_err("%.08x: %.08x %.08x %.08x %.08x %.08x "
                        "%.08x %.08x %.08x\n", i,
                        tmp[i + 0],
                        tmp[i + 1],
@@ -1356,71 +1358,43 @@ static void ql_dump_intr_states(struct ql_adapter *qdev)
        for (i = 0; i < qdev->intr_count; i++) {
                ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask);
                value = ql_read32(qdev, INTR_EN);
-               printk(KERN_ERR PFX
-                      "%s: Interrupt %d is %s.\n",
+               pr_err("%s: Interrupt %d is %s\n",
                       qdev->ndev->name, i,
                       (value & INTR_EN_EN ? "enabled" : "disabled"));
        }
 }
 
+#define DUMP_XGMAC(qdev, reg)                                  \
+do {                                                           \
+       u32 data;                                               \
+       ql_read_xgmac_reg(qdev, reg, &data);                    \
+       pr_err("%s: %s = 0x%.08x\n", qdev->ndev->name, #reg, data); \
+} while (0)
+
 void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)
 {
-       u32 data;
        if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
-               printk(KERN_ERR "%s: Couldn't get xgmac sem.\n", __func__);
+               pr_err("%s: Couldn't get xgmac sem\n", __func__);
                return;
        }
-       ql_read_xgmac_reg(qdev, PAUSE_SRC_LO, &data);
-       printk(KERN_ERR PFX "%s: PAUSE_SRC_LO = 0x%.08x.\n", qdev->ndev->name,
-              data);
-       ql_read_xgmac_reg(qdev, PAUSE_SRC_HI, &data);
-       printk(KERN_ERR PFX "%s: PAUSE_SRC_HI = 0x%.08x.\n", qdev->ndev->name,
-              data);
-       ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
-       printk(KERN_ERR PFX "%s: GLOBAL_CFG = 0x%.08x.\n", qdev->ndev->name,
-              data);
-       ql_read_xgmac_reg(qdev, TX_CFG, &data);
-       printk(KERN_ERR PFX "%s: TX_CFG = 0x%.08x.\n", qdev->ndev->name, data);
-       ql_read_xgmac_reg(qdev, RX_CFG, &data);
-       printk(KERN_ERR PFX "%s: RX_CFG = 0x%.08x.\n", qdev->ndev->name, data);
-       ql_read_xgmac_reg(qdev, FLOW_CTL, &data);
-       printk(KERN_ERR PFX "%s: FLOW_CTL = 0x%.08x.\n", qdev->ndev->name,
-              data);
-       ql_read_xgmac_reg(qdev, PAUSE_OPCODE, &data);
-       printk(KERN_ERR PFX "%s: PAUSE_OPCODE = 0x%.08x.\n", qdev->ndev->name,
-              data);
-       ql_read_xgmac_reg(qdev, PAUSE_TIMER, &data);
-       printk(KERN_ERR PFX "%s: PAUSE_TIMER = 0x%.08x.\n", qdev->ndev->name,
-              data);
-       ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_LO, &data);
-       printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_LO = 0x%.08x.\n",
-              qdev->ndev->name, data);
-       ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_HI, &data);
-       printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_HI = 0x%.08x.\n",
-              qdev->ndev->name, data);
-       ql_read_xgmac_reg(qdev, MAC_TX_PARAMS, &data);
-       printk(KERN_ERR PFX "%s: MAC_TX_PARAMS = 0x%.08x.\n", qdev->ndev->name,
-              data);
-       ql_read_xgmac_reg(qdev, MAC_RX_PARAMS, &data);
-       printk(KERN_ERR PFX "%s: MAC_RX_PARAMS = 0x%.08x.\n", qdev->ndev->name,
-              data);
-       ql_read_xgmac_reg(qdev, MAC_SYS_INT, &data);
-       printk(KERN_ERR PFX "%s: MAC_SYS_INT = 0x%.08x.\n", qdev->ndev->name,
-              data);
-       ql_read_xgmac_reg(qdev, MAC_SYS_INT_MASK, &data);
-       printk(KERN_ERR PFX "%s: MAC_SYS_INT_MASK = 0x%.08x.\n",
-              qdev->ndev->name, data);
-       ql_read_xgmac_reg(qdev, MAC_MGMT_INT, &data);
-       printk(KERN_ERR PFX "%s: MAC_MGMT_INT = 0x%.08x.\n", qdev->ndev->name,
-              data);
-       ql_read_xgmac_reg(qdev, MAC_MGMT_IN_MASK, &data);
-       printk(KERN_ERR PFX "%s: MAC_MGMT_IN_MASK = 0x%.08x.\n",
-              qdev->ndev->name, data);
-       ql_read_xgmac_reg(qdev, EXT_ARB_MODE, &data);
-       printk(KERN_ERR PFX "%s: EXT_ARB_MODE = 0x%.08x.\n", qdev->ndev->name,
-              data);
+       DUMP_XGMAC(qdev, PAUSE_SRC_LO);
+       DUMP_XGMAC(qdev, PAUSE_SRC_HI);
+       DUMP_XGMAC(qdev, GLOBAL_CFG);
+       DUMP_XGMAC(qdev, TX_CFG);
+       DUMP_XGMAC(qdev, RX_CFG);
+       DUMP_XGMAC(qdev, FLOW_CTL);
+       DUMP_XGMAC(qdev, PAUSE_OPCODE);
+       DUMP_XGMAC(qdev, PAUSE_TIMER);
+       DUMP_XGMAC(qdev, PAUSE_FRM_DEST_LO);
+       DUMP_XGMAC(qdev, PAUSE_FRM_DEST_HI);
+       DUMP_XGMAC(qdev, MAC_TX_PARAMS);
+       DUMP_XGMAC(qdev, MAC_RX_PARAMS);
+       DUMP_XGMAC(qdev, MAC_SYS_INT);
+       DUMP_XGMAC(qdev, MAC_SYS_INT_MASK);
+       DUMP_XGMAC(qdev, MAC_MGMT_INT);
+       DUMP_XGMAC(qdev, MAC_MGMT_IN_MASK);
+       DUMP_XGMAC(qdev, EXT_ARB_MODE);
        ql_sem_unlock(qdev, qdev->xg_sem_mask);
-
 }
 
 static void ql_dump_ets_regs(struct ql_adapter *qdev)
@@ -1437,14 +1411,12 @@ static void ql_dump_cam_entries(struct ql_adapter *qdev)
                return;
        for (i = 0; i < 4; i++) {
                if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
-                       printk(KERN_ERR PFX
-                              "%s: Failed read of mac index register.\n",
+                       pr_err("%s: Failed read of mac index register\n",
                               __func__);
                        return;
                } else {
                        if (value[0])
-                               printk(KERN_ERR PFX
-                                      "%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x.\n",
+                               pr_err("%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x\n",
                                       qdev->ndev->name, i, value[1], value[0],
                                       value[2]);
                }
@@ -1452,14 +1424,12 @@ static void ql_dump_cam_entries(struct ql_adapter *qdev)
        for (i = 0; i < 32; i++) {
                if (ql_get_mac_addr_reg
                    (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) {
-                       printk(KERN_ERR PFX
-                              "%s: Failed read of mac index register.\n",
+                       pr_err("%s: Failed read of mac index register\n",
                               __func__);
                        return;
                } else {
                        if (value[0])
-                               printk(KERN_ERR PFX
-                                      "%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x.\n",
+                               pr_err("%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n",
                                       qdev->ndev->name, i, value[1], value[0]);
                }
        }
@@ -1476,129 +1446,77 @@ void ql_dump_routing_entries(struct ql_adapter *qdev)
        for (i = 0; i < 16; i++) {
                value = 0;
                if (ql_get_routing_reg(qdev, i, &value)) {
-                       printk(KERN_ERR PFX
-                              "%s: Failed read of routing index register.\n",
+                       pr_err("%s: Failed read of routing index register\n",
                               __func__);
                        return;
                } else {
                        if (value)
-                               printk(KERN_ERR PFX
-                                      "%s: Routing Mask %d = 0x%.08x.\n",
+                               pr_err("%s: Routing Mask %d = 0x%.08x\n",
                                       qdev->ndev->name, i, value);
                }
        }
        ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
 }
 
+#define DUMP_REG(qdev, reg)                    \
+       pr_err("%-32s= 0x%x\n", #reg, ql_read32(qdev, reg))
+
 void ql_dump_regs(struct ql_adapter *qdev)
 {
-       printk(KERN_ERR PFX "reg dump for function #%d.\n", qdev->func);
-       printk(KERN_ERR PFX "SYS                                = 0x%x.\n",
-              ql_read32(qdev, SYS));
-       printk(KERN_ERR PFX "RST_FO                     = 0x%x.\n",
-              ql_read32(qdev, RST_FO));
-       printk(KERN_ERR PFX "FSC                                = 0x%x.\n",
-              ql_read32(qdev, FSC));
-       printk(KERN_ERR PFX "CSR                                = 0x%x.\n",
-              ql_read32(qdev, CSR));
-       printk(KERN_ERR PFX "ICB_RID                    = 0x%x.\n",
-              ql_read32(qdev, ICB_RID));
-       printk(KERN_ERR PFX "ICB_L                              = 0x%x.\n",
-              ql_read32(qdev, ICB_L));
-       printk(KERN_ERR PFX "ICB_H                              = 0x%x.\n",
-              ql_read32(qdev, ICB_H));
-       printk(KERN_ERR PFX "CFG                                = 0x%x.\n",
-              ql_read32(qdev, CFG));
-       printk(KERN_ERR PFX "BIOS_ADDR                  = 0x%x.\n",
-              ql_read32(qdev, BIOS_ADDR));
-       printk(KERN_ERR PFX "STS                                = 0x%x.\n",
-              ql_read32(qdev, STS));
-       printk(KERN_ERR PFX "INTR_EN                    = 0x%x.\n",
-              ql_read32(qdev, INTR_EN));
-       printk(KERN_ERR PFX "INTR_MASK                  = 0x%x.\n",
-              ql_read32(qdev, INTR_MASK));
-       printk(KERN_ERR PFX "ISR1                               = 0x%x.\n",
-              ql_read32(qdev, ISR1));
-       printk(KERN_ERR PFX "ISR2                               = 0x%x.\n",
-              ql_read32(qdev, ISR2));
-       printk(KERN_ERR PFX "ISR3                               = 0x%x.\n",
-              ql_read32(qdev, ISR3));
-       printk(KERN_ERR PFX "ISR4                               = 0x%x.\n",
-              ql_read32(qdev, ISR4));
-       printk(KERN_ERR PFX "REV_ID                     = 0x%x.\n",
-              ql_read32(qdev, REV_ID));
-       printk(KERN_ERR PFX "FRC_ECC_ERR                        = 0x%x.\n",
-              ql_read32(qdev, FRC_ECC_ERR));
-       printk(KERN_ERR PFX "ERR_STS                    = 0x%x.\n",
-              ql_read32(qdev, ERR_STS));
-       printk(KERN_ERR PFX "RAM_DBG_ADDR                       = 0x%x.\n",
-              ql_read32(qdev, RAM_DBG_ADDR));
-       printk(KERN_ERR PFX "RAM_DBG_DATA                       = 0x%x.\n",
-              ql_read32(qdev, RAM_DBG_DATA));
-       printk(KERN_ERR PFX "ECC_ERR_CNT                        = 0x%x.\n",
-              ql_read32(qdev, ECC_ERR_CNT));
-       printk(KERN_ERR PFX "SEM                                = 0x%x.\n",
-              ql_read32(qdev, SEM));
-       printk(KERN_ERR PFX "GPIO_1                     = 0x%x.\n",
-              ql_read32(qdev, GPIO_1));
-       printk(KERN_ERR PFX "GPIO_2                     = 0x%x.\n",
-              ql_read32(qdev, GPIO_2));
-       printk(KERN_ERR PFX "GPIO_3                     = 0x%x.\n",
-              ql_read32(qdev, GPIO_3));
-       printk(KERN_ERR PFX "XGMAC_ADDR                         = 0x%x.\n",
-              ql_read32(qdev, XGMAC_ADDR));
-       printk(KERN_ERR PFX "XGMAC_DATA                         = 0x%x.\n",
-              ql_read32(qdev, XGMAC_DATA));
-       printk(KERN_ERR PFX "NIC_ETS                    = 0x%x.\n",
-              ql_read32(qdev, NIC_ETS));
-       printk(KERN_ERR PFX "CNA_ETS                    = 0x%x.\n",
-              ql_read32(qdev, CNA_ETS));
-       printk(KERN_ERR PFX "FLASH_ADDR                         = 0x%x.\n",
-              ql_read32(qdev, FLASH_ADDR));
-       printk(KERN_ERR PFX "FLASH_DATA                         = 0x%x.\n",
-              ql_read32(qdev, FLASH_DATA));
-       printk(KERN_ERR PFX "CQ_STOP                    = 0x%x.\n",
-              ql_read32(qdev, CQ_STOP));
-       printk(KERN_ERR PFX "PAGE_TBL_RID                       = 0x%x.\n",
-              ql_read32(qdev, PAGE_TBL_RID));
-       printk(KERN_ERR PFX "WQ_PAGE_TBL_LO             = 0x%x.\n",
-              ql_read32(qdev, WQ_PAGE_TBL_LO));
-       printk(KERN_ERR PFX "WQ_PAGE_TBL_HI             = 0x%x.\n",
-              ql_read32(qdev, WQ_PAGE_TBL_HI));
-       printk(KERN_ERR PFX "CQ_PAGE_TBL_LO             = 0x%x.\n",
-              ql_read32(qdev, CQ_PAGE_TBL_LO));
-       printk(KERN_ERR PFX "CQ_PAGE_TBL_HI             = 0x%x.\n",
-              ql_read32(qdev, CQ_PAGE_TBL_HI));
-       printk(KERN_ERR PFX "COS_DFLT_CQ1                       = 0x%x.\n",
-              ql_read32(qdev, COS_DFLT_CQ1));
-       printk(KERN_ERR PFX "COS_DFLT_CQ2                       = 0x%x.\n",
-              ql_read32(qdev, COS_DFLT_CQ2));
-       printk(KERN_ERR PFX "SPLT_HDR                   = 0x%x.\n",
-              ql_read32(qdev, SPLT_HDR));
-       printk(KERN_ERR PFX "FC_PAUSE_THRES             = 0x%x.\n",
-              ql_read32(qdev, FC_PAUSE_THRES));
-       printk(KERN_ERR PFX "NIC_PAUSE_THRES            = 0x%x.\n",
-              ql_read32(qdev, NIC_PAUSE_THRES));
-       printk(KERN_ERR PFX "FC_ETHERTYPE                       = 0x%x.\n",
-              ql_read32(qdev, FC_ETHERTYPE));
-       printk(KERN_ERR PFX "FC_RCV_CFG                         = 0x%x.\n",
-              ql_read32(qdev, FC_RCV_CFG));
-       printk(KERN_ERR PFX "NIC_RCV_CFG                        = 0x%x.\n",
-              ql_read32(qdev, NIC_RCV_CFG));
-       printk(KERN_ERR PFX "FC_COS_TAGS                        = 0x%x.\n",
-              ql_read32(qdev, FC_COS_TAGS));
-       printk(KERN_ERR PFX "NIC_COS_TAGS                       = 0x%x.\n",
-              ql_read32(qdev, NIC_COS_TAGS));
-       printk(KERN_ERR PFX "MGMT_RCV_CFG                       = 0x%x.\n",
-              ql_read32(qdev, MGMT_RCV_CFG));
-       printk(KERN_ERR PFX "XG_SERDES_ADDR             = 0x%x.\n",
-              ql_read32(qdev, XG_SERDES_ADDR));
-       printk(KERN_ERR PFX "XG_SERDES_DATA             = 0x%x.\n",
-              ql_read32(qdev, XG_SERDES_DATA));
-       printk(KERN_ERR PFX "PRB_MX_ADDR                        = 0x%x.\n",
-              ql_read32(qdev, PRB_MX_ADDR));
-       printk(KERN_ERR PFX "PRB_MX_DATA                        = 0x%x.\n",
-              ql_read32(qdev, PRB_MX_DATA));
+       pr_err("reg dump for function #%d\n", qdev->func);
+       DUMP_REG(qdev, SYS);
+       DUMP_REG(qdev, RST_FO);
+       DUMP_REG(qdev, FSC);
+       DUMP_REG(qdev, CSR);
+       DUMP_REG(qdev, ICB_RID);
+       DUMP_REG(qdev, ICB_L);
+       DUMP_REG(qdev, ICB_H);
+       DUMP_REG(qdev, CFG);
+       DUMP_REG(qdev, BIOS_ADDR);
+       DUMP_REG(qdev, STS);
+       DUMP_REG(qdev, INTR_EN);
+       DUMP_REG(qdev, INTR_MASK);
+       DUMP_REG(qdev, ISR1);
+       DUMP_REG(qdev, ISR2);
+       DUMP_REG(qdev, ISR3);
+       DUMP_REG(qdev, ISR4);
+       DUMP_REG(qdev, REV_ID);
+       DUMP_REG(qdev, FRC_ECC_ERR);
+       DUMP_REG(qdev, ERR_STS);
+       DUMP_REG(qdev, RAM_DBG_ADDR);
+       DUMP_REG(qdev, RAM_DBG_DATA);
+       DUMP_REG(qdev, ECC_ERR_CNT);
+       DUMP_REG(qdev, SEM);
+       DUMP_REG(qdev, GPIO_1);
+       DUMP_REG(qdev, GPIO_2);
+       DUMP_REG(qdev, GPIO_3);
+       DUMP_REG(qdev, XGMAC_ADDR);
+       DUMP_REG(qdev, XGMAC_DATA);
+       DUMP_REG(qdev, NIC_ETS);
+       DUMP_REG(qdev, CNA_ETS);
+       DUMP_REG(qdev, FLASH_ADDR);
+       DUMP_REG(qdev, FLASH_DATA);
+       DUMP_REG(qdev, CQ_STOP);
+       DUMP_REG(qdev, PAGE_TBL_RID);
+       DUMP_REG(qdev, WQ_PAGE_TBL_LO);
+       DUMP_REG(qdev, WQ_PAGE_TBL_HI);
+       DUMP_REG(qdev, CQ_PAGE_TBL_LO);
+       DUMP_REG(qdev, CQ_PAGE_TBL_HI);
+       DUMP_REG(qdev, COS_DFLT_CQ1);
+       DUMP_REG(qdev, COS_DFLT_CQ2);
+       DUMP_REG(qdev, SPLT_HDR);
+       DUMP_REG(qdev, FC_PAUSE_THRES);
+       DUMP_REG(qdev, NIC_PAUSE_THRES);
+       DUMP_REG(qdev, FC_ETHERTYPE);
+       DUMP_REG(qdev, FC_RCV_CFG);
+       DUMP_REG(qdev, NIC_RCV_CFG);
+       DUMP_REG(qdev, FC_COS_TAGS);
+       DUMP_REG(qdev, NIC_COS_TAGS);
+       DUMP_REG(qdev, MGMT_RCV_CFG);
+       DUMP_REG(qdev, XG_SERDES_ADDR);
+       DUMP_REG(qdev, XG_SERDES_DATA);
+       DUMP_REG(qdev, PRB_MX_ADDR);
+       DUMP_REG(qdev, PRB_MX_DATA);
        ql_dump_intr_states(qdev);
        ql_dump_xgmac_control_regs(qdev);
        ql_dump_ets_regs(qdev);
@@ -1608,191 +1526,124 @@ void ql_dump_regs(struct ql_adapter *qdev)
 #endif
 
 #ifdef QL_STAT_DUMP
+
+#define DUMP_STAT(qdev, stat)  \
+       pr_err("%s = %ld\n", #stat, (unsigned long)qdev->nic_stats.stat)
+
 void ql_dump_stat(struct ql_adapter *qdev)
 {
-       printk(KERN_ERR "%s: Enter.\n", __func__);
-       printk(KERN_ERR "tx_pkts = %ld\n",
-              (unsigned long)qdev->nic_stats.tx_pkts);
-       printk(KERN_ERR "tx_bytes = %ld\n",
-              (unsigned long)qdev->nic_stats.tx_bytes);
-       printk(KERN_ERR "tx_mcast_pkts = %ld.\n",
-              (unsigned long)qdev->nic_stats.tx_mcast_pkts);
-       printk(KERN_ERR "tx_bcast_pkts = %ld.\n",
-              (unsigned long)qdev->nic_stats.tx_bcast_pkts);
-       printk(KERN_ERR "tx_ucast_pkts = %ld.\n",
-              (unsigned long)qdev->nic_stats.tx_ucast_pkts);
-       printk(KERN_ERR "tx_ctl_pkts = %ld.\n",
-              (unsigned long)qdev->nic_stats.tx_ctl_pkts);
-       printk(KERN_ERR "tx_pause_pkts = %ld.\n",
-              (unsigned long)qdev->nic_stats.tx_pause_pkts);
-       printk(KERN_ERR "tx_64_pkt = %ld.\n",
-              (unsigned long)qdev->nic_stats.tx_64_pkt);
-       printk(KERN_ERR "tx_65_to_127_pkt = %ld.\n",
-              (unsigned long)qdev->nic_stats.tx_65_to_127_pkt);
-       printk(KERN_ERR "tx_128_to_255_pkt = %ld.\n",
-              (unsigned long)qdev->nic_stats.tx_128_to_255_pkt);
-       printk(KERN_ERR "tx_256_511_pkt = %ld.\n",
-              (unsigned long)qdev->nic_stats.tx_256_511_pkt);
-       printk(KERN_ERR "tx_512_to_1023_pkt = %ld.\n",
-              (unsigned long)qdev->nic_stats.tx_512_to_1023_pkt);
-       printk(KERN_ERR "tx_1024_to_1518_pkt = %ld.\n",
-              (unsigned long)qdev->nic_stats.tx_1024_to_1518_pkt);
-       printk(KERN_ERR "tx_1519_to_max_pkt = %ld.\n",
-              (unsigned long)qdev->nic_stats.tx_1519_to_max_pkt);
-       printk(KERN_ERR "tx_undersize_pkt = %ld.\n",
-              (unsigned long)qdev->nic_stats.tx_undersize_pkt);
-       printk(KERN_ERR "tx_oversize_pkt = %ld.\n",
-              (unsigned long)qdev->nic_stats.tx_oversize_pkt);
-       printk(KERN_ERR "rx_bytes = %ld.\n",
-              (unsigned long)qdev->nic_stats.rx_bytes);
-       printk(KERN_ERR "rx_bytes_ok = %ld.\n",
-              (unsigned long)qdev->nic_stats.rx_bytes_ok);
-       printk(KERN_ERR "rx_pkts = %ld.\n",
-              (unsigned long)qdev->nic_stats.rx_pkts);
-       printk(KERN_ERR "rx_pkts_ok = %ld.\n",
-              (unsigned long)qdev->nic_stats.rx_pkts_ok);
-       printk(KERN_ERR "rx_bcast_pkts = %ld.\n",
-              (unsigned long)qdev->nic_stats.rx_bcast_pkts);
-       printk(KERN_ERR "rx_mcast_pkts = %ld.\n",
-              (unsigned long)qdev->nic_stats.rx_mcast_pkts);
-       printk(KERN_ERR "rx_ucast_pkts = %ld.\n",
-              (unsigned long)qdev->nic_stats.rx_ucast_pkts);
-       printk(KERN_ERR "rx_undersize_pkts = %ld.\n",
-              (unsigned long)qdev->nic_stats.rx_undersize_pkts);
-       printk(KERN_ERR "rx_oversize_pkts = %ld.\n",
-              (unsigned long)qdev->nic_stats.rx_oversize_pkts);
-       printk(KERN_ERR "rx_jabber_pkts = %ld.\n",
-              (unsigned long)qdev->nic_stats.rx_jabber_pkts);
-       printk(KERN_ERR "rx_undersize_fcerr_pkts = %ld.\n",
-              (unsigned long)qdev->nic_stats.rx_undersize_fcerr_pkts);
-       printk(KERN_ERR "rx_drop_events = %ld.\n",
-              (unsigned long)qdev->nic_stats.rx_drop_events);
-       printk(KERN_ERR "rx_fcerr_pkts = %ld.\n",
-              (unsigned long)qdev->nic_stats.rx_fcerr_pkts);
-       printk(KERN_ERR "rx_align_err = %ld.\n",
-              (unsigned long)qdev->nic_stats.rx_align_err);
-       printk(KERN_ERR "rx_symbol_err = %ld.\n",
-              (unsigned long)qdev->nic_stats.rx_symbol_err);
-       printk(KERN_ERR "rx_mac_err = %ld.\n",
-              (unsigned long)qdev->nic_stats.rx_mac_err);
-       printk(KERN_ERR "rx_ctl_pkts = %ld.\n",
-              (unsigned long)qdev->nic_stats.rx_ctl_pkts);
-       printk(KERN_ERR "rx_pause_pkts = %ld.\n",
-              (unsigned long)qdev->nic_stats.rx_pause_pkts);
-       printk(KERN_ERR "rx_64_pkts = %ld.\n",
-              (unsigned long)qdev->nic_stats.rx_64_pkts);
-       printk(KERN_ERR "rx_65_to_127_pkts = %ld.\n",
-              (unsigned long)qdev->nic_stats.rx_65_to_127_pkts);
-       printk(KERN_ERR "rx_128_255_pkts = %ld.\n",
-              (unsigned long)qdev->nic_stats.rx_128_255_pkts);
-       printk(KERN_ERR "rx_256_511_pkts = %ld.\n",
-              (unsigned long)qdev->nic_stats.rx_256_511_pkts);
-       printk(KERN_ERR "rx_512_to_1023_pkts = %ld.\n",
-              (unsigned long)qdev->nic_stats.rx_512_to_1023_pkts);
-       printk(KERN_ERR "rx_1024_to_1518_pkts = %ld.\n",
-              (unsigned long)qdev->nic_stats.rx_1024_to_1518_pkts);
-       printk(KERN_ERR "rx_1519_to_max_pkts = %ld.\n",
-              (unsigned long)qdev->nic_stats.rx_1519_to_max_pkts);
-       printk(KERN_ERR "rx_len_err_pkts = %ld.\n",
-              (unsigned long)qdev->nic_stats.rx_len_err_pkts);
+       pr_err("%s: Enter\n", __func__);
+       DUMP_STAT(qdev, tx_pkts);
+       DUMP_STAT(qdev, tx_bytes);
+       DUMP_STAT(qdev, tx_mcast_pkts);
+       DUMP_STAT(qdev, tx_bcast_pkts);
+       DUMP_STAT(qdev, tx_ucast_pkts);
+       DUMP_STAT(qdev, tx_ctl_pkts);
+       DUMP_STAT(qdev, tx_pause_pkts);
+       DUMP_STAT(qdev, tx_64_pkt);
+       DUMP_STAT(qdev, tx_65_to_127_pkt);
+       DUMP_STAT(qdev, tx_128_to_255_pkt);
+       DUMP_STAT(qdev, tx_256_511_pkt);
+       DUMP_STAT(qdev, tx_512_to_1023_pkt);
+       DUMP_STAT(qdev, tx_1024_to_1518_pkt);
+       DUMP_STAT(qdev, tx_1519_to_max_pkt);
+       DUMP_STAT(qdev, tx_undersize_pkt);
+       DUMP_STAT(qdev, tx_oversize_pkt);
+       DUMP_STAT(qdev, rx_bytes);
+       DUMP_STAT(qdev, rx_bytes_ok);
+       DUMP_STAT(qdev, rx_pkts);
+       DUMP_STAT(qdev, rx_pkts_ok);
+       DUMP_STAT(qdev, rx_bcast_pkts);
+       DUMP_STAT(qdev, rx_mcast_pkts);
+       DUMP_STAT(qdev, rx_ucast_pkts);
+       DUMP_STAT(qdev, rx_undersize_pkts);
+       DUMP_STAT(qdev, rx_oversize_pkts);
+       DUMP_STAT(qdev, rx_jabber_pkts);
+       DUMP_STAT(qdev, rx_undersize_fcerr_pkts);
+       DUMP_STAT(qdev, rx_drop_events);
+       DUMP_STAT(qdev, rx_fcerr_pkts);
+       DUMP_STAT(qdev, rx_align_err);
+       DUMP_STAT(qdev, rx_symbol_err);
+       DUMP_STAT(qdev, rx_mac_err);
+       DUMP_STAT(qdev, rx_ctl_pkts);
+       DUMP_STAT(qdev, rx_pause_pkts);
+       DUMP_STAT(qdev, rx_64_pkts);
+       DUMP_STAT(qdev, rx_65_to_127_pkts);
+       DUMP_STAT(qdev, rx_128_255_pkts);
+       DUMP_STAT(qdev, rx_256_511_pkts);
+       DUMP_STAT(qdev, rx_512_to_1023_pkts);
+       DUMP_STAT(qdev, rx_1024_to_1518_pkts);
+       DUMP_STAT(qdev, rx_1519_to_max_pkts);
+       DUMP_STAT(qdev, rx_len_err_pkts);
 };
 #endif
 
 #ifdef QL_DEV_DUMP
+
+#define DUMP_QDEV_FIELD(qdev, type, field)             \
+       pr_err("qdev->%-24s = " type "\n", #field, qdev->field)
+#define DUMP_QDEV_DMA_FIELD(qdev, field)               \
+       pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field)
+#define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \
+       pr_err("%s[%d].%s = " type "\n",                 \
+              #array, index, #field, qdev->array[index].field);
 void ql_dump_qdev(struct ql_adapter *qdev)
 {
        int i;
-       printk(KERN_ERR PFX "qdev->flags                        = %lx.\n",
-              qdev->flags);
-       printk(KERN_ERR PFX "qdev->vlgrp                        = %p.\n",
-              qdev->vlgrp);
-       printk(KERN_ERR PFX "qdev->pdev                         = %p.\n",
-              qdev->pdev);
-       printk(KERN_ERR PFX "qdev->ndev                         = %p.\n",
-              qdev->ndev);
-       printk(KERN_ERR PFX "qdev->chip_rev_id          = %d.\n",
-              qdev->chip_rev_id);
-       printk(KERN_ERR PFX "qdev->reg_base             = %p.\n",
-              qdev->reg_base);
-       printk(KERN_ERR PFX "qdev->doorbell_area        = %p.\n",
-              qdev->doorbell_area);
-       printk(KERN_ERR PFX "qdev->doorbell_area_size   = %d.\n",
-              qdev->doorbell_area_size);
-       printk(KERN_ERR PFX "msg_enable                 = %x.\n",
-              qdev->msg_enable);
-       printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_area      = %p.\n",
-              qdev->rx_ring_shadow_reg_area);
-       printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_dma       = %llx.\n",
-              (unsigned long long) qdev->rx_ring_shadow_reg_dma);
-       printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_area      = %p.\n",
-              qdev->tx_ring_shadow_reg_area);
-       printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_dma       = %llx.\n",
-              (unsigned long long) qdev->tx_ring_shadow_reg_dma);
-       printk(KERN_ERR PFX "qdev->intr_count           = %d.\n",
-              qdev->intr_count);
+       DUMP_QDEV_FIELD(qdev, "%lx", flags);
+       DUMP_QDEV_FIELD(qdev, "%p", vlgrp);
+       DUMP_QDEV_FIELD(qdev, "%p", pdev);
+       DUMP_QDEV_FIELD(qdev, "%p", ndev);
+       DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id);
+       DUMP_QDEV_FIELD(qdev, "%p", reg_base);
+       DUMP_QDEV_FIELD(qdev, "%p", doorbell_area);
+       DUMP_QDEV_FIELD(qdev, "%d", doorbell_area_size);
+       DUMP_QDEV_FIELD(qdev, "%x", msg_enable);
+       DUMP_QDEV_FIELD(qdev, "%p", rx_ring_shadow_reg_area);
+       DUMP_QDEV_DMA_FIELD(qdev, rx_ring_shadow_reg_dma);
+       DUMP_QDEV_FIELD(qdev, "%p", tx_ring_shadow_reg_area);
+       DUMP_QDEV_DMA_FIELD(qdev, tx_ring_shadow_reg_dma);
+       DUMP_QDEV_FIELD(qdev, "%d", intr_count);
        if (qdev->msi_x_entry)
                for (i = 0; i < qdev->intr_count; i++) {
-                       printk(KERN_ERR PFX
-                              "msi_x_entry.[%d]vector  = %d.\n", i,
-                              qdev->msi_x_entry[i].vector);
-                       printk(KERN_ERR PFX
-                              "msi_x_entry.[%d]entry   = %d.\n", i,
-                              qdev->msi_x_entry[i].entry);
+                       DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, vector);
+                       DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, entry);
                }
        for (i = 0; i < qdev->intr_count; i++) {
-               printk(KERN_ERR PFX
-                      "intr_context[%d].qdev           = %p.\n", i,
-                      qdev->intr_context[i].qdev);
-               printk(KERN_ERR PFX
-                      "intr_context[%d].intr           = %d.\n", i,
-                      qdev->intr_context[i].intr);
-               printk(KERN_ERR PFX
-                      "intr_context[%d].hooked         = %d.\n", i,
-                      qdev->intr_context[i].hooked);
-               printk(KERN_ERR PFX
-                      "intr_context[%d].intr_en_mask   = 0x%08x.\n", i,
-                      qdev->intr_context[i].intr_en_mask);
-               printk(KERN_ERR PFX
-                      "intr_context[%d].intr_dis_mask  = 0x%08x.\n", i,
-                      qdev->intr_context[i].intr_dis_mask);
-               printk(KERN_ERR PFX
-                      "intr_context[%d].intr_read_mask = 0x%08x.\n", i,
-                      qdev->intr_context[i].intr_read_mask);
+               DUMP_QDEV_ARRAY(qdev, "%p", intr_context, i, qdev);
+               DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, intr);
+               DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, hooked);
+               DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_en_mask);
+               DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_dis_mask);
+               DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_read_mask);
        }
-       printk(KERN_ERR PFX "qdev->tx_ring_count = %d.\n", qdev->tx_ring_count);
-       printk(KERN_ERR PFX "qdev->rx_ring_count = %d.\n", qdev->rx_ring_count);
-       printk(KERN_ERR PFX "qdev->ring_mem_size = %d.\n", qdev->ring_mem_size);
-       printk(KERN_ERR PFX "qdev->ring_mem     = %p.\n", qdev->ring_mem);
-       printk(KERN_ERR PFX "qdev->intr_count   = %d.\n", qdev->intr_count);
-       printk(KERN_ERR PFX "qdev->tx_ring              = %p.\n",
-              qdev->tx_ring);
-       printk(KERN_ERR PFX "qdev->rss_ring_count       = %d.\n",
-              qdev->rss_ring_count);
-       printk(KERN_ERR PFX "qdev->rx_ring      = %p.\n", qdev->rx_ring);
-       printk(KERN_ERR PFX "qdev->default_rx_queue     = %d.\n",
-              qdev->default_rx_queue);
-       printk(KERN_ERR PFX "qdev->xg_sem_mask          = 0x%08x.\n",
-              qdev->xg_sem_mask);
-       printk(KERN_ERR PFX "qdev->port_link_up         = 0x%08x.\n",
-              qdev->port_link_up);
-       printk(KERN_ERR PFX "qdev->port_init            = 0x%08x.\n",
-              qdev->port_init);
-
+       DUMP_QDEV_FIELD(qdev, "%d", tx_ring_count);
+       DUMP_QDEV_FIELD(qdev, "%d", rx_ring_count);
+       DUMP_QDEV_FIELD(qdev, "%d", ring_mem_size);
+       DUMP_QDEV_FIELD(qdev, "%p", ring_mem);
+       DUMP_QDEV_FIELD(qdev, "%d", intr_count);
+       DUMP_QDEV_FIELD(qdev, "%p", tx_ring);
+       DUMP_QDEV_FIELD(qdev, "%d", rss_ring_count);
+       DUMP_QDEV_FIELD(qdev, "%p", rx_ring);
+       DUMP_QDEV_FIELD(qdev, "%d", default_rx_queue);
+       DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask);
+       DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up);
+       DUMP_QDEV_FIELD(qdev, "0x%08x", port_init);
 }
 #endif
 
 #ifdef QL_CB_DUMP
 void ql_dump_wqicb(struct wqicb *wqicb)
 {
-       printk(KERN_ERR PFX "Dumping wqicb stuff...\n");
-       printk(KERN_ERR PFX "wqicb->len = 0x%x.\n", le16_to_cpu(wqicb->len));
-       printk(KERN_ERR PFX "wqicb->flags = %x.\n", le16_to_cpu(wqicb->flags));
-       printk(KERN_ERR PFX "wqicb->cq_id_rss = %d.\n",
+       pr_err("Dumping wqicb stuff...\n");
+       pr_err("wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len));
+       pr_err("wqicb->flags = %x\n", le16_to_cpu(wqicb->flags));
+       pr_err("wqicb->cq_id_rss = %d\n",
               le16_to_cpu(wqicb->cq_id_rss));
-       printk(KERN_ERR PFX "wqicb->rid = 0x%x.\n", le16_to_cpu(wqicb->rid));
-       printk(KERN_ERR PFX "wqicb->wq_addr = 0x%llx.\n",
+       pr_err("wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid));
+       pr_err("wqicb->wq_addr = 0x%llx\n",
               (unsigned long long) le64_to_cpu(wqicb->addr));
-       printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr = 0x%llx.\n",
+       pr_err("wqicb->wq_cnsmr_idx_addr = 0x%llx\n",
               (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr));
 }
 
@@ -1800,40 +1651,34 @@ void ql_dump_tx_ring(struct tx_ring *tx_ring)
 {
        if (tx_ring == NULL)
                return;
-       printk(KERN_ERR PFX
-              "===================== Dumping tx_ring %d ===============.\n",
+       pr_err("===================== Dumping tx_ring %d ===============\n",
               tx_ring->wq_id);
-       printk(KERN_ERR PFX "tx_ring->base = %p.\n", tx_ring->wq_base);
-       printk(KERN_ERR PFX "tx_ring->base_dma = 0x%llx.\n",
+       pr_err("tx_ring->base = %p\n", tx_ring->wq_base);
+       pr_err("tx_ring->base_dma = 0x%llx\n",
               (unsigned long long) tx_ring->wq_base_dma);
-       printk(KERN_ERR PFX
-              "tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d.\n",
+       pr_err("tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n",
               tx_ring->cnsmr_idx_sh_reg,
               tx_ring->cnsmr_idx_sh_reg
                        ? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0);
-       printk(KERN_ERR PFX "tx_ring->size = %d.\n", tx_ring->wq_size);
-       printk(KERN_ERR PFX "tx_ring->len = %d.\n", tx_ring->wq_len);
-       printk(KERN_ERR PFX "tx_ring->prod_idx_db_reg = %p.\n",
-              tx_ring->prod_idx_db_reg);
-       printk(KERN_ERR PFX "tx_ring->valid_db_reg = %p.\n",
-              tx_ring->valid_db_reg);
-       printk(KERN_ERR PFX "tx_ring->prod_idx = %d.\n", tx_ring->prod_idx);
-       printk(KERN_ERR PFX "tx_ring->cq_id = %d.\n", tx_ring->cq_id);
-       printk(KERN_ERR PFX "tx_ring->wq_id = %d.\n", tx_ring->wq_id);
-       printk(KERN_ERR PFX "tx_ring->q = %p.\n", tx_ring->q);
-       printk(KERN_ERR PFX "tx_ring->tx_count = %d.\n",
-              atomic_read(&tx_ring->tx_count));
+       pr_err("tx_ring->size = %d\n", tx_ring->wq_size);
+       pr_err("tx_ring->len = %d\n", tx_ring->wq_len);
+       pr_err("tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg);
+       pr_err("tx_ring->valid_db_reg = %p\n", tx_ring->valid_db_reg);
+       pr_err("tx_ring->prod_idx = %d\n", tx_ring->prod_idx);
+       pr_err("tx_ring->cq_id = %d\n", tx_ring->cq_id);
+       pr_err("tx_ring->wq_id = %d\n", tx_ring->wq_id);
+       pr_err("tx_ring->q = %p\n", tx_ring->q);
+       pr_err("tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count));
 }
 
 void ql_dump_ricb(struct ricb *ricb)
 {
        int i;
-       printk(KERN_ERR PFX
-              "===================== Dumping ricb ===============.\n");
-       printk(KERN_ERR PFX "Dumping ricb stuff...\n");
+       pr_err("===================== Dumping ricb ===============\n");
+       pr_err("Dumping ricb stuff...\n");
 
-       printk(KERN_ERR PFX "ricb->base_cq = %d.\n", ricb->base_cq & 0x1f);
-       printk(KERN_ERR PFX "ricb->flags = %s%s%s%s%s%s%s%s%s.\n",
+       pr_err("ricb->base_cq = %d\n", ricb->base_cq & 0x1f);
+       pr_err("ricb->flags = %s%s%s%s%s%s%s%s%s\n",
               ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
               ricb->flags & RSS_L6K ? "RSS_L6K " : "",
               ricb->flags & RSS_LI ? "RSS_LI " : "",
@@ -1843,44 +1688,44 @@ void ql_dump_ricb(struct ricb *ricb)
               ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
               ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
               ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
-       printk(KERN_ERR PFX "ricb->mask = 0x%.04x.\n", le16_to_cpu(ricb->mask));
+       pr_err("ricb->mask = 0x%.04x\n", le16_to_cpu(ricb->mask));
        for (i = 0; i < 16; i++)
-               printk(KERN_ERR PFX "ricb->hash_cq_id[%d] = 0x%.08x.\n", i,
+               pr_err("ricb->hash_cq_id[%d] = 0x%.08x\n", i,
                       le32_to_cpu(ricb->hash_cq_id[i]));
        for (i = 0; i < 10; i++)
-               printk(KERN_ERR PFX "ricb->ipv6_hash_key[%d] = 0x%.08x.\n", i,
+               pr_err("ricb->ipv6_hash_key[%d] = 0x%.08x\n", i,
                       le32_to_cpu(ricb->ipv6_hash_key[i]));
        for (i = 0; i < 4; i++)
-               printk(KERN_ERR PFX "ricb->ipv4_hash_key[%d] = 0x%.08x.\n", i,
+               pr_err("ricb->ipv4_hash_key[%d] = 0x%.08x\n", i,
                       le32_to_cpu(ricb->ipv4_hash_key[i]));
 }
 
 void ql_dump_cqicb(struct cqicb *cqicb)
 {
-       printk(KERN_ERR PFX "Dumping cqicb stuff...\n");
+       pr_err("Dumping cqicb stuff...\n");
 
-       printk(KERN_ERR PFX "cqicb->msix_vect = %d.\n", cqicb->msix_vect);
-       printk(KERN_ERR PFX "cqicb->flags = %x.\n", cqicb->flags);
-       printk(KERN_ERR PFX "cqicb->len = %d.\n", le16_to_cpu(cqicb->len));
-       printk(KERN_ERR PFX "cqicb->addr = 0x%llx.\n",
+       pr_err("cqicb->msix_vect = %d\n", cqicb->msix_vect);
+       pr_err("cqicb->flags = %x\n", cqicb->flags);
+       pr_err("cqicb->len = %d\n", le16_to_cpu(cqicb->len));
+       pr_err("cqicb->addr = 0x%llx\n",
               (unsigned long long) le64_to_cpu(cqicb->addr));
-       printk(KERN_ERR PFX "cqicb->prod_idx_addr = 0x%llx.\n",
+       pr_err("cqicb->prod_idx_addr = 0x%llx\n",
               (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr));
-       printk(KERN_ERR PFX "cqicb->pkt_delay = 0x%.04x.\n",
+       pr_err("cqicb->pkt_delay = 0x%.04x\n",
               le16_to_cpu(cqicb->pkt_delay));
-       printk(KERN_ERR PFX "cqicb->irq_delay = 0x%.04x.\n",
+       pr_err("cqicb->irq_delay = 0x%.04x\n",
               le16_to_cpu(cqicb->irq_delay));
-       printk(KERN_ERR PFX "cqicb->lbq_addr = 0x%llx.\n",
+       pr_err("cqicb->lbq_addr = 0x%llx\n",
               (unsigned long long) le64_to_cpu(cqicb->lbq_addr));
-       printk(KERN_ERR PFX "cqicb->lbq_buf_size = 0x%.04x.\n",
+       pr_err("cqicb->lbq_buf_size = 0x%.04x\n",
               le16_to_cpu(cqicb->lbq_buf_size));
-       printk(KERN_ERR PFX "cqicb->lbq_len = 0x%.04x.\n",
+       pr_err("cqicb->lbq_len = 0x%.04x\n",
               le16_to_cpu(cqicb->lbq_len));
-       printk(KERN_ERR PFX "cqicb->sbq_addr = 0x%llx.\n",
+       pr_err("cqicb->sbq_addr = 0x%llx\n",
               (unsigned long long) le64_to_cpu(cqicb->sbq_addr));
-       printk(KERN_ERR PFX "cqicb->sbq_buf_size = 0x%.04x.\n",
+       pr_err("cqicb->sbq_buf_size = 0x%.04x\n",
               le16_to_cpu(cqicb->sbq_buf_size));
-       printk(KERN_ERR PFX "cqicb->sbq_len = 0x%.04x.\n",
+       pr_err("cqicb->sbq_len = 0x%.04x\n",
               le16_to_cpu(cqicb->sbq_len));
 }
 
@@ -1888,100 +1733,85 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring)
 {
        if (rx_ring == NULL)
                return;
-       printk(KERN_ERR PFX
-              "===================== Dumping rx_ring %d ===============.\n",
+       pr_err("===================== Dumping rx_ring %d ===============\n",
               rx_ring->cq_id);
-       printk(KERN_ERR PFX "Dumping rx_ring %d, type = %s%s%s.\n",
+       pr_err("Dumping rx_ring %d, type = %s%s%s\n",
               rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "",
               rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "",
               rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : "");
-       printk(KERN_ERR PFX "rx_ring->cqicb = %p.\n", &rx_ring->cqicb);
-       printk(KERN_ERR PFX "rx_ring->cq_base = %p.\n", rx_ring->cq_base);
-       printk(KERN_ERR PFX "rx_ring->cq_base_dma = %llx.\n",
+       pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb);
+       pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base);
+       pr_err("rx_ring->cq_base_dma = %llx\n",
               (unsigned long long) rx_ring->cq_base_dma);
-       printk(KERN_ERR PFX "rx_ring->cq_size = %d.\n", rx_ring->cq_size);
-       printk(KERN_ERR PFX "rx_ring->cq_len = %d.\n", rx_ring->cq_len);
-       printk(KERN_ERR PFX
-              "rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d.\n",
+       pr_err("rx_ring->cq_size = %d\n", rx_ring->cq_size);
+       pr_err("rx_ring->cq_len = %d\n", rx_ring->cq_len);
+       pr_err("rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n",
               rx_ring->prod_idx_sh_reg,
               rx_ring->prod_idx_sh_reg
                        ? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
-       printk(KERN_ERR PFX "rx_ring->prod_idx_sh_reg_dma = %llx.\n",
+       pr_err("rx_ring->prod_idx_sh_reg_dma = %llx\n",
               (unsigned long long) rx_ring->prod_idx_sh_reg_dma);
-       printk(KERN_ERR PFX "rx_ring->cnsmr_idx_db_reg = %p.\n",
+       pr_err("rx_ring->cnsmr_idx_db_reg = %p\n",
               rx_ring->cnsmr_idx_db_reg);
-       printk(KERN_ERR PFX "rx_ring->cnsmr_idx = %d.\n", rx_ring->cnsmr_idx);
-       printk(KERN_ERR PFX "rx_ring->curr_entry = %p.\n", rx_ring->curr_entry);
-       printk(KERN_ERR PFX "rx_ring->valid_db_reg = %p.\n",
-              rx_ring->valid_db_reg);
+       pr_err("rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx);
+       pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry);
+       pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg);
 
-       printk(KERN_ERR PFX "rx_ring->lbq_base = %p.\n", rx_ring->lbq_base);
-       printk(KERN_ERR PFX "rx_ring->lbq_base_dma = %llx.\n",
+       pr_err("rx_ring->lbq_base = %p\n", rx_ring->lbq_base);
+       pr_err("rx_ring->lbq_base_dma = %llx\n",
               (unsigned long long) rx_ring->lbq_base_dma);
-       printk(KERN_ERR PFX "rx_ring->lbq_base_indirect = %p.\n",
+       pr_err("rx_ring->lbq_base_indirect = %p\n",
               rx_ring->lbq_base_indirect);
-       printk(KERN_ERR PFX "rx_ring->lbq_base_indirect_dma = %llx.\n",
+       pr_err("rx_ring->lbq_base_indirect_dma = %llx\n",
               (unsigned long long) rx_ring->lbq_base_indirect_dma);
-       printk(KERN_ERR PFX "rx_ring->lbq = %p.\n", rx_ring->lbq);
-       printk(KERN_ERR PFX "rx_ring->lbq_len = %d.\n", rx_ring->lbq_len);
-       printk(KERN_ERR PFX "rx_ring->lbq_size = %d.\n", rx_ring->lbq_size);
-       printk(KERN_ERR PFX "rx_ring->lbq_prod_idx_db_reg = %p.\n",
+       pr_err("rx_ring->lbq = %p\n", rx_ring->lbq);
+       pr_err("rx_ring->lbq_len = %d\n", rx_ring->lbq_len);
+       pr_err("rx_ring->lbq_size = %d\n", rx_ring->lbq_size);
+       pr_err("rx_ring->lbq_prod_idx_db_reg = %p\n",
               rx_ring->lbq_prod_idx_db_reg);
-       printk(KERN_ERR PFX "rx_ring->lbq_prod_idx = %d.\n",
-              rx_ring->lbq_prod_idx);
-       printk(KERN_ERR PFX "rx_ring->lbq_curr_idx = %d.\n",
-              rx_ring->lbq_curr_idx);
-       printk(KERN_ERR PFX "rx_ring->lbq_clean_idx = %d.\n",
-              rx_ring->lbq_clean_idx);
-       printk(KERN_ERR PFX "rx_ring->lbq_free_cnt = %d.\n",
-              rx_ring->lbq_free_cnt);
-       printk(KERN_ERR PFX "rx_ring->lbq_buf_size = %d.\n",
-              rx_ring->lbq_buf_size);
-
-       printk(KERN_ERR PFX "rx_ring->sbq_base = %p.\n", rx_ring->sbq_base);
-       printk(KERN_ERR PFX "rx_ring->sbq_base_dma = %llx.\n",
+       pr_err("rx_ring->lbq_prod_idx = %d\n", rx_ring->lbq_prod_idx);
+       pr_err("rx_ring->lbq_curr_idx = %d\n", rx_ring->lbq_curr_idx);
+       pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx);
+       pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt);
+       pr_err("rx_ring->lbq_buf_size = %d\n", rx_ring->lbq_buf_size);
+
+       pr_err("rx_ring->sbq_base = %p\n", rx_ring->sbq_base);
+       pr_err("rx_ring->sbq_base_dma = %llx\n",
               (unsigned long long) rx_ring->sbq_base_dma);
-       printk(KERN_ERR PFX "rx_ring->sbq_base_indirect = %p.\n",
+       pr_err("rx_ring->sbq_base_indirect = %p\n",
               rx_ring->sbq_base_indirect);
-       printk(KERN_ERR PFX "rx_ring->sbq_base_indirect_dma = %llx.\n",
+       pr_err("rx_ring->sbq_base_indirect_dma = %llx\n",
               (unsigned long long) rx_ring->sbq_base_indirect_dma);
-       printk(KERN_ERR PFX "rx_ring->sbq = %p.\n", rx_ring->sbq);
-       printk(KERN_ERR PFX "rx_ring->sbq_len = %d.\n", rx_ring->sbq_len);
-       printk(KERN_ERR PFX "rx_ring->sbq_size = %d.\n", rx_ring->sbq_size);
-       printk(KERN_ERR PFX "rx_ring->sbq_prod_idx_db_reg addr = %p.\n",
+       pr_err("rx_ring->sbq = %p\n", rx_ring->sbq);
+       pr_err("rx_ring->sbq_len = %d\n", rx_ring->sbq_len);
+       pr_err("rx_ring->sbq_size = %d\n", rx_ring->sbq_size);
+       pr_err("rx_ring->sbq_prod_idx_db_reg addr = %p\n",
               rx_ring->sbq_prod_idx_db_reg);
-       printk(KERN_ERR PFX "rx_ring->sbq_prod_idx = %d.\n",
-              rx_ring->sbq_prod_idx);
-       printk(KERN_ERR PFX "rx_ring->sbq_curr_idx = %d.\n",
-              rx_ring->sbq_curr_idx);
-       printk(KERN_ERR PFX "rx_ring->sbq_clean_idx = %d.\n",
-              rx_ring->sbq_clean_idx);
-       printk(KERN_ERR PFX "rx_ring->sbq_free_cnt = %d.\n",
-              rx_ring->sbq_free_cnt);
-       printk(KERN_ERR PFX "rx_ring->sbq_buf_size = %d.\n",
-              rx_ring->sbq_buf_size);
-       printk(KERN_ERR PFX "rx_ring->cq_id = %d.\n", rx_ring->cq_id);
-       printk(KERN_ERR PFX "rx_ring->irq = %d.\n", rx_ring->irq);
-       printk(KERN_ERR PFX "rx_ring->cpu = %d.\n", rx_ring->cpu);
-       printk(KERN_ERR PFX "rx_ring->qdev = %p.\n", rx_ring->qdev);
+       pr_err("rx_ring->sbq_prod_idx = %d\n", rx_ring->sbq_prod_idx);
+       pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx);
+       pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx);
+       pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt);
+       pr_err("rx_ring->sbq_buf_size = %d\n", rx_ring->sbq_buf_size);
+       pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id);
+       pr_err("rx_ring->irq = %d\n", rx_ring->irq);
+       pr_err("rx_ring->cpu = %d\n", rx_ring->cpu);
+       pr_err("rx_ring->qdev = %p\n", rx_ring->qdev);
 }
 
 void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
 {
        void *ptr;
 
-       printk(KERN_ERR PFX "%s: Enter.\n", __func__);
+       pr_err("%s: Enter\n", __func__);
 
        ptr = kmalloc(size, GFP_ATOMIC);
        if (ptr == NULL) {
-               printk(KERN_ERR PFX "%s: Couldn't allocate a buffer.\n",
-                      __func__);
+               pr_err("%s: Couldn't allocate a buffer\n", __func__);
                return;
        }
 
        if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
-               printk(KERN_ERR "%s: Failed to upload control block!\n",
-                      __func__);
+               pr_err("%s: Failed to upload control block!\n", __func__);
                goto fail_it;
        }
        switch (bit) {
@@ -1995,8 +1825,7 @@ void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
                ql_dump_ricb((struct ricb *)ptr);
                break;
        default:
-               printk(KERN_ERR PFX "%s: Invalid bit value = %x.\n",
-                      __func__, bit);
+               pr_err("%s: Invalid bit value = %x\n", __func__, bit);
                break;
        }
 fail_it:
@@ -2007,27 +1836,27 @@ fail_it:
 #ifdef QL_OB_DUMP
 void ql_dump_tx_desc(struct tx_buf_desc *tbd)
 {
-       printk(KERN_ERR PFX "tbd->addr  = 0x%llx\n",
+       pr_err("tbd->addr  = 0x%llx\n",
               le64_to_cpu((u64) tbd->addr));
-       printk(KERN_ERR PFX "tbd->len   = %d\n",
+       pr_err("tbd->len   = %d\n",
               le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
-       printk(KERN_ERR PFX "tbd->flags = %s %s\n",
+       pr_err("tbd->flags = %s %s\n",
               tbd->len & TX_DESC_C ? "C" : ".",
               tbd->len & TX_DESC_E ? "E" : ".");
        tbd++;
-       printk(KERN_ERR PFX "tbd->addr  = 0x%llx\n",
+       pr_err("tbd->addr  = 0x%llx\n",
               le64_to_cpu((u64) tbd->addr));
-       printk(KERN_ERR PFX "tbd->len   = %d\n",
+       pr_err("tbd->len   = %d\n",
               le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
-       printk(KERN_ERR PFX "tbd->flags = %s %s\n",
+       pr_err("tbd->flags = %s %s\n",
               tbd->len & TX_DESC_C ? "C" : ".",
               tbd->len & TX_DESC_E ? "E" : ".");
        tbd++;
-       printk(KERN_ERR PFX "tbd->addr  = 0x%llx\n",
+       pr_err("tbd->addr  = 0x%llx\n",
               le64_to_cpu((u64) tbd->addr));
-       printk(KERN_ERR PFX "tbd->len   = %d\n",
+       pr_err("tbd->len   = %d\n",
               le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
-       printk(KERN_ERR PFX "tbd->flags = %s %s\n",
+       pr_err("tbd->flags = %s %s\n",
               tbd->len & TX_DESC_C ? "C" : ".",
               tbd->len & TX_DESC_E ? "E" : ".");
 
@@ -2040,38 +1869,38 @@ void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
        struct tx_buf_desc *tbd;
        u16 frame_len;
 
-       printk(KERN_ERR PFX "%s\n", __func__);
-       printk(KERN_ERR PFX "opcode         = %s\n",
+       pr_err("%s\n", __func__);
+       pr_err("opcode         = %s\n",
               (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
-       printk(KERN_ERR PFX "flags1          = %s %s %s %s %s\n",
+       pr_err("flags1          = %s %s %s %s %s\n",
               ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
               ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
               ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
               ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
               ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
-       printk(KERN_ERR PFX "flags2          = %s %s %s\n",
+       pr_err("flags2          = %s %s %s\n",
               ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
               ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
               ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
-       printk(KERN_ERR PFX "flags3          = %s %s %s\n",
+       pr_err("flags3          = %s %s %s\n",
               ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
               ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
               ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
-       printk(KERN_ERR PFX "tid = %x\n", ob_mac_iocb->tid);
-       printk(KERN_ERR PFX "txq_idx = %d\n", ob_mac_iocb->txq_idx);
-       printk(KERN_ERR PFX "vlan_tci      = %x\n", ob_mac_tso_iocb->vlan_tci);
+       pr_err("tid = %x\n", ob_mac_iocb->tid);
+       pr_err("txq_idx = %d\n", ob_mac_iocb->txq_idx);
+       pr_err("vlan_tci      = %x\n", ob_mac_tso_iocb->vlan_tci);
        if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) {
-               printk(KERN_ERR PFX "frame_len      = %d\n",
+               pr_err("frame_len      = %d\n",
                       le32_to_cpu(ob_mac_tso_iocb->frame_len));
-               printk(KERN_ERR PFX "mss      = %d\n",
+               pr_err("mss      = %d\n",
                       le16_to_cpu(ob_mac_tso_iocb->mss));
-               printk(KERN_ERR PFX "prot_hdr_len   = %d\n",
+               pr_err("prot_hdr_len   = %d\n",
                       le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
-               printk(KERN_ERR PFX "hdr_offset     = 0x%.04x\n",
+               pr_err("hdr_offset     = 0x%.04x\n",
                       le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
                frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len);
        } else {
-               printk(KERN_ERR PFX "frame_len      = %d\n",
+               pr_err("frame_len      = %d\n",
                       le16_to_cpu(ob_mac_iocb->frame_len));
                frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
        }
@@ -2081,9 +1910,9 @@ void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
 
 void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
 {
-       printk(KERN_ERR PFX "%s\n", __func__);
-       printk(KERN_ERR PFX "opcode         = %d\n", ob_mac_rsp->opcode);
-       printk(KERN_ERR PFX "flags          = %s %s %s %s %s %s %s\n",
+       pr_err("%s\n", __func__);
+       pr_err("opcode         = %d\n", ob_mac_rsp->opcode);
+       pr_err("flags          = %s %s %s %s %s %s %s\n",
               ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".",
               ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
               ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
@@ -2091,16 +1920,16 @@ void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
               ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
               ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
               ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
-       printk(KERN_ERR PFX "tid = %x\n", ob_mac_rsp->tid);
+       pr_err("tid = %x\n", ob_mac_rsp->tid);
 }
 #endif
 
 #ifdef QL_IB_DUMP
 void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
 {
-       printk(KERN_ERR PFX "%s\n", __func__);
-       printk(KERN_ERR PFX "opcode         = 0x%x\n", ib_mac_rsp->opcode);
-       printk(KERN_ERR PFX "flags1 = %s%s%s%s%s%s\n",
+       pr_err("%s\n", __func__);
+       pr_err("opcode         = 0x%x\n", ib_mac_rsp->opcode);
+       pr_err("flags1 = %s%s%s%s%s%s\n",
               ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
               ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
               ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
@@ -2109,7 +1938,7 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
               ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");
 
        if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK)
-               printk(KERN_ERR PFX "%s%s%s Multicast.\n",
+               pr_err("%s%s%s Multicast\n",
                       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
                       IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
                       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
@@ -2117,7 +1946,7 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
                       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
                       IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
 
-       printk(KERN_ERR PFX "flags2 = %s%s%s%s%s\n",
+       pr_err("flags2 = %s%s%s%s%s\n",
               (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
               (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
               (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
@@ -2125,7 +1954,7 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
               (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");
 
        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK)
-               printk(KERN_ERR PFX "%s%s%s%s%s error.\n",
+               pr_err("%s%s%s%s%s error\n",
                       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
                       IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
                       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
@@ -2137,12 +1966,12 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
                       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
                       IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");
 
-       printk(KERN_ERR PFX "flags3 = %s%s.\n",
+       pr_err("flags3 = %s%s\n",
               ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
               ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");
 
        if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
-               printk(KERN_ERR PFX "RSS flags = %s%s%s%s.\n",
+               pr_err("RSS flags = %s%s%s%s\n",
                       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
                        IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
                       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
@@ -2152,26 +1981,26 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
                       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
                        IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");
 
-       printk(KERN_ERR PFX "data_len   = %d\n",
+       pr_err("data_len        = %d\n",
               le32_to_cpu(ib_mac_rsp->data_len));
-       printk(KERN_ERR PFX "data_addr    = 0x%llx\n",
+       pr_err("data_addr    = 0x%llx\n",
               (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr));
        if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
-               printk(KERN_ERR PFX "rss    = %x\n",
+               pr_err("rss    = %x\n",
                       le32_to_cpu(ib_mac_rsp->rss));
        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)
-               printk(KERN_ERR PFX "vlan_id    = %x\n",
+               pr_err("vlan_id    = %x\n",
                       le16_to_cpu(ib_mac_rsp->vlan_id));
 
-       printk(KERN_ERR PFX "flags4 = %s%s%s.\n",
+       pr_err("flags4 = %s%s%s\n",
                ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
                ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "",
                ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : "");
 
        if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
-               printk(KERN_ERR PFX "hdr length = %d.\n",
+               pr_err("hdr length      = %d\n",
                       le32_to_cpu(ib_mac_rsp->hdr_len));
-               printk(KERN_ERR PFX "hdr addr    = 0x%llx.\n",
+               pr_err("hdr addr    = 0x%llx\n",
                       (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr));
        }
 }
index 7d482a2316acf65c922f6696400ad2db17f4090d..142c381e1d73e0644eac3df74fea212385ad1115 100644 (file)
@@ -510,7 +510,7 @@ static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
        if (!lp->phydev)
                return -EINVAL;
 
-       return phy_mii_ioctl(lp->phydev, if_mii(rq), cmd);
+       return phy_mii_ioctl(lp->phydev, rq, cmd);
 }
 
 static int r6040_rx(struct net_device *dev, int limit)
index b8b85843c614f61126e3255940ecd2362ad2cb18..18bc5b718bbb51875dd5e2fbf14be6f626d0fa88 100644 (file)
@@ -5796,7 +5796,7 @@ static void s2io_vpd_read(struct s2io_nic *nic)
 {
        u8 *vpd_data;
        u8 data;
-       int i = 0, cnt, fail = 0;
+       int i = 0, cnt, len, fail = 0;
        int vpd_addr = 0x80;
        struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;
 
@@ -5837,20 +5837,28 @@ static void s2io_vpd_read(struct s2io_nic *nic)
 
        if (!fail) {
                /* read serial number of adapter */
-               for (cnt = 0; cnt < 256; cnt++) {
+               for (cnt = 0; cnt < 252; cnt++) {
                        if ((vpd_data[cnt] == 'S') &&
-                           (vpd_data[cnt+1] == 'N') &&
-                           (vpd_data[cnt+2] < VPD_STRING_LEN)) {
-                               memset(nic->serial_num, 0, VPD_STRING_LEN);
-                               memcpy(nic->serial_num, &vpd_data[cnt + 3],
-                                      vpd_data[cnt+2]);
-                               break;
+                           (vpd_data[cnt+1] == 'N')) {
+                               len = vpd_data[cnt+2];
+                               if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
+                                       memcpy(nic->serial_num,
+                                              &vpd_data[cnt + 3],
+                                              len);
+                                       memset(nic->serial_num+len,
+                                              0,
+                                              VPD_STRING_LEN-len);
+                                       break;
+                               }
                        }
                }
        }
 
-       if ((!fail) && (vpd_data[1] < VPD_STRING_LEN))
-               memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
+       if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
+               len = vpd_data[1];
+               memcpy(nic->product_name, &vpd_data[3], len);
+               nic->product_name[len] = 0;
+       }
        kfree(vpd_data);
        swstats->mem_freed += 256;
 }
index 3645fb3673db6c3637f36bd28bf7b7cbc3c55c6d..0af0335339053f8512280077d66b0ab4f41a2941 100644 (file)
@@ -65,7 +65,7 @@ static int debug_level = ERR_DBG;
 
 /* DEBUG message print. */
 #define DBG_PRINT(dbg_level, fmt, args...) do {                        \
-       if (dbg_level >= debug_level)                           \
+       if (dbg_level <= debug_level)                           \
                pr_info(fmt, ##args);                           \
        } while (0)
 
index c762c6ac055b119870977e644fdca2d80642a6ae..194e5cf8c763d8055e988ea14d1c5c43a8819029 100644 (file)
@@ -79,7 +79,7 @@
 
 #define SKY2_EEPROM_MAGIC      0x9955aabb
 
-#define RING_NEXT(x,s) (((x)+1) & ((s)-1))
+#define RING_NEXT(x, s)        (((x)+1) & ((s)-1))
 
 static const u32 default_msg =
     NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
@@ -172,7 +172,7 @@ static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val)
                udelay(10);
        }
 
-       dev_warn(&hw->pdev->dev,"%s: phy write timeout\n", hw->dev[port]->name);
+       dev_warn(&hw->pdev->dev, "%s: phy write timeout\n", hw->dev[port]->name);
        return -ETIMEDOUT;
 
 io_error:
@@ -1067,7 +1067,7 @@ static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
        return le;
 }
 
-static unsigned sky2_get_rx_threshold(struct sky2_portsky2)
+static unsigned sky2_get_rx_threshold(struct sky2_port *sky2)
 {
        unsigned size;
 
@@ -1078,7 +1078,7 @@ static unsigned sky2_get_rx_threshold(struct sky2_port* sky2)
        return (size - 8) / sizeof(u32);
 }
 
-static unsigned sky2_get_rx_data_size(struct sky2_portsky2)
+static unsigned sky2_get_rx_data_size(struct sky2_port *sky2)
 {
        struct rx_ring_info *re;
        unsigned size;
@@ -1102,7 +1102,7 @@ static unsigned sky2_get_rx_data_size(struct sky2_port* sky2)
 }
 
 /* Build description to hardware for one receive segment */
-static void sky2_rx_add(struct sky2_port *sky2,  u8 op,
+static void sky2_rx_add(struct sky2_port *sky2, u8 op,
                        dma_addr_t map, unsigned len)
 {
        struct sky2_rx_le *le;
@@ -3014,7 +3014,7 @@ static int __devinit sky2_init(struct sky2_hw *hw)
        hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
        hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
 
-       switch(hw->chip_id) {
+       switch (hw->chip_id) {
        case CHIP_ID_YUKON_XL:
                hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY;
                if (hw->chip_rev < CHIP_REV_YU_XL_A2)
@@ -3685,7 +3685,7 @@ static int sky2_set_mac_address(struct net_device *dev, void *p)
        return 0;
 }
 
-static void inline sky2_add_filter(u8 filter[8], const u8 *addr)
+static inline void sky2_add_filter(u8 filter[8], const u8 *addr)
 {
        u32 bit;
 
@@ -3911,7 +3911,7 @@ static int sky2_set_coalesce(struct net_device *dev,
                return -EINVAL;
        if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING)
                return -EINVAL;
-       if (ecmd->rx_max_coalesced_frames_irq >RX_MAX_PENDING)
+       if (ecmd->rx_max_coalesced_frames_irq > RX_MAX_PENDING)
                return -EINVAL;
 
        if (ecmd->tx_coalesce_usecs == 0)
@@ -4372,7 +4372,7 @@ static int sky2_debug_show(struct seq_file *seq, void *v)
                        seq_printf(seq, "%u:", idx);
                sop = 0;
 
-               switch(le->opcode & ~HW_OWNER) {
+               switch (le->opcode & ~HW_OWNER) {
                case OP_ADDR64:
                        seq_printf(seq, " %#x:", a);
                        break;
@@ -4441,7 +4441,7 @@ static int sky2_device_event(struct notifier_block *unused,
        if (dev->netdev_ops->ndo_open != sky2_up || !sky2_debug)
                return NOTIFY_DONE;
 
-       switch(event) {
+       switch (event) {
        case NETDEV_CHANGENAME:
                if (sky2->debugfs) {
                        sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs,
@@ -4636,7 +4636,7 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw)
        struct pci_dev *pdev = hw->pdev;
        int err;
 
-       init_waitqueue_head (&hw->msi_wait);
+       init_waitqueue_head(&hw->msi_wait);
 
        sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
 
@@ -4753,7 +4753,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
         * this driver uses software swapping.
         */
        reg &= ~PCI_REV_DESC;
-       err = pci_write_config_dword(pdev,PCI_DEV_REG2, reg);
+       err = pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
        if (err) {
                dev_err(&pdev->dev, "PCI write config failed\n");
                goto err_out_free_regions;
index 144f76fd3e39c71cd7870acb01a5db211f99a860..66b9da0260fe70b6177c6980604f24184dcdb431 100644 (file)
@@ -108,6 +108,7 @@ enum rx_frame_status { /* IPC status */
        good_frame = 0,
        discard_frame = 1,
        csum_none = 2,
+       llc_snap = 4,
 };
 
 enum tx_dma_irq_status {
index d8d0f3553770333145ca016a0277cc8b526f99fa..8b20b19971cbe6e0156ce5bee9edc96f73f06785 100644 (file)
@@ -93,7 +93,7 @@ enum inter_frame_gap {
 #define GMAC_CONTROL_IPC       0x00000400 /* Checksum Offload */
 #define GMAC_CONTROL_DR                0x00000200 /* Disable Retry */
 #define GMAC_CONTROL_LUD       0x00000100 /* Link up/down */
-#define GMAC_CONTROL_ACS       0x00000080 /* Automatic Pad Stripping */
+#define GMAC_CONTROL_ACS       0x00000080 /* Automatic Pad/FCS Stripping */
 #define GMAC_CONTROL_DC                0x00000010 /* Deferral Check */
 #define GMAC_CONTROL_TE                0x00000008 /* Transmitter Enable */
 #define GMAC_CONTROL_RE                0x00000004 /* Receiver Enable */
index 917b4e16923b800b550fee4502fae7239d67657b..2b2f5c8caf1c52ea010843789417bfb40768366b 100644 (file)
@@ -220,6 +220,8 @@ struct mac_device_info *dwmac1000_setup(unsigned long ioaddr)
                ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));
 
        mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
+       if (!mac)
+               return NULL;
 
        mac->mac = &dwmac1000_ops;
        mac->dma = &dwmac1000_dma_ops;
index 6f270a0e151afa8974356920ea72584293354e5a..2fb165fa2ba075b256533a7d1058ea00c36f2b33 100644 (file)
@@ -179,6 +179,8 @@ struct mac_device_info *dwmac100_setup(unsigned long ioaddr)
        struct mac_device_info *mac;
 
        mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
+       if (!mac)
+               return NULL;
 
        pr_info("\tDWMAC100\n");
 
index 3c18ebece043a303ee2eb3e3e7ec684aae1bac9b..f612f986a7e16b186ba5c5c1aafcc4ae98fa2313 100644 (file)
@@ -123,7 +123,7 @@ static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
         */
        if (status == 0x0) {
                CHIP_DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
-               ret = good_frame;
+               ret = llc_snap;
        } else if (status == 0x4) {
                CHIP_DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n");
                ret = good_frame;
index acf06168694035253c91d584c48c45bde654138f..bbb7951b9c4c34bb3992deed9c7a8e84ff52f68e 100644 (file)
@@ -829,7 +829,6 @@ static int stmmac_open(struct net_device *dev)
         * In case of failure continue without timer. */
        if (unlikely((stmmac_open_ext_timer(dev, priv->tm)) < 0)) {
                pr_warning("stmmaceth: cannot attach the external timer.\n");
-               tmrate = 0;
                priv->tm->freq = 0;
                priv->tm->timer_start = stmmac_no_timer_started;
                priv->tm->timer_stop = stmmac_no_timer_stopped;
@@ -1217,9 +1216,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
                        priv->dev->stats.rx_errors++;
                else {
                        struct sk_buff *skb;
-                       /* Length should omit the CRC */
-                       int frame_len = priv->hw->desc->get_rx_frame_len(p) - 4;
+                       int frame_len;
 
+                       frame_len = priv->hw->desc->get_rx_frame_len(p);
+                       /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
+                        * Type frames (LLC/LLC-SNAP) */
+                       if (unlikely(status != llc_snap))
+                               frame_len -= ETH_FCS_LEN;
 #ifdef STMMAC_RX_DEBUG
                        if (frame_len > ETH_FRAME_LEN)
                                pr_debug("\tRX frame size %d, COE status: %d\n",
@@ -1558,15 +1561,15 @@ static int stmmac_mac_device_setup(struct net_device *dev)
        else
                device = dwmac100_setup(ioaddr);
 
+       if (!device)
+               return -ENOMEM;
+
        if (priv->enh_desc) {
                device->desc = &enh_desc_ops;
                pr_info("\tEnhanced descriptor structure\n");
        } else
                device->desc = &ndesc_ops;
 
-       if (!device)
-               return -ENOMEM;
-
        priv->hw = device;
 
        priv->wolenabled = priv->hw->pmt;       /* PMT supported */
index b26a577829390003fd1af2b80923624dec482e8d..bc3af78a869ff52881077b89cf6e5e544b6e8a91 100644 (file)
 
 #define DRV_MODULE_NAME                "tg3"
 #define TG3_MAJ_NUM                    3
-#define TG3_MIN_NUM                    112
+#define TG3_MIN_NUM                    113
 #define DRV_MODULE_VERSION     \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE     "July 11, 2010"
+#define DRV_MODULE_RELDATE     "August 2, 2010"
 
 #define TG3_DEF_MAC_MODE       0
 #define TG3_DEF_RX_MODE                0
@@ -221,12 +221,9 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
-       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
-       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
-       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
@@ -882,7 +879,7 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
        unsigned int loops;
        int ret;
 
-       if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
+       if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
                return 0;
 
@@ -1178,7 +1175,7 @@ static int tg3_mdio_init(struct tg3 *tp)
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
-               tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
+               tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }
 
@@ -1271,7 +1268,7 @@ static void tg3_ump_link_report(struct tg3 *tp)
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
 
        val = 0;
-       if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
+       if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
@@ -1379,7 +1376,7 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
 
        if (autoneg == AUTONEG_ENABLE &&
            (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
-               if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
+               if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
                        flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
                else
                        flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
@@ -1493,7 +1490,7 @@ static int tg3_phy_init(struct tg3 *tp)
 {
        struct phy_device *phydev;
 
-       if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
+       if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
                return 0;
 
        /* Bring the PHY back to a known state. */
@@ -1513,7 +1510,7 @@ static int tg3_phy_init(struct tg3 *tp)
        switch (phydev->interface) {
        case PHY_INTERFACE_MODE_GMII:
        case PHY_INTERFACE_MODE_RGMII:
-               if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
+               if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
                        phydev->supported &= (PHY_GBIT_FEATURES |
                                              SUPPORTED_Pause |
                                              SUPPORTED_Asym_Pause);
@@ -1530,7 +1527,7 @@ static int tg3_phy_init(struct tg3 *tp)
                return -EINVAL;
        }
 
-       tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
+       tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
 
        phydev->advertising = phydev->supported;
 
@@ -1541,13 +1538,13 @@ static void tg3_phy_start(struct tg3 *tp)
 {
        struct phy_device *phydev;
 
-       if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
+       if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                return;
 
        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 
-       if (tp->link_config.phy_is_low_power) {
-               tp->link_config.phy_is_low_power = 0;
+       if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
+               tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
                phydev->speed = tp->link_config.orig_speed;
                phydev->duplex = tp->link_config.orig_duplex;
                phydev->autoneg = tp->link_config.orig_autoneg;
@@ -1561,7 +1558,7 @@ static void tg3_phy_start(struct tg3 *tp)
 
 static void tg3_phy_stop(struct tg3 *tp)
 {
-       if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
+       if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                return;
 
        phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
@@ -1569,16 +1566,21 @@ static void tg3_phy_stop(struct tg3 *tp)
 
 static void tg3_phy_fini(struct tg3 *tp)
 {
-       if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
+       if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
                phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
-               tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
+               tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
        }
 }
 
-static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
+static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
 {
-       tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
-       tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
+       int err;
+
+       err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
+       if (!err)
+               err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
+
+       return err;
 }
 
 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
@@ -1608,10 +1610,10 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
            ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
-            (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
+            (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
                return;
 
-       if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
+       if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                tg3_phy_fet_toggle_apd(tp, enable);
                return;
        }
@@ -1642,10 +1644,10 @@ static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
        u32 phy;
 
        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
-           (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
+           (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
                return;
 
-       if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
+       if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                u32 ephy;
 
                if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
@@ -1681,7 +1683,7 @@ static void tg3_phy_set_wirespeed(struct tg3 *tp)
 {
        u32 val;
 
-       if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
+       if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
                return;
 
        if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
@@ -1740,7 +1742,7 @@ static int tg3_wait_macro_done(struct tg3 *tp)
        while (limit--) {
                u32 tmp32;
 
-               if (!tg3_readphy(tp, 0x16, &tmp32)) {
+               if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
                        if ((tmp32 & 0x1000) == 0)
                                break;
                }
@@ -1766,13 +1768,13 @@ static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
 
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
-               tg3_writephy(tp, 0x16, 0x0002);
+               tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
 
                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
                                     test_pat[chan][i]);
 
-               tg3_writephy(tp, 0x16, 0x0202);
+               tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
@@ -1780,13 +1782,13 @@ static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
 
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
-               tg3_writephy(tp, 0x16, 0x0082);
+               tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }
 
-               tg3_writephy(tp, 0x16, 0x0802);
+               tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
@@ -1826,10 +1828,10 @@ static int tg3_phy_reset_chanpat(struct tg3 *tp)
 
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
-               tg3_writephy(tp, 0x16, 0x0002);
+               tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
-               tg3_writephy(tp, 0x16, 0x0202);
+               tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
                if (tg3_wait_macro_done(tp))
                        return -EBUSY;
        }
@@ -1875,8 +1877,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
 
                /* Block the PHY control access.  */
-               tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
-               tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
+               tg3_phydsp_write(tp, 0x8005, 0x0800);
 
                err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
                if (!err)
@@ -1887,11 +1888,10 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
        if (err)
                return err;
 
-       tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
-       tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
+       tg3_phydsp_write(tp, 0x8005, 0x0000);
 
        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
-       tg3_writephy(tp, 0x16, 0x0000);
+       tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
@@ -1984,42 +1984,37 @@ static int tg3_phy_reset(struct tg3 *tp)
 
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
-           (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))
+           (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
                return 0;
 
        tg3_phy_apply_otp(tp);
 
-       if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
+       if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
                tg3_phy_toggle_apd(tp, true);
        else
                tg3_phy_toggle_apd(tp, false);
 
 out:
-       if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
+       if (tp->phy_flags & TG3_PHYFLG_ADC_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
-               tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
-               tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
-               tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
-               tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
+               tg3_phydsp_write(tp, 0x201f, 0x2aaa);
+               tg3_phydsp_write(tp, 0x000a, 0x0323);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
-       if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
-               tg3_writephy(tp, 0x1c, 0x8d68);
-               tg3_writephy(tp, 0x1c, 0x8d68);
+       if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
+               tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
+               tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
        }
-       if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
+       if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
-               tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
-               tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
-               tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
-               tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
-               tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
-               tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
+               tg3_phydsp_write(tp, 0x000a, 0x310b);
+               tg3_phydsp_write(tp, 0x201f, 0x9506);
+               tg3_phydsp_write(tp, 0x401f, 0x14e2);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
-       } else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
+       } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
-               if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
+               if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
                        tg3_writephy(tp, MII_TG3_TEST1,
                                     MII_TG3_TEST1_TRIM_EN | 0x4);
@@ -2204,7 +2199,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
 {
        u32 val;
 
-       if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
+       if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                        u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
                        u32 serdes_cfg = tr32(MAC_SERDES_CFG);
@@ -2223,7 +2218,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
                tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
                return;
-       } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
+       } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                u32 phytest;
                if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
                        u32 phy;
@@ -2260,7 +2255,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
-            (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
+            (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
                return;
 
        if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
@@ -2563,14 +2558,14 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
 
        if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
                do_low_power = false;
-               if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
-                   !tp->link_config.phy_is_low_power) {
+               if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
+                   !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
                        struct phy_device *phydev;
                        u32 phyid, advertising;
 
                        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 
-                       tp->link_config.phy_is_low_power = 1;
+                       tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
 
                        tp->link_config.orig_speed = phydev->speed;
                        tp->link_config.orig_duplex = phydev->duplex;
@@ -2609,14 +2604,14 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
        } else {
                do_low_power = true;
 
-               if (tp->link_config.phy_is_low_power == 0) {
-                       tp->link_config.phy_is_low_power = 1;
+               if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
+                       tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
                        tp->link_config.orig_speed = tp->link_config.speed;
                        tp->link_config.orig_duplex = tp->link_config.duplex;
                        tp->link_config.orig_autoneg = tp->link_config.autoneg;
                }
 
-               if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
+               if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
                        tp->link_config.speed = SPEED_10;
                        tp->link_config.duplex = DUPLEX_HALF;
                        tp->link_config.autoneg = AUTONEG_ENABLE;
@@ -2649,13 +2644,13 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
        if (device_should_wake) {
                u32 mac_mode;
 
-               if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
+               if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
                        if (do_low_power) {
                                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
                                udelay(40);
                        }
 
-                       if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
+                       if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
                                mac_mode = MAC_MODE_PORT_MODE_GMII;
                        else
                                mac_mode = MAC_MODE_PORT_MODE_MII;
@@ -2823,7 +2818,7 @@ static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8
                break;
 
        default:
-               if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
+               if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                        *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
                                 SPEED_10;
                        *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
@@ -2841,7 +2836,7 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
        u32 new_adv;
        int i;
 
-       if (tp->link_config.phy_is_low_power) {
+       if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
                /* Entering low power mode.  Disable gigabit and
                 * 100baseT advertisements.
                 */
@@ -2854,7 +2849,7 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
 
                tg3_writephy(tp, MII_ADVERTISE, new_adv);
        } else if (tp->link_config.speed == SPEED_INVALID) {
-               if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
+               if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                        tp->link_config.advertising &=
                                ~(ADVERTISED_1000baseT_Half |
                                  ADVERTISED_1000baseT_Full);
@@ -2880,7 +2875,7 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
                                new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
                        if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
                                new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
-                       if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
+                       if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY) &&
                            (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
                                new_adv |= (MII_TG3_CTRL_AS_MASTER |
@@ -2982,20 +2977,11 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp)
        /* Set Extended packet length bit */
        err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
 
-       err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
-       err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
-
-       err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
-       err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
-
-       err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
-       err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
-
-       err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
-       err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
-
-       err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
-       err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
+       err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
+       err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
+       err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
+       err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
+       err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
 
        udelay(40);
 
@@ -3020,7 +3006,7 @@ static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
 
        if ((adv_reg & all_mask) != all_mask)
                return 0;
-       if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
+       if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
                u32 tg3_ctrl;
 
                all_mask = 0;
@@ -3148,18 +3134,18 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
                   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
                /* 5701 {A0,B0} CRC bug workaround */
                tg3_writephy(tp, 0x15, 0x0a75);
-               tg3_writephy(tp, 0x1c, 0x8c68);
-               tg3_writephy(tp, 0x1c, 0x8d68);
-               tg3_writephy(tp, 0x1c, 0x8c68);
+               tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
+               tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
+               tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
        }
 
        /* Clear pending interrupts... */
        tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
        tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
 
-       if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
+       if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
                tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
-       else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
+       else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
                tg3_writephy(tp, MII_TG3_IMASK, ~0);
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
@@ -3175,7 +3161,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
        current_speed = SPEED_INVALID;
        current_duplex = DUPLEX_INVALID;
 
-       if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
+       if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
                u32 val;
 
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
@@ -3251,7 +3237,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
        }
 
 relink:
-       if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
+       if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
                u32 tmp;
 
                tg3_phy_copper_begin(tp);
@@ -3269,7 +3255,7 @@ relink:
                        tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
-       } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)
+       } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
                tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
        else
                tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
@@ -3820,7 +3806,7 @@ static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
                expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
 
        if (sg_dig_ctrl != expected_sg_dig_ctrl) {
-               if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
+               if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
                    tp->serdes_counter &&
                    ((mac_status & (MAC_STATUS_PCS_SYNCED |
                                    MAC_STATUS_RCVD_CFG)) ==
@@ -3837,7 +3823,7 @@ restart_autoneg:
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
 
                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
-               tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
+               tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
        } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
                                 MAC_STATUS_SIGNAL_DET)) {
                sg_dig_status = tr32(SG_DIG_STATUS);
@@ -3860,7 +3846,7 @@ restart_autoneg:
                        tg3_setup_flow_control(tp, local_adv, remote_adv);
                        current_link_up = 1;
                        tp->serdes_counter = 0;
-                       tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
+                       tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
                } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
                        if (tp->serdes_counter)
                                tp->serdes_counter--;
@@ -3887,8 +3873,8 @@ restart_autoneg:
                                    !(mac_status & MAC_STATUS_RCVD_CFG)) {
                                        tg3_setup_flow_control(tp, 0, 0);
                                        current_link_up = 1;
-                                       tp->tg3_flags2 |=
-                                               TG3_FLG2_PARALLEL_DETECT;
+                                       tp->phy_flags |=
+                                               TG3_PHYFLG_PARALLEL_DETECT;
                                        tp->serdes_counter =
                                                SERDES_PARALLEL_DET_TIMEOUT;
                                } else
@@ -3897,7 +3883,7 @@ restart_autoneg:
                }
        } else {
                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
-               tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
+               tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
        }
 
 out:
@@ -4114,7 +4100,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
        err |= tg3_readphy(tp, MII_BMCR, &bmcr);
 
        if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
-           (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
+           (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
                /* do nothing, just check for link up at the end */
        } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                u32 adv, new_adv;
@@ -4139,7 +4125,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
 
                        tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
                        tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
-                       tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
+                       tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 
                        return err;
                }
@@ -4184,7 +4170,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
                                else
                                        bmsr &= ~BMSR_LSTATUS;
                        }
-                       tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
+                       tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
                }
        }
 
@@ -4239,7 +4225,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
                        netif_carrier_on(tp->dev);
                else {
                        netif_carrier_off(tp->dev);
-                       tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
+                       tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
                }
                tg3_link_report(tp);
        }
@@ -4263,13 +4249,14 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp)
                        u32 phy1, phy2;
 
                        /* Select shadow register 0x1f */
-                       tg3_writephy(tp, 0x1c, 0x7c00);
-                       tg3_readphy(tp, 0x1c, &phy1);
+                       tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
+                       tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
 
                        /* Select expansion interrupt status register */
-                       tg3_writephy(tp, 0x17, 0x0f01);
-                       tg3_readphy(tp, 0x15, &phy2);
-                       tg3_readphy(tp, 0x15, &phy2);
+                       tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
+                                        MII_TG3_DSP_EXP1_INT_STAT);
+                       tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
+                       tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
 
                        if ((phy1 & 0x10) && !(phy2 & 0x20)) {
                                /* We have signal detect and not receiving
@@ -4280,17 +4267,18 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp)
                                bmcr &= ~BMCR_ANENABLE;
                                bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
                                tg3_writephy(tp, MII_BMCR, bmcr);
-                               tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
+                               tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
                        }
                }
        } else if (netif_carrier_ok(tp->dev) &&
                   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
-                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
+                  (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
                u32 phy2;
 
                /* Select expansion interrupt status register */
-               tg3_writephy(tp, 0x17, 0x0f01);
-               tg3_readphy(tp, 0x15, &phy2);
+               tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
+                                MII_TG3_DSP_EXP1_INT_STAT);
+               tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
                if (phy2 & 0x20) {
                        u32 bmcr;
 
@@ -4298,7 +4286,7 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp)
                        tg3_readphy(tp, MII_BMCR, &bmcr);
                        tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
 
-                       tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
+                       tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 
                }
        }
@@ -4308,9 +4296,9 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
 {
        int err;
 
-       if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
+       if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                err = tg3_setup_fiber_phy(tp, force_reset);
-       else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
+       else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
                err = tg3_setup_fiber_mii_phy(tp, force_reset);
        else
                err = tg3_setup_copper_phy(tp, force_reset);
@@ -4389,7 +4377,8 @@ static void tg3_tx_recover(struct tg3 *tp)
 
 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
 {
-       smp_mb();
+       /* Tell compiler to fetch tx indices from memory. */
+       barrier();
        return tnapi->tx_pending -
               ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
 }
@@ -5673,6 +5662,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
        tnapi->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
                netif_tx_stop_queue(txq);
+
+               /* netif_tx_stop_queue() must be done before checking
+                * tx index in tg3_tx_avail() below, because in
+                * tg3_tx(), we update tx index before checking for
+                * netif_tx_queue_stopped().
+                */
+               smp_mb();
                if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
                        netif_tx_wake_queue(txq);
        }
@@ -5718,6 +5714,13 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
        /* Estimate the number of fragments in the worst case */
        if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
                netif_stop_queue(tp->dev);
+
+               /* netif_tx_stop_queue() must be done before checking
+                * tx index in tg3_tx_avail() below, because in
+                * tg3_tx(), we update tx index before checking for
+                * netif_tx_queue_stopped().
+                */
+               smp_mb();
                if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
                        return NETDEV_TX_BUSY;
 
@@ -5953,6 +5956,13 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
        tnapi->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
                netif_tx_stop_queue(txq);
+
+               /* netif_tx_stop_queue() must be done before checking
+                * tx index in tg3_tx_avail() below, because in
+                * tg3_tx(), we update tx index before checking for
+                * netif_tx_queue_stopped().
+                */
+               smp_mb();
                if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
                        netif_tx_wake_queue(txq);
        }
@@ -6929,9 +6939,13 @@ static int tg3_chip_reset(struct tg3 *tp)
        val = GRC_MISC_CFG_CORECLK_RESET;
 
        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
-               if (tr32(0x7e2c) == 0x60) {
-                       tw32(0x7e2c, 0x20);
-               }
+               /* Force PCIe 1.0a mode */
+               if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+                   !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
+                   tr32(TG3_PCIE_PHY_TSTCTL) ==
+                   (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
+                       tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
+
                if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
                        tw32(GRC_MISC_CFG, (1 << 29));
                        val |= (1 << 29);
@@ -6944,8 +6958,11 @@ static int tg3_chip_reset(struct tg3 *tp)
                     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
        }
 
-       if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+       /* Manage gphy power for all CPMU absent PCIe devices. */
+       if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
+           !(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
                val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
+
        tw32(GRC_MISC_CFG, val);
 
        /* restore 5701 hardware bug workaround write method */
@@ -7002,8 +7019,7 @@ static int tg3_chip_reset(struct tg3 *tp)
                 * Older PCIe devices only support the 128 byte
                 * MPS setting.  Enforce the restriction.
                 */
-               if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
-                   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784))
+               if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
                        val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
                pci_write_config_word(tp->pdev,
                                      tp->pcie_cap + PCI_EXP_DEVCTL,
@@ -7050,10 +7066,10 @@ static int tg3_chip_reset(struct tg3 *tp)
                tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
        }
 
-       if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
+       if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
                tw32_f(MAC_MODE, tp->mac_mode);
-       } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
+       } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
                tw32_f(MAC_MODE, tp->mac_mode);
        } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
@@ -7076,9 +7092,7 @@ static int tg3_chip_reset(struct tg3 *tp)
        if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
            tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719 &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
+           !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
                val = tr32(0x7c00);
 
                tw32(0x7c00, val | (1 << 25));
@@ -7751,9 +7765,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        if (err)
                return err;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+       if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
                val = tr32(TG3PCI_DMA_RW_CTRL) &
                      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
                if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
@@ -7916,9 +7928,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                             BDINFO_FLAGS_DISABLED);
                }
 
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+               if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
                        val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
                              (TG3_RX_STD_DMA_SZ << 2);
                else
@@ -7935,9 +7945,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                          tp->rx_jumbo_pending : 0;
        tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+       if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
                tw32(STD_REPLENISH_LWM, 32);
                tw32(JMB_REPLENISH_LWM, 16);
        }
@@ -8065,8 +8073,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
 
-       if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
-               tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
+       if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
+               tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
                /* reset to prevent losing 1st rx packet intermittently */
                tw32_f(MAC_RX_MODE, RX_MODE_RESET);
                udelay(10);
@@ -8079,7 +8087,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
                MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
-           !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
+           !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
                tp->mac_mode |= MAC_MODE_LINK_POLARITY;
        tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
@@ -8264,16 +8272,16 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        tw32(MAC_LED_CTRL, tp->led_ctrl);
 
        tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
-       if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
+       if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
                tw32_f(MAC_RX_MODE, RX_MODE_RESET);
                udelay(10);
        }
        tw32_f(MAC_RX_MODE, tp->rx_mode);
        udelay(10);
 
-       if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
+       if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
                if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
-                       !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
+                       !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
                        /* Set drive transmission level to 1.2V  */
                        /* only if the signal pre-emphasis bit is not set  */
                        val = tr32(MAC_SERDES_CFG);
@@ -8295,12 +8303,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
-           (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
+           (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
                /* Use hardware link auto-negotiation */
                tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
        }
 
-       if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
+       if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
                u32 tmp;
 
@@ -8312,8 +8320,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        }
 
        if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
-               if (tp->link_config.phy_is_low_power) {
-                       tp->link_config.phy_is_low_power = 0;
+               if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
+                       tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
                        tp->link_config.speed = tp->link_config.orig_speed;
                        tp->link_config.duplex = tp->link_config.orig_duplex;
                        tp->link_config.autoneg = tp->link_config.orig_autoneg;
@@ -8323,15 +8331,15 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                if (err)
                        return err;
 
-               if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
-                   !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) {
+               if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
+                   !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
                        u32 tmp;
 
                        /* Clear CRC stats. */
                        if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
                                tg3_writephy(tp, MII_TG3_TEST1,
                                             tmp | MII_TG3_TEST1_CRC_EN);
-                               tg3_readphy(tp, 0x14, &tmp);
+                               tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
                        }
                }
        }
@@ -8499,7 +8507,7 @@ static void tg3_timer(unsigned long __opaque)
                        mac_stat = tr32(MAC_STATUS);
 
                        phy_event = 0;
-                       if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
+                       if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
                                if (mac_stat & MAC_STATUS_MI_INTERRUPT)
                                        phy_event = 1;
                        } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
@@ -8531,7 +8539,7 @@ static void tg3_timer(unsigned long __opaque)
                                }
                                tg3_setup_phy(tp, 0);
                        }
-               } else if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
+               } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
                           (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
                        tg3_serdes_parallel_detect(tp);
                }
@@ -8627,9 +8635,7 @@ static int tg3_test_interrupt(struct tg3 *tp)
         * Turn off MSI one shot mode.  Otherwise this test has no
         * observable way to know whether the interrupt was delivered.
         */
-       if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
+       if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
            (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
                val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
                tw32(MSGINT_MODE, val);
@@ -8672,9 +8678,7 @@ static int tg3_test_interrupt(struct tg3 *tp)
 
        if (intr_ok) {
                /* Reenable MSI one shot mode. */
-               if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
+               if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
                    (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
                        val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
                        tw32(MSGINT_MODE, val);
@@ -8865,7 +8869,7 @@ static void tg3_ints_fini(struct tg3 *tp)
        else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
                pci_disable_msi(tp->pdev);
        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
-       tp->tg3_flags3 &= ~TG3_FLG3_ENABLE_RSS;
+       tp->tg3_flags3 &= ~(TG3_FLG3_ENABLE_RSS | TG3_FLG3_ENABLE_TSS);
 }
 
 static int tg3_open(struct net_device *dev)
@@ -8969,11 +8973,8 @@ static int tg3_open(struct net_device *dev)
                        goto err_out2;
                }
 
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
-                   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719 &&
-                   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
-                   (tp->tg3_flags2 & TG3_FLG2_USING_MSI) &&
-                   (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)) {
+               if (!(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
+                   (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
                        u32 val = tr32(PCIE_TRANSACTION_CFG);
 
                        tw32(PCIE_TRANSACTION_CFG,
@@ -9068,7 +9069,7 @@ static u64 calc_crc_errors(struct tg3 *tp)
 {
        struct tg3_hw_stats *hw_stats = tp->hw_stats;
 
-       if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
+       if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 val;
@@ -9077,7 +9078,7 @@ static u64 calc_crc_errors(struct tg3 *tp)
                if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
                        tg3_writephy(tp, MII_TG3_TEST1,
                                     val | MII_TG3_TEST1_CRC_EN);
-                       tg3_readphy(tp, 0x14, &val);
+                       tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
                } else
                        val = 0;
                spin_unlock_bh(&tp->lock);
@@ -9367,7 +9368,7 @@ static void tg3_get_regs(struct net_device *dev,
 
        memset(p, 0, TG3_REGDUMP_LEN);
 
-       if (tp->link_config.phy_is_low_power)
+       if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
                return;
 
        tg3_full_lock(tp, 0);
@@ -9446,7 +9447,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
        if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
                return -EINVAL;
 
-       if (tp->link_config.phy_is_low_power)
+       if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
                return -EAGAIN;
 
        offset = eeprom->offset;
@@ -9508,7 +9509,7 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
        u8 *buf;
        __be32 start, end;
 
-       if (tp->link_config.phy_is_low_power)
+       if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
                return -EAGAIN;
 
        if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
@@ -9565,7 +9566,7 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 
        if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
                struct phy_device *phydev;
-               if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
+               if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
                phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
                return phy_ethtool_gset(phydev, cmd);
@@ -9573,11 +9574,11 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 
        cmd->supported = (SUPPORTED_Autoneg);
 
-       if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
+       if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
                cmd->supported |= (SUPPORTED_1000baseT_Half |
                                   SUPPORTED_1000baseT_Full);
 
-       if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
+       if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
                cmd->supported |= (SUPPORTED_100baseT_Half |
                                  SUPPORTED_100baseT_Full |
                                  SUPPORTED_10baseT_Half |
@@ -9608,7 +9609,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 
        if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
                struct phy_device *phydev;
-               if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
+               if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
                phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
                return phy_ethtool_sset(phydev, cmd);
@@ -9628,11 +9629,11 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                           ADVERTISED_Pause |
                           ADVERTISED_Asym_Pause;
 
-               if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
+               if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
                        mask |= ADVERTISED_1000baseT_Half |
                                ADVERTISED_1000baseT_Full;
 
-               if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
+               if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
                        mask |= ADVERTISED_100baseT_Half |
                                ADVERTISED_100baseT_Full |
                                ADVERTISED_10baseT_Half |
@@ -9653,7 +9654,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 
                cmd->advertising &= mask;
        } else {
-               if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
+               if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
                        if (cmd->speed != SPEED_1000)
                                return -EINVAL;
 
@@ -9789,11 +9790,11 @@ static int tg3_nway_reset(struct net_device *dev)
        if (!netif_running(dev))
                return -EAGAIN;
 
-       if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
+       if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                return -EINVAL;
 
        if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
-               if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
+               if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
                r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
        } else {
@@ -9804,7 +9805,7 @@ static int tg3_nway_reset(struct net_device *dev)
                tg3_readphy(tp, MII_BMCR, &bmcr);
                if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
                    ((bmcr & BMCR_ANENABLE) ||
-                    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
+                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
                        tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
                                                   BMCR_ANENABLE);
                        r = 0;
@@ -9939,7 +9940,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
                else
                        tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
 
-               if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
+               if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
                        u32 oldadv = phydev->advertising &
                                     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
                        if (oldadv != newadv) {
@@ -10268,7 +10269,7 @@ static int tg3_test_link(struct tg3 *tp)
        if (!netif_running(tp->dev))
                return -ENODEV;
 
-       if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
+       if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
                max = TG3_SERDES_TIMEOUT_SEC;
        else
                max = TG3_COPPER_TIMEOUT_SEC;
@@ -10630,7 +10631,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
                           MAC_MODE_PORT_INT_LPBACK;
                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                        mac_mode |= MAC_MODE_LINK_POLARITY;
-               if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
+               if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;
@@ -10638,7 +10639,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
        } else if (loopback_mode == TG3_PHY_LOOPBACK) {
                u32 val;
 
-               if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
+               if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                        tg3_phy_fet_toggle_apd(tp, false);
                        val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
                } else
@@ -10650,7 +10651,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
                udelay(40);
 
                mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
-               if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
+               if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                        tg3_writephy(tp, MII_TG3_FET_PTEST,
                                     MII_TG3_FET_PTEST_FRC_TX_LINK |
                                     MII_TG3_FET_PTEST_FRC_TX_LOCK);
@@ -10662,7 +10663,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;
 
                /* reset to prevent losing 1st rx packet intermittently */
-               if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
+               if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
                        tw32_f(MAC_RX_MODE, RX_MODE_RESET);
                        udelay(10);
                        tw32_f(MAC_RX_MODE, tp->rx_mode);
@@ -10793,7 +10794,7 @@ static int tg3_test_loopback(struct tg3 *tp)
                return TG3_LOOPBACK_FAILED;
 
        /* Turn off gphy autopowerdown. */
-       if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
+       if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
                tg3_phy_toggle_apd(tp, false);
 
        if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
@@ -10830,14 +10831,14 @@ static int tg3_test_loopback(struct tg3 *tp)
                tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
        }
 
-       if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
+       if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
            !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
                if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
                        err |= TG3_PHY_LOOPBACK_FAILED;
        }
 
        /* Re-enable gphy autopowerdown. */
-       if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
+       if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
                tg3_phy_toggle_apd(tp, true);
 
        return err;
@@ -10848,7 +10849,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
 {
        struct tg3 *tp = netdev_priv(dev);
 
-       if (tp->link_config.phy_is_low_power)
+       if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
                tg3_set_power_state(tp, PCI_D0);
 
        memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
@@ -10880,7 +10881,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
                if (!err)
                        tg3_nvram_unlock(tp);
 
-               if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
+               if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
                        tg3_phy_reset(tp);
 
                if (tg3_test_registers(tp) != 0) {
@@ -10916,7 +10917,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
                if (irq_sync && !err2)
                        tg3_phy_start(tp);
        }
-       if (tp->link_config.phy_is_low_power)
+       if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
                tg3_set_power_state(tp, PCI_D3hot);
 
 }
@@ -10929,7 +10930,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 
        if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
                struct phy_device *phydev;
-               if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
+               if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
                phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
                return phy_mii_ioctl(phydev, ifr, cmd);
@@ -10943,10 +10944,10 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        case SIOCGMIIREG: {
                u32 mii_regval;
 
-               if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
+               if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                        break;                  /* We have no PHY */
 
-               if (tp->link_config.phy_is_low_power)
+               if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
                        return -EAGAIN;
 
                spin_lock_bh(&tp->lock);
@@ -10959,10 +10960,10 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        }
 
        case SIOCSMIIREG:
-               if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
+               if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                        break;                  /* We have no PHY */
 
-               if (tp->link_config.phy_is_low_power)
+               if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
                        return -EAGAIN;
 
                spin_lock_bh(&tp->lock);
@@ -12090,9 +12091,9 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
                tp->phy_id = eeprom_phy_id;
                if (eeprom_phy_serdes) {
                        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
-                               tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
+                               tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
                        else
-                               tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
+                               tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
                }
 
                if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
@@ -12176,7 +12177,7 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
                        (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
                        tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
 
-               if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
+               if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
                    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
                        tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
 
@@ -12185,19 +12186,21 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
                        tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
 
                if (cfg2 & (1 << 17))
-                       tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
+                       tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
 
                /* serdes signal pre-emphasis in register 0x590 set by */
                /* bootcode if bit 18 is set */
                if (cfg2 & (1 << 18))
-                       tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
+                       tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
 
                if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
                    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
-                       tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD;
+                       tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
 
-               if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
+               if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
+                   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+                   !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
                        u32 cfg3;
 
                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
@@ -12302,9 +12305,9 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
        if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
                tp->phy_id = hw_phy_id;
                if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
-                       tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
+                       tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
                else
-                       tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
+                       tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
        } else {
                if (tp->phy_id != TG3_PHY_ID_INVALID) {
                        /* Do nothing, phy ID already set up in
@@ -12323,11 +12326,11 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
                        tp->phy_id = p->phy_id;
                        if (!tp->phy_id ||
                            tp->phy_id == TG3_PHY_ID_BCM8002)
-                               tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
+                               tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
                }
        }
 
-       if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
+       if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
            !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
                u32 bmsr, adv_reg, tg3_ctrl, mask;
@@ -12345,7 +12348,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
                           ADVERTISE_100HALF | ADVERTISE_100FULL |
                           ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
                tg3_ctrl = 0;
-               if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
+               if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
                        tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
                                    MII_TG3_CTRL_ADV_1000_FULL);
                        if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
@@ -12360,7 +12363,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
                if (!tg3_copper_is_advertising_all(tp, mask)) {
                        tg3_writephy(tp, MII_ADVERTISE, adv_reg);
 
-                       if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
+                       if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
                                tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
 
                        tg3_writephy(tp, MII_BMCR,
@@ -12369,7 +12372,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
                tg3_phy_set_wirespeed(tp);
 
                tg3_writephy(tp, MII_ADVERTISE, adv_reg);
-               if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
+               if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
                        tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
        }
 
@@ -12382,13 +12385,13 @@ skip_phy_reset:
                err = tg3_init_5401phy_dsp(tp);
        }
 
-       if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
+       if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
                tp->link_config.advertising =
                        (ADVERTISED_1000baseT_Half |
                         ADVERTISED_1000baseT_Full |
                         ADVERTISED_Autoneg |
                         ADVERTISED_FIBRE);
-       if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
+       if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                tp->link_config.advertising &=
                        ~(ADVERTISED_1000baseT_Half |
                          ADVERTISED_1000baseT_Full);
@@ -12717,6 +12720,7 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp)
 {
        int vlen;
        u32 apedata;
+       char *fwtype;
 
        if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
            !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
@@ -12732,9 +12736,15 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp)
 
        apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
 
+       if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
+               fwtype = "NCSI";
+       else
+               fwtype = "DASH";
+
        vlen = strlen(tp->fw_ver);
 
-       snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " DASH v%d.%d.%d.%d",
+       snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
+                fwtype,
                 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
                 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
                 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
@@ -12988,6 +12998,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
                tp->pdev_peer = tg3_find_peer(tp);
 
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+               tp->tg3_flags3 |= TG3_FLG3_5717_PLUS;
+
        /* Intentionally exclude ASIC_REV_5906 */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
@@ -12995,9 +13010,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+           (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
                tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
@@ -13027,9 +13040,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
        }
 
        /* Determine TSO capabilities */
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+       if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
                tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
        else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
                 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
@@ -13065,9 +13076,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                        tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
                }
 
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+               if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
                        tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
                        tp->irq_max = TG3_IRQ_MAX_VECS;
                }
@@ -13082,9 +13091,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
        }
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+       if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
                tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
 
        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
@@ -13285,9 +13292,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+           (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
                tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
 
        /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
@@ -13345,41 +13350,39 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
        }
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
-               tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
+               tp->phy_flags |= TG3_PHYFLG_IS_FET;
 
        /* A few boards don't want Ethernet@WireSpeed phy feature */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
            ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
             (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
             (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
-           (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) ||
-           (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
-               tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
+           (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
+           (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
+               tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
 
        if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
            GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
-               tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
+               tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
        if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
-               tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
+               tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
 
        if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
-           !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
+           !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719 &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
+           !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                        if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
                            tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
-                               tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
+                               tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
                        if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
-                               tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
+                               tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
                } else
-                       tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
+                       tp->phy_flags |= TG3_PHYFLG_BER_BUG;
        }
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
@@ -13492,8 +13495,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
-           (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
-               tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
+           (tp->phy_flags & TG3_PHYFLG_IS_FET))
+               tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
 
        err = tg3_phy_probe(tp);
        if (err) {
@@ -13505,13 +13508,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
        tg3_read_vpd(tp);
        tg3_read_fw_ver(tp);
 
-       if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
-               tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
+       if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
+               tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
        } else {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
-                       tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
+                       tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
                else
-                       tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
+                       tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
        }
 
        /* 5700 {AX,BX} chips have a broken status block link
@@ -13529,13 +13532,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
         */
        if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
-           !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
-               tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
-                                 TG3_FLAG_USE_LINKCHG_REG);
+           !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
+               tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
+               tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
        }
 
        /* For all SERDES we poll the MAC status register. */
-       if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
+       if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
        else
                tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
@@ -13705,9 +13708,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
 #endif
 #endif
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+       if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
                val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
                goto out;
        }
@@ -13918,9 +13919,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 
        tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+       if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
                goto out;
 
        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
@@ -14110,7 +14109,6 @@ static void __devinit tg3_init_link_config(struct tg3 *tp)
        tp->link_config.autoneg = AUTONEG_ENABLE;
        tp->link_config.active_speed = SPEED_INVALID;
        tp->link_config.active_duplex = DUPLEX_INVALID;
-       tp->link_config.phy_is_low_power = 0;
        tp->link_config.orig_speed = SPEED_INVALID;
        tp->link_config.orig_duplex = DUPLEX_INVALID;
        tp->link_config.orig_autoneg = AUTONEG_INVALID;
@@ -14118,9 +14116,7 @@ static void __devinit tg3_init_link_config(struct tg3 *tp)
 
 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
 {
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+       if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
                tp->bufmgr_config.mbuf_read_dma_low_water =
                        DEFAULT_MB_RDMA_LOW_WATER_5705;
                tp->bufmgr_config.mbuf_mac_rx_low_water =
@@ -14645,24 +14641,31 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
                    tg3_bus_string(tp, str),
                    dev->dev_addr);
 
-       if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
+       if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
                struct phy_device *phydev;
                phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
                netdev_info(dev,
                            "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
                            phydev->drv->name, dev_name(&phydev->dev));
-       } else
+       } else {
+               char *ethtype;
+
+               if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
+                       ethtype = "10/100Base-TX";
+               else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
+                       ethtype = "1000Base-SX";
+               else
+                       ethtype = "10/100/1000Base-T";
+
                netdev_info(dev, "attached PHY is %s (%s Ethernet) "
-                           "(WireSpeed[%d])\n", tg3_phy_string(tp),
-                           ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
-                            ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
-                             "10/100/1000Base-T")),
-                           (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
+                           "(WireSpeed[%d])\n", tg3_phy_string(tp), ethtype,
+                         (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0);
+       }
 
        netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
                    (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
                    (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
-                   (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
+                   (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
                    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
        netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
index 0432399ca74173e6ba73a2d08f17db4d1c743a82..4937bd19096413bae1115b82cc63ce5123207536 100644 (file)
 #define  TG3_PCIE_LNKCTL_L1_PLL_PD_DIS  0x00000080
 /* 0x7d58 --> 0x7e70 unused */
 
+#define TG3_PCIE_PHY_TSTCTL            0x00007e2c
+#define  TG3_PCIE_PHY_TSTCTL_PCIE10     0x00000040
+#define  TG3_PCIE_PHY_TSTCTL_PSCRAM     0x00000020
+
 #define TG3_PCIE_EIDLE_DELAY           0x00007e70
 #define  TG3_PCIE_EIDLE_DELAY_MASK      0x0000001f
 #define  TG3_PCIE_EIDLE_DELAY_13_CLKS   0x0000000c
 #define MII_TG3_EXT_STAT               0x11 /* Extended status register */
 #define  MII_TG3_EXT_STAT_LPASS                0x0100
 
+#define MII_TG3_RXR_COUNTERS           0x14 /* Local/Remote Receiver Counts */
 #define MII_TG3_DSP_RW_PORT            0x15 /* DSP coefficient read/write port */
-
+#define MII_TG3_DSP_CONTROL            0x16 /* DSP control register */
 #define MII_TG3_DSP_ADDRESS            0x17 /* DSP address register */
 
 #define MII_TG3_DSP_TAP1               0x0001
 #define MII_TG3_DSP_AADJ1CH0           0x001f
 #define MII_TG3_DSP_AADJ1CH3           0x601f
 #define  MII_TG3_DSP_AADJ1CH3_ADCCKADJ 0x0002
+#define MII_TG3_DSP_EXP1_INT_STAT      0x0f01
 #define MII_TG3_DSP_EXP8               0x0f08
 #define  MII_TG3_DSP_EXP8_REJ2MHz      0x0001
 #define  MII_TG3_DSP_EXP8_AEDW         0x0200
 /* APE shared memory.  Accessible through BAR1 */
 #define TG3_APE_FW_STATUS              0x400c
 #define  APE_FW_STATUS_READY            0x00000100
+#define TG3_APE_FW_FEATURES            0x4010
+#define  TG3_APE_FW_FEATURE_NCSI        0x00000002
 #define TG3_APE_FW_VERSION             0x4018
 #define  APE_FW_VERSION_MAJMSK          0xff000000
 #define  APE_FW_VERSION_MAJSFT          24
@@ -2526,7 +2534,6 @@ struct tg3_link_config {
        /* When we go in and out of low power mode we need
         * to swap with this state.
         */
-       int                             phy_is_low_power;
        u16                             orig_speed;
        u8                              orig_duplex;
        u8                              orig_autoneg;
@@ -2767,7 +2774,6 @@ struct tg3 {
 #define TG3_FLAG_TXD_MBOX_HWBUG                0x00000002
 #define TG3_FLAG_RX_CHECKSUMS          0x00000004
 #define TG3_FLAG_USE_LINKCHG_REG       0x00000008
-#define TG3_FLAG_USE_MI_INTERRUPT      0x00000010
 #define TG3_FLAG_ENABLE_ASF            0x00000020
 #define TG3_FLAG_ASPM_WORKAROUND       0x00000040
 #define TG3_FLAG_POLL_SERDES           0x00000080
@@ -2789,7 +2795,6 @@ struct tg3 {
 #define TG3_FLAG_TX_RECOVERY_PENDING   0x00200000
 #define TG3_FLAG_WOL_CAP               0x00400000
 #define TG3_FLAG_JUMBO_RING_ENABLE     0x00800000
-#define TG3_FLAG_10_100_ONLY           0x01000000
 #define TG3_FLAG_PAUSE_AUTONEG         0x02000000
 #define TG3_FLAG_CPMU_PRESENT          0x04000000
 #define TG3_FLAG_40BIT_DMA_BUG         0x08000000
@@ -2800,22 +2805,15 @@ struct tg3 {
        u32                             tg3_flags2;
 #define TG3_FLG2_RESTART_TIMER         0x00000001
 #define TG3_FLG2_TSO_BUG               0x00000002
-#define TG3_FLG2_NO_ETH_WIRE_SPEED     0x00000004
 #define TG3_FLG2_IS_5788               0x00000008
 #define TG3_FLG2_MAX_RXPEND_64         0x00000010
 #define TG3_FLG2_TSO_CAPABLE           0x00000020
-#define TG3_FLG2_PHY_ADC_BUG           0x00000040
-#define TG3_FLG2_PHY_5704_A0_BUG       0x00000080
-#define TG3_FLG2_PHY_BER_BUG           0x00000100
 #define TG3_FLG2_PCI_EXPRESS           0x00000200
 #define TG3_FLG2_ASF_NEW_HANDSHAKE     0x00000400
 #define TG3_FLG2_HW_AUTONEG            0x00000800
 #define TG3_FLG2_IS_NIC                        0x00001000
-#define TG3_FLG2_PHY_SERDES            0x00002000
-#define TG3_FLG2_CAPACITIVE_COUPLING   0x00004000
 #define TG3_FLG2_FLASH                 0x00008000
 #define TG3_FLG2_HW_TSO_1              0x00010000
-#define TG3_FLG2_SERDES_PREEMPHASIS    0x00020000
 #define TG3_FLG2_5705_PLUS             0x00040000
 #define TG3_FLG2_5750_PLUS             0x00080000
 #define TG3_FLG2_HW_TSO_3              0x00100000
@@ -2823,10 +2821,6 @@ struct tg3 {
 #define TG3_FLG2_USING_MSIX            0x00400000
 #define TG3_FLG2_USING_MSI_OR_MSIX     (TG3_FLG2_USING_MSI | \
                                        TG3_FLG2_USING_MSIX)
-#define TG3_FLG2_MII_SERDES            0x00800000
-#define TG3_FLG2_ANY_SERDES            (TG3_FLG2_PHY_SERDES |  \
-                                       TG3_FLG2_MII_SERDES)
-#define TG3_FLG2_PARALLEL_DETECT       0x01000000
 #define TG3_FLG2_ICH_WORKAROUND                0x02000000
 #define TG3_FLG2_5780_CLASS            0x04000000
 #define TG3_FLG2_HW_TSO_2              0x08000000
@@ -2834,9 +2828,7 @@ struct tg3 {
                                         TG3_FLG2_HW_TSO_2 | \
                                         TG3_FLG2_HW_TSO_3)
 #define TG3_FLG2_1SHOT_MSI             0x10000000
-#define TG3_FLG2_PHY_JITTER_BUG                0x20000000
 #define TG3_FLG2_NO_FWARE_REPORTED     0x40000000
-#define TG3_FLG2_PHY_ADJUST_TRIM       0x80000000
        u32                             tg3_flags3;
 #define TG3_FLG3_NO_NVRAM_ADDR_TRANS   0x00000001
 #define TG3_FLG3_ENABLE_APE            0x00000002
@@ -2844,15 +2836,12 @@ struct tg3 {
 #define TG3_FLG3_5701_DMA_BUG          0x00000008
 #define TG3_FLG3_USE_PHYLIB            0x00000010
 #define TG3_FLG3_MDIOBUS_INITED                0x00000020
-#define TG3_FLG3_PHY_CONNECTED         0x00000080
 #define TG3_FLG3_RGMII_INBAND_DISABLE  0x00000100
 #define TG3_FLG3_RGMII_EXT_IBND_RX_EN  0x00000200
 #define TG3_FLG3_RGMII_EXT_IBND_TX_EN  0x00000400
 #define TG3_FLG3_CLKREQ_BUG            0x00000800
-#define TG3_FLG3_PHY_ENABLE_APD                0x00001000
 #define TG3_FLG3_5755_PLUS             0x00002000
 #define TG3_FLG3_NO_NVRAM              0x00004000
-#define TG3_FLG3_PHY_IS_FET            0x00010000
 #define TG3_FLG3_ENABLE_RSS            0x00020000
 #define TG3_FLG3_ENABLE_TSS            0x00040000
 #define TG3_FLG3_4G_DMA_BNDRY_BUG      0x00080000
@@ -2860,6 +2849,7 @@ struct tg3 {
 #define TG3_FLG3_SHORT_DMA_BUG         0x00200000
 #define TG3_FLG3_USE_JUMBO_BDFLAG      0x00400000
 #define TG3_FLG3_L1PLLPD_EN            0x00800000
+#define TG3_FLG3_5717_PLUS             0x01000000
 
        struct timer_list               timer;
        u16                             timer_counter;
@@ -2956,6 +2946,27 @@ struct tg3 {
         (X) == TG3_PHY_ID_BCM57765 || (X) == TG3_PHY_ID_BCM5719C || \
         (X) == TG3_PHY_ID_BCM8002)
 
+       u32                             phy_flags;
+#define TG3_PHYFLG_IS_LOW_POWER                0x00000001
+#define TG3_PHYFLG_IS_CONNECTED                0x00000002
+#define TG3_PHYFLG_USE_MI_INTERRUPT    0x00000004
+#define TG3_PHYFLG_PHY_SERDES          0x00000010
+#define TG3_PHYFLG_MII_SERDES          0x00000020
+#define TG3_PHYFLG_ANY_SERDES          (TG3_PHYFLG_PHY_SERDES |        \
+                                       TG3_PHYFLG_MII_SERDES)
+#define TG3_PHYFLG_IS_FET              0x00000040
+#define TG3_PHYFLG_10_100_ONLY         0x00000080
+#define TG3_PHYFLG_ENABLE_APD          0x00000100
+#define TG3_PHYFLG_CAPACITIVE_COUPLING 0x00000200
+#define TG3_PHYFLG_NO_ETH_WIRE_SPEED   0x00000400
+#define TG3_PHYFLG_JITTER_BUG          0x00000800
+#define TG3_PHYFLG_ADJUST_TRIM         0x00001000
+#define TG3_PHYFLG_ADC_BUG             0x00002000
+#define TG3_PHYFLG_5704_A0_BUG         0x00004000
+#define TG3_PHYFLG_BER_BUG             0x00008000
+#define TG3_PHYFLG_SERDES_PREEMPHASIS  0x00010000
+#define TG3_PHYFLG_PARALLEL_DETECT     0x00020000
+
        u32                             led_ctrl;
        u32                             phy_otp;
 
index 14e5312e906ef2b4a7da121737c0df348a5014e3..3a8d7efa2acf554c6015de56a66b3390fc7503a1 100644 (file)
@@ -1341,6 +1341,12 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
         if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
                pr_err(PFX "skipping LMC card\n");
                return -ENODEV;
+       } else if (pdev->subsystem_vendor == PCI_VENDOR_ID_SBE &&
+                  (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_T3E3 ||
+                   pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0 ||
+                   pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1)) {
+               pr_err(PFX "skipping SBE T3E3 port\n");
+               return -ENODEV;
        }
 
        /*
index 6ad6fe706312ff4c2cff31034b37428275e2f230..55f3a3e667a9af0291da7c7d4e46c12e84d7ce6a 100644 (file)
@@ -149,6 +149,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
        tfile->tun = tun;
        tun->tfile = tfile;
        tun->socket.file = file;
+       netif_carrier_on(tun->dev);
        dev_hold(tun->dev);
        sock_hold(tun->socket.sk);
        atomic_inc(&tfile->count);
@@ -162,6 +163,7 @@ static void __tun_detach(struct tun_struct *tun)
 {
        /* Detach from net device */
        netif_tx_lock_bh(tun->dev);
+       netif_carrier_off(tun->dev);
        tun->tfile = NULL;
        tun->socket.file = NULL;
        netif_tx_unlock_bh(tun->dev);
@@ -736,8 +738,18 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
                                gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
                        else if (sinfo->gso_type & SKB_GSO_UDP)
                                gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
-                       else
-                               BUG();
+                       else {
+                               printk(KERN_ERR "tun: unexpected GSO type: "
+                                      "0x%x, gso_size %d, hdr_len %d\n",
+                                      sinfo->gso_type, gso.gso_size,
+                                      gso.hdr_len);
+                               print_hex_dump(KERN_ERR, "tun: ",
+                                              DUMP_PREFIX_NONE,
+                                              16, 1, skb->head,
+                                              min((int)gso.hdr_len, 64), true);
+                               WARN_ON_ONCE(1);
+                               return -EINVAL;
+                       }
                        if (sinfo->gso_type & SKB_GSO_TCP_ECN)
                                gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
                } else
@@ -1564,12 +1576,6 @@ static void tun_set_msglevel(struct net_device *dev, u32 value)
 #endif
 }
 
-static u32 tun_get_link(struct net_device *dev)
-{
-       struct tun_struct *tun = netdev_priv(dev);
-       return !!tun->tfile;
-}
-
 static u32 tun_get_rx_csum(struct net_device *dev)
 {
        struct tun_struct *tun = netdev_priv(dev);
@@ -1591,7 +1597,7 @@ static const struct ethtool_ops tun_ethtool_ops = {
        .get_drvinfo    = tun_get_drvinfo,
        .get_msglevel   = tun_get_msglevel,
        .set_msglevel   = tun_set_msglevel,
-       .get_link       = tun_get_link,
+       .get_link       = ethtool_op_get_link,
        .get_rx_csum    = tun_get_rx_csum,
        .set_rx_csum    = tun_set_rx_csum
 };
index e17dd743091532a394fd4671d71623775b5a1cdc..8d532f9b50d073069bbcb109b10767f7c3c09395 100644 (file)
@@ -594,7 +594,7 @@ static void dump_regs(struct ucc_geth_private *ugeth)
 {
        int i;
 
-       ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num);
+       ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num + 1);
        ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs);
 
        ugeth_info("maccfg1    : addr - 0x%08x, val - 0x%08x",
index a3a684cb89a970f3f35d45287249ca982ad2a6ad..6efca66b87663a84ec7f36529c190aa5a107d401 100644 (file)
@@ -477,6 +477,7 @@ static const struct usb_device_id hso_ids[] = {
        {USB_DEVICE(0x0af0, 0x8600)},
        {USB_DEVICE(0x0af0, 0x8800)},
        {USB_DEVICE(0x0af0, 0x8900)},
+       {USB_DEVICE(0x0af0, 0x9000)},
        {USB_DEVICE(0x0af0, 0xd035)},
        {USB_DEVICE(0x0af0, 0xd055)},
        {USB_DEVICE(0x0af0, 0xd155)},
index 7eab4071ea26676d3b582920a1e60081e2111c08..3b03794ac3f56a66a446c098f6003253cd8f09b2 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/usb.h>
 #include <linux/usb/usbnet.h>
 #include <linux/slab.h>
+#include <linux/kernel.h>
 
 #define DRIVER_VERSION         "22-Aug-2005"
 
@@ -158,16 +159,6 @@ int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
 }
 EXPORT_SYMBOL_GPL(usbnet_get_endpoints);
 
-static u8 nibble(unsigned char c)
-{
-       if (likely(isdigit(c)))
-               return c - '0';
-       c = toupper(c);
-       if (likely(isxdigit(c)))
-               return 10 + c - 'A';
-       return 0;
-}
-
 int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
 {
        int             tmp, i;
@@ -183,7 +174,7 @@ int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
        }
        for (i = tmp = 0; i < 6; i++, tmp += 2)
                dev->net->dev_addr [i] =
-                       (nibble(buf [tmp]) << 4) + nibble(buf [tmp + 1]);
+                       (hex_to_bin(buf[tmp]) << 4) + hex_to_bin(buf[tmp + 1]);
        return 0;
 }
 EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
@@ -624,7 +615,7 @@ static void usbnet_terminate_urbs(struct usbnet *dev)
        while (!skb_queue_empty(&dev->rxq)
                && !skb_queue_empty(&dev->txq)
                && !skb_queue_empty(&dev->done)) {
-                       schedule_timeout(UNLINK_TIMEOUT_MS);
+                       schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        netif_dbg(dev, ifdown, dev->net,
                                  "waited for %d urb completions\n", temp);
index 9d64186050f3373505f275e7bde9f67eb3429f37..abe0ff53daf353c1be44876b19cf37f4cefe4403 100644 (file)
@@ -664,8 +664,13 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
        while (len) {
                u32 buf_size;
 
-               buf_size = len > VMXNET3_MAX_TX_BUF_SIZE ?
-                          VMXNET3_MAX_TX_BUF_SIZE : len;
+               if (len < VMXNET3_MAX_TX_BUF_SIZE) {
+                       buf_size = len;
+                       dw2 |= len;
+               } else {
+                       buf_size = VMXNET3_MAX_TX_BUF_SIZE;
+                       /* spec says that for TxDesc.len, 0 == 2^14 */
+               }
 
                tbi = tq->buf_info + tq->tx_ring.next2fill;
                tbi->map_type = VMXNET3_MAP_SINGLE;
@@ -673,13 +678,13 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
                                skb->data + buf_offset, buf_size,
                                PCI_DMA_TODEVICE);
 
-               tbi->len = buf_size; /* this automatically convert 2^14 to 0 */
+               tbi->len = buf_size;
 
                gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
                BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
 
                gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
-               gdesc->dword[2] = cpu_to_le32(dw2 | buf_size);
+               gdesc->dword[2] = cpu_to_le32(dw2);
                gdesc->dword[3] = 0;
 
                dev_dbg(&adapter->netdev->dev,
index 762a6a7763fed22f0c88074f596855a3b70ce033..2121c735cabd6ab3fddf335d302f7803cef87a76 100644 (file)
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.0.13.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.0.14.0-k"
 
 /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01000B00
+#define VMXNET3_DRIVER_VERSION_NUM      0x01000E00
 
 
 /*
index 94d87e80abcd944fc6d3ba1ce733881137c21a74..c7c5605b3728384069e0e3aa187a30b0e85bd512 100644 (file)
@@ -41,6 +41,8 @@
 *
 ******************************************************************************/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/if_vlan.h>
 #include <linux/pci.h>
 #include <linux/slab.h>
@@ -144,7 +146,7 @@ vxge_callback_link_up(struct __vxge_hw_device *hldev)
 
        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
                vdev->ndev->name, __func__, __LINE__);
-       printk(KERN_NOTICE "%s: Link Up\n", vdev->ndev->name);
+       netdev_notice(vdev->ndev, "Link Up\n");
        vdev->stats.link_up++;
 
        netif_carrier_on(vdev->ndev);
@@ -168,7 +170,7 @@ vxge_callback_link_down(struct __vxge_hw_device *hldev)
 
        vxge_debug_entryexit(VXGE_TRACE,
                "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
-       printk(KERN_NOTICE "%s: Link Down\n", vdev->ndev->name);
+       netdev_notice(vdev->ndev, "Link Down\n");
 
        vdev->stats.link_down++;
        netif_carrier_off(vdev->ndev);
@@ -2679,7 +2681,7 @@ vxge_open(struct net_device *dev)
 
        if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
                netif_carrier_on(vdev->ndev);
-               printk(KERN_NOTICE "%s: Link Up\n", vdev->ndev->name);
+               netdev_notice(vdev->ndev, "Link Up\n");
                vdev->stats.link_up++;
        }
 
@@ -2817,7 +2819,7 @@ int do_vxge_close(struct net_device *dev, int do_io)
        }
 
        netif_carrier_off(vdev->ndev);
-       printk(KERN_NOTICE "%s: Link Down\n", vdev->ndev->name);
+       netdev_notice(vdev->ndev, "Link Down\n");
        netif_tx_stop_all_queues(vdev->ndev);
 
        /* Note that at this point xmit() is stopped by upper layer */
@@ -3844,9 +3846,7 @@ static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
        struct vxgedev *vdev = netdev_priv(netdev);
 
        if (pci_enable_device(pdev)) {
-               printk(KERN_ERR "%s: "
-                       "Cannot re-enable device after reset\n",
-                       VXGE_DRIVER_NAME);
+               netdev_err(netdev, "Cannot re-enable device after reset\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }
 
@@ -3871,9 +3871,8 @@ static void vxge_io_resume(struct pci_dev *pdev)
 
        if (netif_running(netdev)) {
                if (vxge_open(netdev)) {
-                       printk(KERN_ERR "%s: "
-                               "Can't bring device back up after reset\n",
-                               VXGE_DRIVER_NAME);
+                       netdev_err(netdev,
+                                  "Can't bring device back up after reset\n");
                        return;
                }
        }
@@ -4430,13 +4429,9 @@ static int __init
 vxge_starter(void)
 {
        int ret = 0;
-       char version[32];
-       snprintf(version, 32, "%s", DRV_VERSION);
 
-       printk(KERN_INFO "%s: Copyright(c) 2002-2010 Exar Corp.\n",
-               VXGE_DRIVER_NAME);
-       printk(KERN_INFO "%s: Driver version: %s\n",
-                       VXGE_DRIVER_NAME, version);
+       pr_info("Copyright(c) 2002-2010 Exar Corp.\n");
+       pr_info("Driver version: %s\n", DRV_VERSION);
 
        verify_bandwidth();
 
index 43b77271532b074eda6e5d2170f1bc74f6c9863e..ad7719fe6d0a2ced65c2db0a27faeeab6550486a 100644 (file)
@@ -15,6 +15,8 @@
  *      Maintainer:  Kevin Curtis  <kevin.curtis@farsite.co.uk>
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/version.h>
@@ -511,21 +513,19 @@ static int fst_debug_mask = { FST_DEBUG };
  * support variable numbers of macro parameters. The inverted if prevents us
  * eating someone else's else clause.
  */
-#define dbg(F,fmt,A...) if ( ! ( fst_debug_mask & (F))) \
-                                ; \
-                        else \
-                                printk ( KERN_DEBUG FST_NAME ": " fmt, ## A )
-
+#define dbg(F, fmt, args...)                                   \
+do {                                                           \
+       if (fst_debug_mask & (F))                               \
+               printk(KERN_DEBUG pr_fmt(fmt), ##args);         \
+} while (0)
 #else
-#define dbg(X...)              /* NOP */
+#define dbg(F, fmt, args...)                                   \
+do {                                                           \
+       if (0)                                                  \
+               printk(KERN_DEBUG pr_fmt(fmt), ##args);         \
+} while (0)
 #endif
 
-/*      Printing short cuts
- */
-#define printk_err(fmt,A...)    printk ( KERN_ERR     FST_NAME ": " fmt, ## A )
-#define printk_warn(fmt,A...)   printk ( KERN_WARNING FST_NAME ": " fmt, ## A )
-#define printk_info(fmt,A...)   printk ( KERN_INFO    FST_NAME ": " fmt, ## A )
-
 /*
  *      PCI ID lookup table
  */
@@ -961,7 +961,7 @@ fst_issue_cmd(struct fst_port_info *port, unsigned short cmd)
                spin_lock_irqsave(&card->card_lock, flags);
 
                if (++safety > 2000) {
-                       printk_err("Mailbox safety timeout\n");
+                       pr_err("Mailbox safety timeout\n");
                        break;
                }
 
@@ -1241,8 +1241,8 @@ fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port)
                 * This seems to happen on the TE1 interface sometimes
                 * so throw the frame away and log the event.
                 */
-               printk_err("Frame received with 0 length. Card %d Port %d\n",
-                          card->card_no, port->index);
+               pr_err("Frame received with 0 length. Card %d Port %d\n",
+                      card->card_no, port->index);
                /* Return descriptor to card */
                FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
 
@@ -1486,9 +1486,8 @@ fst_intr(int dummy, void *dev_id)
         */
        dbg(DBG_INTR, "intr: %d %p\n", card->irq, card);
        if (card->state != FST_RUNNING) {
-               printk_err
-                   ("Interrupt received for card %d in a non running state (%d)\n",
-                    card->card_no, card->state);
+               pr_err("Interrupt received for card %d in a non running state (%d)\n",
+                      card->card_no, card->state);
 
                /* 
                 * It is possible to really be running, i.e. we have re-loaded
@@ -1614,8 +1613,7 @@ fst_intr(int dummy, void *dev_id)
                        break;
 
                default:
-                       printk_err("intr: unknown card event %d. ignored\n",
-                                  event);
+                       pr_err("intr: unknown card event %d. ignored\n", event);
                        break;
                }
 
@@ -1637,13 +1635,13 @@ check_started_ok(struct fst_card_info *card)
 
        /* Check structure version and end marker */
        if (FST_RDW(card, smcVersion) != SMC_VERSION) {
-               printk_err("Bad shared memory version %d expected %d\n",
-                          FST_RDW(card, smcVersion), SMC_VERSION);
+               pr_err("Bad shared memory version %d expected %d\n",
+                      FST_RDW(card, smcVersion), SMC_VERSION);
                card->state = FST_BADVERSION;
                return;
        }
        if (FST_RDL(card, endOfSmcSignature) != END_SIG) {
-               printk_err("Missing shared memory signature\n");
+               pr_err("Missing shared memory signature\n");
                card->state = FST_BADVERSION;
                return;
        }
@@ -1651,11 +1649,11 @@ check_started_ok(struct fst_card_info *card)
        if ((i = FST_RDB(card, taskStatus)) == 0x01) {
                card->state = FST_RUNNING;
        } else if (i == 0xFF) {
-               printk_err("Firmware initialisation failed. Card halted\n");
+               pr_err("Firmware initialisation failed. Card halted\n");
                card->state = FST_HALTED;
                return;
        } else if (i != 0x00) {
-               printk_err("Unknown firmware status 0x%x\n", i);
+               pr_err("Unknown firmware status 0x%x\n", i);
                card->state = FST_HALTED;
                return;
        }
@@ -1665,9 +1663,10 @@ check_started_ok(struct fst_card_info *card)
         * existing firmware etc so we just report it for the moment.
         */
        if (FST_RDL(card, numberOfPorts) != card->nports) {
-               printk_warn("Port count mismatch on card %d."
-                           " Firmware thinks %d we say %d\n", card->card_no,
-                           FST_RDL(card, numberOfPorts), card->nports);
+               pr_warning("Port count mismatch on card %d. "
+                          "Firmware thinks %d we say %d\n",
+                          card->card_no,
+                          FST_RDL(card, numberOfPorts), card->nports);
        }
 }
 
@@ -2090,9 +2089,8 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                 */
 
                if (card->state != FST_RUNNING) {
-                       printk_err
-                           ("Attempt to configure card %d in non-running state (%d)\n",
-                            card->card_no, card->state);
+                       pr_err("Attempt to configure card %d in non-running state (%d)\n",
+                              card->card_no, card->state);
                        return -EIO;
                }
                if (copy_from_user(&info, ifr->ifr_data, sizeof (info))) {
@@ -2384,8 +2382,8 @@ fst_init_card(struct fst_card_info *card)
                 err = register_hdlc_device(card->ports[i].dev);
                 if (err < 0) {
                        int j;
-                        printk_err ("Cannot register HDLC device for port %d"
-                                    " (errno %d)\n", i, -err );
+                       pr_err("Cannot register HDLC device for port %d (errno %d)\n",
+                              i, -err);
                        for (j = i; j < card->nports; j++) {
                                free_netdev(card->ports[j].dev);
                                card->ports[j].dev = NULL;
@@ -2395,10 +2393,10 @@ fst_init_card(struct fst_card_info *card)
                 }
        }
 
-       printk_info("%s-%s: %s IRQ%d, %d ports\n",
-              port_to_dev(&card->ports[0])->name,
-              port_to_dev(&card->ports[card->nports - 1])->name,
-              type_strings[card->type], card->irq, card->nports);
+       pr_info("%s-%s: %s IRQ%d, %d ports\n",
+               port_to_dev(&card->ports[0])->name,
+               port_to_dev(&card->ports[card->nports - 1])->name,
+               type_strings[card->type], card->irq, card->nports);
 }
 
 static const struct net_device_ops fst_ops = {
@@ -2417,19 +2415,17 @@ static const struct net_device_ops fst_ops = {
 static int __devinit
 fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
-       static int firsttime_done = 0;
        static int no_of_cards_added = 0;
        struct fst_card_info *card;
        int err = 0;
        int i;
 
-       if (!firsttime_done) {
-               printk_info("FarSync WAN driver " FST_USER_VERSION
-                      " (c) 2001-2004 FarSite Communications Ltd.\n");
-               firsttime_done = 1;
-               dbg(DBG_ASS, "The value of debug mask is %x\n", fst_debug_mask);
-       }
-
+       printk_once(KERN_INFO
+                   pr_fmt("FarSync WAN driver " FST_USER_VERSION
+                          " (c) 2001-2004 FarSite Communications Ltd.\n"));
+#if FST_DEBUG
+       dbg(DBG_ASS, "The value of debug mask is %x\n", fst_debug_mask);
+#endif
        /*
         * We are going to be clever and allow certain cards not to be
         * configured.  An exclude list can be provided in /etc/modules.conf
@@ -2441,8 +2437,8 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                 */
                for (i = 0; i < fst_excluded_cards; i++) {
                        if ((pdev->devfn) >> 3 == fst_excluded_list[i]) {
-                               printk_info("FarSync PCI device %d not assigned\n",
-                                      (pdev->devfn) >> 3);
+                               pr_info("FarSync PCI device %d not assigned\n",
+                                       (pdev->devfn) >> 3);
                                return -EBUSY;
                        }
                }
@@ -2451,20 +2447,19 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Allocate driver private data */
        card = kzalloc(sizeof (struct fst_card_info), GFP_KERNEL);
        if (card == NULL) {
-               printk_err("FarSync card found but insufficient memory for"
-                          " driver storage\n");
+               pr_err("FarSync card found but insufficient memory for driver storage\n");
                return -ENOMEM;
        }
 
        /* Try to enable the device */
        if ((err = pci_enable_device(pdev)) != 0) {
-               printk_err("Failed to enable card. Err %d\n", -err);
+               pr_err("Failed to enable card. Err %d\n", -err);
                kfree(card);
                return err;
        }
 
        if ((err = pci_request_regions(pdev, "FarSync")) !=0) {
-               printk_err("Failed to allocate regions. Err %d\n", -err);
+               pr_err("Failed to allocate regions. Err %d\n", -err);
                pci_disable_device(pdev);
                kfree(card);
                return err;
@@ -2475,14 +2470,14 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        card->phys_mem = pci_resource_start(pdev, 2);
        card->phys_ctlmem = pci_resource_start(pdev, 3);
        if ((card->mem = ioremap(card->phys_mem, FST_MEMSIZE)) == NULL) {
-               printk_err("Physical memory remap failed\n");
+               pr_err("Physical memory remap failed\n");
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                kfree(card);
                return -ENODEV;
        }
        if ((card->ctlmem = ioremap(card->phys_ctlmem, 0x10)) == NULL) {
-               printk_err("Control memory remap failed\n");
+               pr_err("Control memory remap failed\n");
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                kfree(card);
@@ -2492,7 +2487,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        /* Register the interrupt handler */
        if (request_irq(pdev->irq, fst_intr, IRQF_SHARED, FST_DEV_NAME, card)) {
-               printk_err("Unable to register interrupt %d\n", card->irq);
+               pr_err("Unable to register interrupt %d\n", card->irq);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                iounmap(card->ctlmem);
@@ -2523,7 +2518,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                if (!dev) {
                        while (i--)
                                free_netdev(card->ports[i].dev);
-                       printk_err ("FarSync: out of memory\n");
+                       pr_err("FarSync: out of memory\n");
                         free_irq(card->irq, card);
                         pci_release_regions(pdev);
                         pci_disable_device(pdev);
@@ -2587,7 +2582,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                    pci_alloc_consistent(card->device, FST_MAX_MTU,
                                         &card->rx_dma_handle_card);
                if (card->rx_dma_handle_host == NULL) {
-                       printk_err("Could not allocate rx dma buffer\n");
+                       pr_err("Could not allocate rx dma buffer\n");
                        fst_disable_intr(card);
                        pci_release_regions(pdev);
                        pci_disable_device(pdev);
@@ -2600,7 +2595,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                    pci_alloc_consistent(card->device, FST_MAX_MTU,
                                         &card->tx_dma_handle_card);
                if (card->tx_dma_handle_host == NULL) {
-                       printk_err("Could not allocate tx dma buffer\n");
+                       pr_err("Could not allocate tx dma buffer\n");
                        fst_disable_intr(card);
                        pci_release_regions(pdev);
                        pci_disable_device(pdev);
@@ -2672,7 +2667,7 @@ fst_init(void)
 static void __exit
 fst_cleanup_module(void)
 {
-       printk_info("FarSync WAN driver unloading\n");
+       pr_info("FarSync WAN driver unloading\n");
        pci_unregister_driver(&fst_driver);
 }
 
index 2d7c96d7e865957df251046a7ba5193e94a0c423..eb80243e22df27315f7b271aa71593e4ebe534e9 100644 (file)
@@ -152,6 +152,7 @@ enum {
        /* Device IDs */
        USB_DEVICE_ID_I6050 = 0x0186,
        USB_DEVICE_ID_I6050_2 = 0x0188,
+       USB_DEVICE_ID_I6250 = 0x0187,
 };
 
 
index 0d5081d77dc045f01849676a54d1b13e39ae0b72..d3365ac85dde4639e455b35b65c805cdec1aa552 100644 (file)
@@ -491,6 +491,7 @@ int i2400mu_probe(struct usb_interface *iface,
        switch (id->idProduct) {
        case USB_DEVICE_ID_I6050:
        case USB_DEVICE_ID_I6050_2:
+       case USB_DEVICE_ID_I6250:
                i2400mu->i6050 = 1;
                break;
        default:
@@ -739,6 +740,7 @@ static
 struct usb_device_id i2400mu_id_table[] = {
        { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) },
        { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) },
+       { USB_DEVICE(0x8086, USB_DEVICE_ID_I6250) },
        { USB_DEVICE(0x8086, 0x0181) },
        { USB_DEVICE(0x8086, 0x1403) },
        { USB_DEVICE(0x8086, 0x1405) },
index dabafb874c36ad7da8f62a99fd69b64d45af8dc1..fe7418aefc4a994b00109e98bac4edc7d0e67a6e 100644 (file)
@@ -63,6 +63,7 @@ static bool ar9002_hw_per_calibration(struct ath_hw *ah,
                                      u8 rxchainmask,
                                      struct ath9k_cal_list *currCal)
 {
+       struct ath9k_hw_cal_data *caldata = ah->caldata;
        bool iscaldone = false;
 
        if (currCal->calState == CAL_RUNNING) {
@@ -81,14 +82,14 @@ static bool ar9002_hw_per_calibration(struct ath_hw *ah,
                                }
 
                                currCal->calData->calPostProc(ah, numChains);
-                               ichan->CalValid |= currCal->calData->calType;
+                               caldata->CalValid |= currCal->calData->calType;
                                currCal->calState = CAL_DONE;
                                iscaldone = true;
                        } else {
                                ar9002_hw_setup_calibration(ah, currCal);
                        }
                }
-       } else if (!(ichan->CalValid & currCal->calData->calType)) {
+       } else if (!(caldata->CalValid & currCal->calData->calType)) {
                ath9k_hw_reset_calibration(ah, currCal);
        }
 
@@ -686,8 +687,13 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah,
 {
        bool iscaldone = true;
        struct ath9k_cal_list *currCal = ah->cal_list_curr;
+       bool nfcal, nfcal_pending = false;
 
-       if (currCal &&
+       nfcal = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF);
+       if (ah->caldata)
+               nfcal_pending = ah->caldata->nfcal_pending;
+
+       if (currCal && !nfcal &&
            (currCal->calState == CAL_RUNNING ||
             currCal->calState == CAL_WAITING)) {
                iscaldone = ar9002_hw_per_calibration(ah, chan,
@@ -703,7 +709,7 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah,
        }
 
        /* Do NF cal only at longer intervals */
-       if (longcal) {
+       if (longcal || nfcal_pending) {
                /* Do periodic PAOffset Cal */
                ar9002_hw_pa_cal(ah, false);
                ar9002_hw_olc_temp_compensation(ah);
@@ -712,16 +718,18 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah,
                 * Get the value from the previous NF cal and update
                 * history buffer.
                 */
-               ath9k_hw_getnf(ah, chan);
-
-               /*
-                * Load the NF from history buffer of the current channel.
-                * NF is slow time-variant, so it is OK to use a historical
-                * value.
-                */
-               ath9k_hw_loadnf(ah, ah->curchan);
+               if (ath9k_hw_getnf(ah, chan)) {
+                       /*
+                        * Load the NF from history buffer of the current
+                        * channel.
+                        * NF is slow time-variant, so it is OK to use a
+                        * historical value.
+                        */
+                       ath9k_hw_loadnf(ah, ah->curchan);
+               }
 
-               ath9k_hw_start_nfcal(ah);
+               if (longcal)
+                       ath9k_hw_start_nfcal(ah, false);
        }
 
        return iscaldone;
@@ -869,8 +877,10 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
        ar9002_hw_pa_cal(ah, true);
 
        /* Do NF Calibration after DC offset and other calibrations */
-       REG_WRITE(ah, AR_PHY_AGC_CONTROL,
-                 REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_NF);
+       ath9k_hw_start_nfcal(ah, true);
+
+       if (ah->caldata)
+               ah->caldata->nfcal_pending = true;
 
        ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
 
@@ -901,7 +911,8 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
                        ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
        }
 
-       chan->CalValid = 0;
+       if (ah->caldata)
+               ah->caldata->CalValid = 0;
 
        return true;
 }
index 5a06503991368f1f73bef5ed8df588970f68000f..4674ea8c9c99497add7587ff30c6f94c7f7f6ef1 100644 (file)
@@ -68,6 +68,7 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah,
                                      u8 rxchainmask,
                                      struct ath9k_cal_list *currCal)
 {
+       struct ath9k_hw_cal_data *caldata = ah->caldata;
        /* Cal is assumed not done until explicitly set below */
        bool iscaldone = false;
 
@@ -95,7 +96,7 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah,
                                currCal->calData->calPostProc(ah, numChains);
 
                                /* Calibration has finished. */
-                               ichan->CalValid |= currCal->calData->calType;
+                               caldata->CalValid |= currCal->calData->calType;
                                currCal->calState = CAL_DONE;
                                iscaldone = true;
                        } else {
@@ -106,7 +107,7 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah,
                        ar9003_hw_setup_calibration(ah, currCal);
                        }
                }
-       } else if (!(ichan->CalValid & currCal->calData->calType)) {
+       } else if (!(caldata->CalValid & currCal->calData->calType)) {
                /* If current cal is marked invalid in channel, kick it off */
                ath9k_hw_reset_calibration(ah, currCal);
        }
@@ -148,6 +149,12 @@ static bool ar9003_hw_calibrate(struct ath_hw *ah,
 
        /* Do NF cal only at longer intervals */
        if (longcal) {
+               /*
+                * Get the value from the previous NF cal and update
+                * history buffer.
+                */
+               ath9k_hw_getnf(ah, chan);
+
                /*
                 * Load the NF from history buffer of the current channel.
                 * NF is slow time-variant, so it is OK to use a historical
@@ -156,7 +163,7 @@ static bool ar9003_hw_calibrate(struct ath_hw *ah,
                ath9k_hw_loadnf(ah, ah->curchan);
 
                /* start NF calibration, without updating BB NF register */
-               ath9k_hw_start_nfcal(ah);
+               ath9k_hw_start_nfcal(ah, false);
        }
 
        return iscaldone;
@@ -762,6 +769,8 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
        /* Revert chainmasks to their original values before NF cal */
        ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
 
+       ath9k_hw_start_nfcal(ah, true);
+
        /* Initialize list pointers */
        ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
 
@@ -785,7 +794,8 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
        if (ah->cal_list_curr)
                ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
 
-       chan->CalValid = 0;
+       if (ah->caldata)
+               ah->caldata->CalValid = 0;
 
        return true;
 }
index ace8d2678b18a890c3d64b2756623ce0f29be9c4..b883b174385b822e98cb46e0f83b8024159cf18c 100644 (file)
 #define LE16(x) __constant_cpu_to_le16(x)
 #define LE32(x) __constant_cpu_to_le32(x)
 
+/* Local defines to distinguish between extension and control CTL's */
+#define EXT_ADDITIVE (0x8000)
+#define CTL_11A_EXT (CTL_11A | EXT_ADDITIVE)
+#define CTL_11G_EXT (CTL_11G | EXT_ADDITIVE)
+#define CTL_11B_EXT (CTL_11B | EXT_ADDITIVE)
+#define REDUCE_SCALED_POWER_BY_TWO_CHAIN     6  /* 10*log10(2)*2 */
+#define REDUCE_SCALED_POWER_BY_THREE_CHAIN   9  /* 10*log10(3)*2 */
+#define PWRINCR_3_TO_1_CHAIN      9             /* 10*log10(3)*2 */
+#define PWRINCR_3_TO_2_CHAIN      3             /* floor(10*log10(3/2)*2) */
+#define PWRINCR_2_TO_1_CHAIN      6             /* 10*log10(2)*2 */
+
+#define SUB_NUM_CTL_MODES_AT_5G_40 2    /* excluding HT40, EXT-OFDM */
+#define SUB_NUM_CTL_MODES_AT_2G_40 3    /* excluding HT40, EXT-OFDM, EXT-CCK */
+
 static const struct ar9300_eeprom ar9300_default = {
        .eepromVersion = 2,
        .templateVersion = 2,
@@ -609,6 +623,14 @@ static const struct ar9300_eeprom ar9300_default = {
         }
 };
 
+static u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz)
+{
+       if (fbin == AR9300_BCHAN_UNUSED)
+               return fbin;
+
+       return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin));
+}
+
 static int ath9k_hw_ar9300_check_eeprom(struct ath_hw *ah)
 {
        return 0;
@@ -1417,9 +1439,9 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
 #undef POW_SM
 }
 
-static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq)
+static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq,
+                                             u8 *targetPowerValT2)
 {
-       u8 targetPowerValT2[ar9300RateSize];
        /* XXX: hard code for now, need to get from eeprom struct */
        u8 ht40PowerIncForPdadc = 0;
        bool is2GHz = false;
@@ -1553,9 +1575,6 @@ static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq)
                          "TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]);
                i++;
        }
-
-       /* Write target power array to registers */
-       ar9003_hw_tx_power_regwrite(ah, targetPowerValT2);
 }
 
 static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
@@ -1799,14 +1818,369 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
        return 0;
 }
 
+static u16 ar9003_hw_get_direct_edge_power(struct ar9300_eeprom *eep,
+                                          int idx,
+                                          int edge,
+                                          bool is2GHz)
+{
+       struct cal_ctl_data_2g *ctl_2g = eep->ctlPowerData_2G;
+       struct cal_ctl_data_5g *ctl_5g = eep->ctlPowerData_5G;
+
+       if (is2GHz)
+               return ctl_2g[idx].ctlEdges[edge].tPower;
+       else
+               return ctl_5g[idx].ctlEdges[edge].tPower;
+}
+
+static u16 ar9003_hw_get_indirect_edge_power(struct ar9300_eeprom *eep,
+                                            int idx,
+                                            unsigned int edge,
+                                            u16 freq,
+                                            bool is2GHz)
+{
+       struct cal_ctl_data_2g *ctl_2g = eep->ctlPowerData_2G;
+       struct cal_ctl_data_5g *ctl_5g = eep->ctlPowerData_5G;
+
+       u8 *ctl_freqbin = is2GHz ?
+               &eep->ctl_freqbin_2G[idx][0] :
+               &eep->ctl_freqbin_5G[idx][0];
+
+       if (is2GHz) {
+               if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 1) < freq &&
+                   ctl_2g[idx].ctlEdges[edge - 1].flag)
+                       return ctl_2g[idx].ctlEdges[edge - 1].tPower;
+       } else {
+               if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 0) < freq &&
+                   ctl_5g[idx].ctlEdges[edge - 1].flag)
+                       return ctl_5g[idx].ctlEdges[edge - 1].tPower;
+       }
+
+       return AR9300_MAX_RATE_POWER;
+}
+
+/*
+ * Find the maximum conformance test limit for the given channel and CTL info
+ */
+static u16 ar9003_hw_get_max_edge_power(struct ar9300_eeprom *eep,
+                                       u16 freq, int idx, bool is2GHz)
+{
+       u16 twiceMaxEdgePower = AR9300_MAX_RATE_POWER;
+       u8 *ctl_freqbin = is2GHz ?
+               &eep->ctl_freqbin_2G[idx][0] :
+               &eep->ctl_freqbin_5G[idx][0];
+       u16 num_edges = is2GHz ?
+               AR9300_NUM_BAND_EDGES_2G : AR9300_NUM_BAND_EDGES_5G;
+       unsigned int edge;
+
+       /* Get the edge power */
+       for (edge = 0;
+            (edge < num_edges) && (ctl_freqbin[edge] != AR9300_BCHAN_UNUSED);
+            edge++) {
+               /*
+                * If there's an exact channel match or an inband flag set
+                * on the lower channel use the given rdEdgePower
+                */
+               if (freq == ath9k_hw_fbin2freq(ctl_freqbin[edge], is2GHz)) {
+                       twiceMaxEdgePower =
+                               ar9003_hw_get_direct_edge_power(eep, idx,
+                                                               edge, is2GHz);
+                       break;
+               } else if ((edge > 0) &&
+                          (freq < ath9k_hw_fbin2freq(ctl_freqbin[edge],
+                                                     is2GHz))) {
+                       twiceMaxEdgePower =
+                               ar9003_hw_get_indirect_edge_power(eep, idx,
+                                                                 edge, freq,
+                                                                 is2GHz);
+                       /*
+                        * Leave loop - no more affecting edges possible in
+                        * this monotonic increasing list
+                        */
+                       break;
+               }
+       }
+       return twiceMaxEdgePower;
+}
+
+static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
+                                              struct ath9k_channel *chan,
+                                              u8 *pPwrArray, u16 cfgCtl,
+                                              u8 twiceAntennaReduction,
+                                              u8 twiceMaxRegulatoryPower,
+                                              u16 powerLimit)
+{
+       struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ar9300_eeprom *pEepData = &ah->eeprom.ar9300_eep;
+       u16 twiceMaxEdgePower = AR9300_MAX_RATE_POWER;
+       static const u16 tpScaleReductionTable[5] = {
+               0, 3, 6, 9, AR9300_MAX_RATE_POWER
+       };
+       int i;
+       int16_t  twiceLargestAntenna;
+       u16 scaledPower = 0, minCtlPower, maxRegAllowedPower;
+       u16 ctlModesFor11a[] = {
+               CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40
+       };
+       u16 ctlModesFor11g[] = {
+               CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT,
+               CTL_11G_EXT, CTL_2GHT40
+       };
+       u16 numCtlModes, *pCtlMode, ctlMode, freq;
+       struct chan_centers centers;
+       u8 *ctlIndex;
+       u8 ctlNum;
+       u16 twiceMinEdgePower;
+       bool is2ghz = IS_CHAN_2GHZ(chan);
+
+       ath9k_hw_get_channel_centers(ah, chan, &centers);
+
+       /* Compute TxPower reduction due to Antenna Gain */
+       if (is2ghz)
+               twiceLargestAntenna = pEepData->modalHeader2G.antennaGain;
+       else
+               twiceLargestAntenna = pEepData->modalHeader5G.antennaGain;
+
+       twiceLargestAntenna = (int16_t)min((twiceAntennaReduction) -
+                               twiceLargestAntenna, 0);
+
+       /*
+        * scaledPower is the minimum of the user input power level
+        * and the regulatory allowed power level
+        */
+       maxRegAllowedPower = twiceMaxRegulatoryPower + twiceLargestAntenna;
+
+       if (regulatory->tp_scale != ATH9K_TP_SCALE_MAX) {
+               maxRegAllowedPower -=
+                       (tpScaleReductionTable[(regulatory->tp_scale)] * 2);
+       }
+
+       scaledPower = min(powerLimit, maxRegAllowedPower);
+
+       /*
+        * Reduce scaled Power by number of chains active to get
+        * to per chain tx power level
+        */
+       switch (ar5416_get_ntxchains(ah->txchainmask)) {
+       case 1:
+               break;
+       case 2:
+               scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
+               break;
+       case 3:
+               scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
+               break;
+       }
+
+       scaledPower = max((u16)0, scaledPower);
+
+       /*
+        * Get target powers from EEPROM - our baseline for TX Power
+        */
+       if (is2ghz) {
+               /* Setup for CTL modes */
+               /* CTL_11B, CTL_11G, CTL_2GHT20 */
+               numCtlModes =
+                       ARRAY_SIZE(ctlModesFor11g) -
+                                  SUB_NUM_CTL_MODES_AT_2G_40;
+               pCtlMode = ctlModesFor11g;
+               if (IS_CHAN_HT40(chan))
+                       /* All 2G CTL's */
+                       numCtlModes = ARRAY_SIZE(ctlModesFor11g);
+       } else {
+               /* Setup for CTL modes */
+               /* CTL_11A, CTL_5GHT20 */
+               numCtlModes = ARRAY_SIZE(ctlModesFor11a) -
+                                        SUB_NUM_CTL_MODES_AT_5G_40;
+               pCtlMode = ctlModesFor11a;
+               if (IS_CHAN_HT40(chan))
+                       /* All 5G CTL's */
+                       numCtlModes = ARRAY_SIZE(ctlModesFor11a);
+       }
+
+       /*
+        * For MIMO, need to apply regulatory caps individually across
+        * dynamically running modes: CCK, OFDM, HT20, HT40
+        *
+        * The outer loop walks through each possible applicable runtime mode.
+        * The inner loop walks through each ctlIndex entry in EEPROM.
+        * The ctl value is encoded as [7:4] == test group, [3:0] == test mode.
+        */
+       for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) {
+               bool isHt40CtlMode = (pCtlMode[ctlMode] == CTL_5GHT40) ||
+                       (pCtlMode[ctlMode] == CTL_2GHT40);
+               if (isHt40CtlMode)
+                       freq = centers.synth_center;
+               else if (pCtlMode[ctlMode] & EXT_ADDITIVE)
+                       freq = centers.ext_center;
+               else
+                       freq = centers.ctl_center;
+
+               ath_print(common, ATH_DBG_REGULATORY,
+                         "LOOP-Mode ctlMode %d < %d, isHt40CtlMode %d, "
+                         "EXT_ADDITIVE %d\n",
+                         ctlMode, numCtlModes, isHt40CtlMode,
+                         (pCtlMode[ctlMode] & EXT_ADDITIVE));
+
+               /* walk through each CTL index stored in EEPROM */
+               if (is2ghz) {
+                       ctlIndex = pEepData->ctlIndex_2G;
+                       ctlNum = AR9300_NUM_CTLS_2G;
+               } else {
+                       ctlIndex = pEepData->ctlIndex_5G;
+                       ctlNum = AR9300_NUM_CTLS_5G;
+               }
+
+               for (i = 0; (i < ctlNum) && ctlIndex[i]; i++) {
+                       ath_print(common, ATH_DBG_REGULATORY,
+                                 "LOOP-Ctlidx %d: cfgCtl 0x%2.2x "
+                                 "pCtlMode 0x%2.2x ctlIndex 0x%2.2x "
+                                 "chan %d\n",
+                                 i, cfgCtl, pCtlMode[ctlMode], ctlIndex[i],
+                                 chan->channel);
+
+                               /*
+                                * compare test group from regulatory
+                                * channel list with test mode from pCtlMode
+                                * list
+                                */
+                               if ((((cfgCtl & ~CTL_MODE_M) |
+                                      (pCtlMode[ctlMode] & CTL_MODE_M)) ==
+                                       ctlIndex[i]) ||
+                                   (((cfgCtl & ~CTL_MODE_M) |
+                                      (pCtlMode[ctlMode] & CTL_MODE_M)) ==
+                                    ((ctlIndex[i] & CTL_MODE_M) |
+                                      SD_NO_CTL))) {
+                                       twiceMinEdgePower =
+                                         ar9003_hw_get_max_edge_power(pEepData,
+                                                                      freq, i,
+                                                                      is2ghz);
+
+                                       if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL)
+                                               /*
+                                                * Find the minimum of all CTL
+                                                * edge powers that apply to
+                                                * this channel
+                                                */
+                                               twiceMaxEdgePower =
+                                                       min(twiceMaxEdgePower,
+                                                           twiceMinEdgePower);
+                                       else {
+                                               /* specific */
+                                               twiceMaxEdgePower =
+                                                       twiceMinEdgePower;
+                                               break;
+                                       }
+                               }
+                       }
+
+                       minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower);
+
+                       ath_print(common, ATH_DBG_REGULATORY,
+                                 "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d "
+                                 "sP %d minCtlPwr %d\n",
+                                 ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
+                                 scaledPower, minCtlPower);
+
+                       /* Apply ctl mode to correct target power set */
+                       switch (pCtlMode[ctlMode]) {
+                       case CTL_11B:
+                               for (i = ALL_TARGET_LEGACY_1L_5L;
+                                    i <= ALL_TARGET_LEGACY_11S; i++)
+                                       pPwrArray[i] =
+                                         (u8)min((u16)pPwrArray[i],
+                                                 minCtlPower);
+                               break;
+                       case CTL_11A:
+                       case CTL_11G:
+                               for (i = ALL_TARGET_LEGACY_6_24;
+                                    i <= ALL_TARGET_LEGACY_54; i++)
+                                       pPwrArray[i] =
+                                         (u8)min((u16)pPwrArray[i],
+                                                 minCtlPower);
+                               break;
+                       case CTL_5GHT20:
+                       case CTL_2GHT20:
+                               for (i = ALL_TARGET_HT20_0_8_16;
+                                    i <= ALL_TARGET_HT20_21; i++)
+                                       pPwrArray[i] =
+                                         (u8)min((u16)pPwrArray[i],
+                                                 minCtlPower);
+                               pPwrArray[ALL_TARGET_HT20_22] =
+                                 (u8)min((u16)pPwrArray[ALL_TARGET_HT20_22],
+                                         minCtlPower);
+                               pPwrArray[ALL_TARGET_HT20_23] =
+                                 (u8)min((u16)pPwrArray[ALL_TARGET_HT20_23],
+                                          minCtlPower);
+                               break;
+                       case CTL_5GHT40:
+                       case CTL_2GHT40:
+                               for (i = ALL_TARGET_HT40_0_8_16;
+                                    i <= ALL_TARGET_HT40_23; i++)
+                                       pPwrArray[i] =
+                                         (u8)min((u16)pPwrArray[i],
+                                                 minCtlPower);
+                               break;
+                       default:
+                               break;
+                       }
+       } /* end ctl mode checking */
+}
+
 static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
                                        struct ath9k_channel *chan, u16 cfgCtl,
                                        u8 twiceAntennaReduction,
                                        u8 twiceMaxRegulatoryPower,
                                        u8 powerLimit)
 {
-       ah->txpower_limit = powerLimit;
-       ar9003_hw_set_target_power_eeprom(ah, chan->channel);
+       struct ath_common *common = ath9k_hw_common(ah);
+       u8 targetPowerValT2[ar9300RateSize];
+       unsigned int i = 0;
+
+       ar9003_hw_set_target_power_eeprom(ah, chan->channel, targetPowerValT2);
+       ar9003_hw_set_power_per_rate_table(ah, chan,
+                                          targetPowerValT2, cfgCtl,
+                                          twiceAntennaReduction,
+                                          twiceMaxRegulatoryPower,
+                                          powerLimit);
+
+       while (i < ar9300RateSize) {
+               ath_print(common, ATH_DBG_EEPROM,
+                         "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
+               i++;
+               ath_print(common, ATH_DBG_EEPROM,
+                         "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
+               i++;
+               ath_print(common, ATH_DBG_EEPROM,
+                         "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
+               i++;
+               ath_print(common, ATH_DBG_EEPROM,
+                         "TPC[%02d] 0x%08x\n\n", i, targetPowerValT2[i]);
+               i++;
+       }
+
+       /* Write target power array to registers */
+       ar9003_hw_tx_power_regwrite(ah, targetPowerValT2);
+
+       /*
+        * This is the TX power we send back to driver core,
+        * and it can use to pass to userspace to display our
+        * currently configured TX power setting.
+        *
+        * Since power is rate dependent, use one of the indices
+        * from the AR9300_Rates enum to select an entry from
+        * targetPowerValT2[] to report. Currently returns the
+        * power for HT40 MCS 0, HT20 MCS 0, or OFDM 6 Mbps
+        * as CCK power is less interesting (?).
+        */
+       i = ALL_TARGET_LEGACY_6_24; /* legacy */
+       if (IS_CHAN_HT40(chan))
+               i = ALL_TARGET_HT40_0_8_16; /* ht40 */
+       else if (IS_CHAN_HT20(chan))
+               i = ALL_TARGET_HT20_0_8_16; /* ht20 */
+
+       ah->txpower_limit = targetPowerValT2[i];
+
        ar9003_hw_calibration_apply(ah, chan->channel);
 }
 
index 49e0c865ce5c8da2ffb12a0beb9c31cf0d1616e0..7c38229ba670a28a624ab521a02820eae581c10a 100644 (file)
@@ -577,10 +577,11 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
 }
 
 void ar9003_paprd_populate_single_table(struct ath_hw *ah,
-                                       struct ath9k_channel *chan, int chain)
+                                       struct ath9k_hw_cal_data *caldata,
+                                       int chain)
 {
-       u32 *paprd_table_val = chan->pa_table[chain];
-       u32 small_signal_gain = chan->small_signal_gain[chain];
+       u32 *paprd_table_val = caldata->pa_table[chain];
+       u32 small_signal_gain = caldata->small_signal_gain[chain];
        u32 training_power;
        u32 reg = 0;
        int i;
@@ -654,17 +655,17 @@ int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain)
 }
 EXPORT_SYMBOL(ar9003_paprd_setup_gain_table);
 
-int ar9003_paprd_create_curve(struct ath_hw *ah, struct ath9k_channel *chan,
-                             int chain)
+int ar9003_paprd_create_curve(struct ath_hw *ah,
+                             struct ath9k_hw_cal_data *caldata, int chain)
 {
-       u16 *small_signal_gain = &chan->small_signal_gain[chain];
-       u32 *pa_table = chan->pa_table[chain];
+       u16 *small_signal_gain = &caldata->small_signal_gain[chain];
+       u32 *pa_table = caldata->pa_table[chain];
        u32 *data_L, *data_U;
        int i, status = 0;
        u32 *buf;
        u32 reg;
 
-       memset(chan->pa_table[chain], 0, sizeof(chan->pa_table[chain]));
+       memset(caldata->pa_table[chain], 0, sizeof(caldata->pa_table[chain]));
 
        buf = kmalloc(2 * 48 * sizeof(u32), GFP_ATOMIC);
        if (!buf)
index a753a431bb13f186b7587cb0b67c10e76699a7d2..a491854fa38aa7173c5b976c54da6551f4f96aa9 100644 (file)
@@ -542,7 +542,11 @@ static void ar9003_hw_prog_ini(struct ath_hw *ah,
                u32 reg = INI_RA(iniArr, i, 0);
                u32 val = INI_RA(iniArr, i, column);
 
-               REG_WRITE(ah, reg, val);
+               if (reg >= 0x16000 && reg < 0x17000)
+                       ath9k_hw_analog_shift_regwrite(ah, reg, val);
+               else
+                       REG_WRITE(ah, reg, val);
+
                DO_DELAY(regWrites);
        }
 }
index 998ae2c49ed27abb8cbe8a1fb8fe8b37adc0468c..07f26ee7a7235ac0f3c48c7966630ce849217cfc 100644 (file)
@@ -510,7 +510,7 @@ void ath_deinit_leds(struct ath_softc *sc);
 #define SC_OP_BEACONS                BIT(1)
 #define SC_OP_RXAGGR                 BIT(2)
 #define SC_OP_TXAGGR                 BIT(3)
-#define SC_OP_FULL_RESET             BIT(4)
+#define SC_OP_OFFCHANNEL             BIT(4)
 #define SC_OP_PREAMBLE_SHORT         BIT(5)
 #define SC_OP_PROTECT_ENABLE         BIT(6)
 #define SC_OP_RXFLUSH                BIT(7)
@@ -609,6 +609,7 @@ struct ath_softc {
 struct ath_wiphy {
        struct ath_softc *sc; /* shared for all virtual wiphys */
        struct ieee80211_hw *hw;
+       struct ath9k_hw_cal_data caldata;
        enum ath_wiphy_state {
                ATH_WIPHY_INACTIVE,
                ATH_WIPHY_ACTIVE,
index 139289e4e933dfba2345d74add499174274b0571..45208690c0ec3d5cf747d0ac18218ddeedaf20f8 100644 (file)
 /* We can tune this as we go by monitoring really low values */
 #define ATH9K_NF_TOO_LOW       -60
 
-/* AR5416 may return very high value (like -31 dBm), in those cases the nf
- * is incorrect and we should use the static NF value. Later we can try to
- * find out why they are reporting these values */
-
-static bool ath9k_hw_nf_in_range(struct ath_hw *ah, s16 nf)
-{
-       if (nf > ATH9K_NF_TOO_LOW) {
-               ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
-                         "noise floor value detected (%d) is "
-                         "lower than what we think is a "
-                         "reasonable value (%d)\n",
-                         nf, ATH9K_NF_TOO_LOW);
-               return false;
-       }
-       return true;
-}
-
 static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer)
 {
        int16_t nfval;
@@ -121,6 +104,19 @@ void ath9k_hw_reset_calibration(struct ath_hw *ah,
        ah->cal_samples = 0;
 }
 
+static s16 ath9k_hw_get_default_nf(struct ath_hw *ah,
+                                  struct ath9k_channel *chan)
+{
+       struct ath_nf_limits *limit;
+
+       if (!chan || IS_CHAN_2GHZ(chan))
+               limit = &ah->nf_2g;
+       else
+               limit = &ah->nf_5g;
+
+       return limit->nominal;
+}
+
 /* This is done for the currently configured channel */
 bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
 {
@@ -128,7 +124,7 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
        struct ieee80211_conf *conf = &common->hw->conf;
        struct ath9k_cal_list *currCal = ah->cal_list_curr;
 
-       if (!ah->curchan)
+       if (!ah->caldata)
                return true;
 
        if (!AR_SREV_9100(ah) && !AR_SREV_9160_10_OR_LATER(ah))
@@ -151,37 +147,55 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
                  "Resetting Cal %d state for channel %u\n",
                  currCal->calData->calType, conf->channel->center_freq);
 
-       ah->curchan->CalValid &= ~currCal->calData->calType;
+       ah->caldata->CalValid &= ~currCal->calData->calType;
        currCal->calState = CAL_WAITING;
 
        return false;
 }
 EXPORT_SYMBOL(ath9k_hw_reset_calvalid);
 
-void ath9k_hw_start_nfcal(struct ath_hw *ah)
+void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update)
 {
+       if (ah->caldata)
+               ah->caldata->nfcal_pending = true;
+
        REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
                    AR_PHY_AGC_CONTROL_ENABLE_NF);
-       REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
+
+       if (update)
+               REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+                   AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
+       else
+               REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
                    AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
+
        REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
 }
 
 void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
 {
-       struct ath9k_nfcal_hist *h;
+       struct ath9k_nfcal_hist *h = NULL;
        unsigned i, j;
        int32_t val;
        u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
        struct ath_common *common = ath9k_hw_common(ah);
+       s16 default_nf = ath9k_hw_get_default_nf(ah, chan);
 
-       h = ah->nfCalHist;
+       if (ah->caldata)
+               h = ah->caldata->nfCalHist;
 
        for (i = 0; i < NUM_NF_READINGS; i++) {
                if (chainmask & (1 << i)) {
+                       s16 nfval;
+
+                       if (h)
+                               nfval = h[i].privNF;
+                       else
+                               nfval = default_nf;
+
                        val = REG_READ(ah, ah->nf_regs[i]);
                        val &= 0xFFFFFE00;
-                       val |= (((u32) (h[i].privNF) << 1) & 0x1ff);
+                       val |= (((u32) nfval << 1) & 0x1ff);
                        REG_WRITE(ah, ah->nf_regs[i], val);
                }
        }
@@ -277,22 +291,25 @@ static void ath9k_hw_nf_sanitize(struct ath_hw *ah, s16 *nf)
        }
 }
 
-int16_t ath9k_hw_getnf(struct ath_hw *ah,
-                      struct ath9k_channel *chan)
+bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
 {
        struct ath_common *common = ath9k_hw_common(ah);
        int16_t nf, nfThresh;
        int16_t nfarray[NUM_NF_READINGS] = { 0 };
        struct ath9k_nfcal_hist *h;
        struct ieee80211_channel *c = chan->chan;
+       struct ath9k_hw_cal_data *caldata = ah->caldata;
+
+       if (!caldata)
+               return false;
 
        chan->channelFlags &= (~CHANNEL_CW_INT);
        if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) {
                ath_print(common, ATH_DBG_CALIBRATE,
                          "NF did not complete in calibration window\n");
                nf = 0;
-               chan->rawNoiseFloor = nf;
-               return chan->rawNoiseFloor;
+               caldata->rawNoiseFloor = nf;
+               return false;
        } else {
                ath9k_hw_do_getnf(ah, nfarray);
                ath9k_hw_nf_sanitize(ah, nfarray);
@@ -307,47 +324,40 @@ int16_t ath9k_hw_getnf(struct ath_hw *ah,
                }
        }
 
-       h = ah->nfCalHist;
-
+       h = caldata->nfCalHist;
+       caldata->nfcal_pending = false;
        ath9k_hw_update_nfcal_hist_buffer(h, nfarray);
-       chan->rawNoiseFloor = h[0].privNF;
-
-       return chan->rawNoiseFloor;
+       caldata->rawNoiseFloor = h[0].privNF;
+       return true;
 }
 
-void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah)
+void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
+                                 struct ath9k_channel *chan)
 {
-       struct ath_nf_limits *limit;
+       struct ath9k_nfcal_hist *h;
+       s16 default_nf;
        int i, j;
 
-       if (!ah->curchan || IS_CHAN_2GHZ(ah->curchan))
-               limit = &ah->nf_2g;
-       else
-               limit = &ah->nf_5g;
+       if (!ah->caldata)
+               return;
 
+       h = ah->caldata->nfCalHist;
+       default_nf = ath9k_hw_get_default_nf(ah, chan);
        for (i = 0; i < NUM_NF_READINGS; i++) {
-               ah->nfCalHist[i].currIndex = 0;
-               ah->nfCalHist[i].privNF = limit->nominal;
-               ah->nfCalHist[i].invalidNFcount =
-                       AR_PHY_CCA_FILTERWINDOW_LENGTH;
+               h[i].currIndex = 0;
+               h[i].privNF = default_nf;
+               h[i].invalidNFcount = AR_PHY_CCA_FILTERWINDOW_LENGTH;
                for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) {
-                       ah->nfCalHist[i].nfCalBuffer[j] = limit->nominal;
+                       h[i].nfCalBuffer[j] = default_nf;
                }
        }
 }
 
 s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
 {
-       s16 nf;
-
-       if (chan->rawNoiseFloor == 0)
-               nf = -96;
-       else
-               nf = chan->rawNoiseFloor;
-
-       if (!ath9k_hw_nf_in_range(ah, nf))
-               nf = ATH_DEFAULT_NOISE_FLOOR;
+       if (!ah->caldata || !ah->caldata->rawNoiseFloor)
+               return ath9k_hw_get_default_nf(ah, chan);
 
-       return nf;
+       return ah->caldata->rawNoiseFloor;
 }
 EXPORT_SYMBOL(ath9k_hw_getchan_noise);
index cd60d09cdda7646ad7c9db67a9aa3cb293649dae..0a304b3eeeb6dad1e414ee362888900a9d811e24 100644 (file)
@@ -108,11 +108,11 @@ struct ath9k_pacal_info{
 };
 
 bool ath9k_hw_reset_calvalid(struct ath_hw *ah);
-void ath9k_hw_start_nfcal(struct ath_hw *ah);
+void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update);
 void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan);
-int16_t ath9k_hw_getnf(struct ath_hw *ah,
-                      struct ath9k_channel *chan);
-void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah);
+bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan);
+void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
+                                 struct ath9k_channel *chan);
 s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan);
 void ath9k_hw_reset_calibration(struct ath_hw *ah,
                                struct ath9k_cal_list *currCal);
index 3756400e6bf95658903a1675cd85efd65d4222c4..43b9e21bc56284da1246ec1020cd23907a11d0df 100644 (file)
@@ -353,6 +353,8 @@ struct ath9k_htc_priv {
        u16 seq_no;
        u32 bmiss_cnt;
 
+       struct ath9k_hw_cal_data caldata[38];
+
        spinlock_t beacon_lock;
 
        bool tx_queues_stop;
index cf9bcc67ade2ea0f8c8a34b1d605c5e91cda2772..ebed9d1691a5110ee3222ab12e652da4dcdde9d5 100644 (file)
@@ -125,6 +125,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
        struct ieee80211_conf *conf = &common->hw->conf;
        bool fastcc = true;
        struct ieee80211_channel *channel = hw->conf.channel;
+       struct ath9k_hw_cal_data *caldata;
        enum htc_phymode mode;
        __be16 htc_mode;
        u8 cmd_rsp;
@@ -149,7 +150,8 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
                  priv->ah->curchan->channel,
                  channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf));
 
-       ret = ath9k_hw_reset(ah, hchan, fastcc);
+       caldata = &priv->caldata[channel->hw_value];
+       ret = ath9k_hw_reset(ah, hchan, caldata, fastcc);
        if (ret) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to reset channel (%u Mhz) "
@@ -1028,7 +1030,7 @@ static void ath9k_htc_radio_enable(struct ieee80211_hw *hw)
                ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
 
        /* Reset the HW */
-       ret = ath9k_hw_reset(ah, ah->curchan, false);
+       ret = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
        if (ret) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to reset hardware; reset status %d "
@@ -1091,7 +1093,7 @@ static void ath9k_htc_radio_disable(struct ieee80211_hw *hw)
                ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
 
        /* Reset the HW */
-       ret = ath9k_hw_reset(ah, ah->curchan, false);
+       ret = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
        if (ret) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to reset hardware; reset status %d "
@@ -1179,7 +1181,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
        ath9k_hw_configpcipowersave(ah, 0, 0);
 
        ath9k_hw_htc_resetinit(ah);
-       ret = ath9k_hw_reset(ah, init_channel, false);
+       ret = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
        if (ret) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to reset hardware; reset status %d "
index 8d291ccf5c88fcc923af7bceab1ee18eb681741c..3384ca1645622e4d3307d957ff193198f6b7e888 100644 (file)
@@ -610,7 +610,6 @@ static int __ath9k_hw_init(struct ath_hw *ah)
        else
                ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S);
 
-       ath9k_init_nfcal_hist_buffer(ah);
        ah->bb_watchdog_timeout_ms = 25;
 
        common->state = ATH_HW_INITIALIZED;
@@ -1183,9 +1182,6 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
 
        ath9k_hw_spur_mitigate_freq(ah, chan);
 
-       if (!chan->oneTimeCalsDone)
-               chan->oneTimeCalsDone = true;
-
        return true;
 }
 
@@ -1218,7 +1214,7 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
 EXPORT_SYMBOL(ath9k_hw_check_alive);
 
 int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
-                   bool bChannelChange)
+                  struct ath9k_hw_cal_data *caldata, bool bChannelChange)
 {
        struct ath_common *common = ath9k_hw_common(ah);
        u32 saveLedState;
@@ -1243,9 +1239,19 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
        if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
                return -EIO;
 
-       if (curchan && !ah->chip_fullsleep)
+       if (curchan && !ah->chip_fullsleep && ah->caldata)
                ath9k_hw_getnf(ah, curchan);
 
+       ah->caldata = caldata;
+       if (caldata &&
+           (chan->channel != caldata->channel ||
+            (chan->channelFlags & ~CHANNEL_CW_INT) !=
+            (caldata->channelFlags & ~CHANNEL_CW_INT))) {
+               /* Operating channel changed, reset channel calibration data */
+               memset(caldata, 0, sizeof(*caldata));
+               ath9k_init_nfcal_hist_buffer(ah, chan);
+       }
+
        if (bChannelChange &&
            (ah->chip_fullsleep != true) &&
            (ah->curchan != NULL) &&
@@ -1256,7 +1262,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 
                if (ath9k_hw_channel_change(ah, chan)) {
                        ath9k_hw_loadnf(ah, ah->curchan);
-                       ath9k_hw_start_nfcal(ah);
+                       ath9k_hw_start_nfcal(ah, true);
                        return 0;
                }
        }
@@ -1461,11 +1467,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
        if (ah->btcoex_hw.enabled)
                ath9k_hw_btcoex_enable(ah);
 
-       if (AR_SREV_9300_20_OR_LATER(ah)) {
-               ath9k_hw_loadnf(ah, curchan);
-               ath9k_hw_start_nfcal(ah);
+       if (AR_SREV_9300_20_OR_LATER(ah))
                ar9003_hw_bb_watchdog_config(ah);
-       }
 
        return 0;
 }
index 2d30efc0b94fd95587c493ad905512debb1cb14f..399f7c1283cdf32ba4cdb5eef198c4cd2ed085bd 100644 (file)
@@ -346,19 +346,25 @@ enum ath9k_int {
         CHANNEL_HT40PLUS |                     \
         CHANNEL_HT40MINUS)
 
-struct ath9k_channel {
-       struct ieee80211_channel *chan;
+struct ath9k_hw_cal_data {
        u16 channel;
        u32 channelFlags;
-       u32 chanmode;
        int32_t CalValid;
-       bool oneTimeCalsDone;
        int8_t iCoff;
        int8_t qCoff;
        int16_t rawNoiseFloor;
        bool paprd_done;
+       bool nfcal_pending;
        u16 small_signal_gain[AR9300_MAX_CHAINS];
        u32 pa_table[AR9300_MAX_CHAINS][PAPRD_TABLE_SZ];
+       struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
+};
+
+struct ath9k_channel {
+       struct ieee80211_channel *chan;
+       u16 channel;
+       u32 channelFlags;
+       u32 chanmode;
 };
 
 #define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \
@@ -669,7 +675,7 @@ struct ath_hw {
        enum nl80211_iftype opmode;
        enum ath9k_power_mode power_mode;
 
-       struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
+       struct ath9k_hw_cal_data *caldata;
        struct ath9k_pacal_info pacal_info;
        struct ar5416Stats stats;
        struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES];
@@ -863,7 +869,7 @@ const char *ath9k_hw_probe(u16 vendorid, u16 devid);
 void ath9k_hw_deinit(struct ath_hw *ah);
 int ath9k_hw_init(struct ath_hw *ah);
 int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
-                  bool bChannelChange);
+                  struct ath9k_hw_cal_data *caldata, bool bChannelChange);
 int ath9k_hw_fill_cap_info(struct ath_hw *ah);
 u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan);
 
@@ -958,9 +964,10 @@ void ar9003_hw_bb_watchdog_read(struct ath_hw *ah);
 void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah);
 void ar9003_paprd_enable(struct ath_hw *ah, bool val);
 void ar9003_paprd_populate_single_table(struct ath_hw *ah,
-                                       struct ath9k_channel *chan, int chain);
-int ar9003_paprd_create_curve(struct ath_hw *ah, struct ath9k_channel *chan,
-                             int chain);
+                                       struct ath9k_hw_cal_data *caldata,
+                                       int chain);
+int ar9003_paprd_create_curve(struct ath_hw *ah,
+                             struct ath9k_hw_cal_data *caldata, int chain);
 int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain);
 int ar9003_paprd_init_table(struct ath_hw *ah);
 bool ar9003_paprd_is_done(struct ath_hw *ah);
index 0429dda0961fcea24309d045a0af2655de053c25..3caa32316e7b4567c5cbb7372aef8330b8d2a747 100644 (file)
@@ -154,6 +154,27 @@ void ath9k_ps_restore(struct ath_softc *sc)
        spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
 }
 
+static void ath_start_ani(struct ath_common *common)
+{
+       struct ath_hw *ah = common->ah;
+       unsigned long timestamp = jiffies_to_msecs(jiffies);
+       struct ath_softc *sc = (struct ath_softc *) common->priv;
+
+       if (!(sc->sc_flags & SC_OP_ANI_RUN))
+               return;
+
+       if (sc->sc_flags & SC_OP_OFFCHANNEL)
+               return;
+
+       common->ani.longcal_timer = timestamp;
+       common->ani.shortcal_timer = timestamp;
+       common->ani.checkani_timer = timestamp;
+
+       mod_timer(&common->ani.timer,
+                 jiffies +
+                       msecs_to_jiffies((u32)ah->config.ani_poll_interval));
+}
+
 /*
  * Set/change channels.  If the channel is really being changed, it's done
  * by reseting the chip.  To accomplish this we must first cleanup any pending
@@ -162,16 +183,23 @@ void ath9k_ps_restore(struct ath_softc *sc)
 int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
                    struct ath9k_channel *hchan)
 {
+       struct ath_wiphy *aphy = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ieee80211_conf *conf = &common->hw->conf;
        bool fastcc = true, stopped;
        struct ieee80211_channel *channel = hw->conf.channel;
+       struct ath9k_hw_cal_data *caldata = NULL;
        int r;
 
        if (sc->sc_flags & SC_OP_INVALID)
                return -EIO;
 
+       del_timer_sync(&common->ani.timer);
+       cancel_work_sync(&sc->paprd_work);
+       cancel_work_sync(&sc->hw_check_work);
+       cancel_delayed_work_sync(&sc->tx_complete_work);
+
        ath9k_ps_wakeup(sc);
 
        /*
@@ -191,9 +219,12 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
         * to flush data frames already in queue because of
         * changing channel. */
 
-       if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
+       if (!stopped || !(sc->sc_flags & SC_OP_OFFCHANNEL))
                fastcc = false;
 
+       if (!(sc->sc_flags & SC_OP_OFFCHANNEL))
+               caldata = &aphy->caldata;
+
        ath_print(common, ATH_DBG_CONFIG,
                  "(%u MHz) -> (%u MHz), conf_is_ht40: %d\n",
                  sc->sc_ah->curchan->channel,
@@ -201,7 +232,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
 
        spin_lock_bh(&sc->sc_resetlock);
 
-       r = ath9k_hw_reset(ah, hchan, fastcc);
+       r = ath9k_hw_reset(ah, hchan, caldata, fastcc);
        if (r) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to reset channel (%u MHz), "
@@ -212,8 +243,6 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
        }
        spin_unlock_bh(&sc->sc_resetlock);
 
-       sc->sc_flags &= ~SC_OP_FULL_RESET;
-
        if (ath_startrecv(sc) != 0) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to restart recv logic\n");
@@ -225,6 +254,12 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
        ath_update_txpow(sc);
        ath9k_hw_set_interrupts(ah, ah->imask);
 
+       if (!(sc->sc_flags & (SC_OP_OFFCHANNEL | SC_OP_SCANNING))) {
+               ath_start_ani(common);
+               ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
+               ath_beacon_config(sc, NULL);
+       }
+
  ps_restore:
        ath9k_ps_restore(sc);
        return r;
@@ -233,17 +268,19 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
 static void ath_paprd_activate(struct ath_softc *sc)
 {
        struct ath_hw *ah = sc->sc_ah;
+       struct ath9k_hw_cal_data *caldata = ah->caldata;
        int chain;
 
-       if (!ah->curchan->paprd_done)
+       if (!caldata || !caldata->paprd_done)
                return;
 
        ath9k_ps_wakeup(sc);
+       ar9003_paprd_enable(ah, false);
        for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
                if (!(ah->caps.tx_chainmask & BIT(chain)))
                        continue;
 
-               ar9003_paprd_populate_single_table(ah, ah->curchan, chain);
+               ar9003_paprd_populate_single_table(ah, caldata, chain);
        }
 
        ar9003_paprd_enable(ah, true);
@@ -261,6 +298,7 @@ void ath_paprd_calibrate(struct work_struct *work)
        int band = hw->conf.channel->band;
        struct ieee80211_supported_band *sband = &sc->sbands[band];
        struct ath_tx_control txctl;
+       struct ath9k_hw_cal_data *caldata = ah->caldata;
        int qnum, ftype;
        int chain_ok = 0;
        int chain;
@@ -268,6 +306,9 @@ void ath_paprd_calibrate(struct work_struct *work)
        int time_left;
        int i;
 
+       if (!caldata)
+               return;
+
        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return;
@@ -322,7 +363,7 @@ void ath_paprd_calibrate(struct work_struct *work)
                if (!ar9003_paprd_is_done(ah))
                        break;
 
-               if (ar9003_paprd_create_curve(ah, ah->curchan, chain) != 0)
+               if (ar9003_paprd_create_curve(ah, caldata, chain) != 0)
                        break;
 
                chain_ok = 1;
@@ -330,7 +371,7 @@ void ath_paprd_calibrate(struct work_struct *work)
        kfree_skb(skb);
 
        if (chain_ok) {
-               ah->curchan->paprd_done = true;
+               caldata->paprd_done = true;
                ath_paprd_activate(sc);
        }
 
@@ -439,33 +480,14 @@ set_timer:
                cal_interval = min(cal_interval, (u32)short_cal_interval);
 
        mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
-       if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) &&
-           !(sc->sc_flags & SC_OP_SCANNING)) {
-               if (!sc->sc_ah->curchan->paprd_done)
+       if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) && ah->caldata) {
+               if (!ah->caldata->paprd_done)
                        ieee80211_queue_work(sc->hw, &sc->paprd_work);
                else
                        ath_paprd_activate(sc);
        }
 }
 
-static void ath_start_ani(struct ath_common *common)
-{
-       struct ath_hw *ah = common->ah;
-       unsigned long timestamp = jiffies_to_msecs(jiffies);
-       struct ath_softc *sc = (struct ath_softc *) common->priv;
-
-       if (!(sc->sc_flags & SC_OP_ANI_RUN))
-               return;
-
-       common->ani.longcal_timer = timestamp;
-       common->ani.shortcal_timer = timestamp;
-       common->ani.checkani_timer = timestamp;
-
-       mod_timer(&common->ani.timer,
-                 jiffies +
-                       msecs_to_jiffies((u32)ah->config.ani_poll_interval));
-}
-
 /*
  * Update tx/rx chainmask. For legacy association,
  * hard code chainmask to 1x1, for 11n association, use
@@ -477,7 +499,7 @@ void ath_update_chainmask(struct ath_softc *sc, int is_ht)
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
 
-       if ((sc->sc_flags & SC_OP_SCANNING) || is_ht ||
+       if ((sc->sc_flags & SC_OP_OFFCHANNEL) || is_ht ||
            (ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE)) {
                common->tx_chainmask = ah->caps.tx_chainmask;
                common->rx_chainmask = ah->caps.rx_chainmask;
@@ -817,7 +839,7 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
                ah->curchan = ath_get_curchannel(sc, sc->hw);
 
        spin_lock_bh(&sc->sc_resetlock);
-       r = ath9k_hw_reset(ah, ah->curchan, false);
+       r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
        if (r) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to reset channel (%u MHz), "
@@ -877,7 +899,7 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
                ah->curchan = ath_get_curchannel(sc, hw);
 
        spin_lock_bh(&sc->sc_resetlock);
-       r = ath9k_hw_reset(ah, ah->curchan, false);
+       r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
        if (r) {
                ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
                          "Unable to reset channel (%u MHz), "
@@ -910,7 +932,7 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
        ath_flushrecv(sc);
 
        spin_lock_bh(&sc->sc_resetlock);
-       r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
+       r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
        if (r)
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to reset hardware; reset status %d\n", r);
@@ -1085,7 +1107,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
         * and then setup of the interrupt mask.
         */
        spin_lock_bh(&sc->sc_resetlock);
-       r = ath9k_hw_reset(ah, init_channel, false);
+       r = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
        if (r) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to reset hardware; reset status %d "
@@ -1579,6 +1601,10 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
 
                aphy->chan_idx = pos;
                aphy->chan_is_ht = conf_is_ht(conf);
+               if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+                       sc->sc_flags |= SC_OP_OFFCHANNEL;
+               else
+                       sc->sc_flags &= ~SC_OP_OFFCHANNEL;
 
                if (aphy->state == ATH_WIPHY_SCAN ||
                    aphy->state == ATH_WIPHY_ACTIVE)
@@ -1990,7 +2016,6 @@ static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
 {
        struct ath_wiphy *aphy = hw->priv;
        struct ath_softc *sc = aphy->sc;
-       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 
        mutex_lock(&sc->mutex);
        if (ath9k_wiphy_scanning(sc)) {
@@ -2008,10 +2033,6 @@ static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
        aphy->state = ATH_WIPHY_SCAN;
        ath9k_wiphy_pause_all_forced(sc, aphy);
        sc->sc_flags |= SC_OP_SCANNING;
-       del_timer_sync(&common->ani.timer);
-       cancel_work_sync(&sc->paprd_work);
-       cancel_work_sync(&sc->hw_check_work);
-       cancel_delayed_work_sync(&sc->tx_complete_work);
        mutex_unlock(&sc->mutex);
 }
 
@@ -2023,15 +2044,10 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
 {
        struct ath_wiphy *aphy = hw->priv;
        struct ath_softc *sc = aphy->sc;
-       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 
        mutex_lock(&sc->mutex);
        aphy->state = ATH_WIPHY_ACTIVE;
        sc->sc_flags &= ~SC_OP_SCANNING;
-       sc->sc_flags |= SC_OP_FULL_RESET;
-       ath_start_ani(common);
-       ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
-       ath_beacon_config(sc, NULL);
        mutex_unlock(&sc->mutex);
 }
 
index da0cfe90c38acab5c5a7bbca9a5e74ed92b7c837..a3fc987ebab003bd5fda0545fa581863b71ba806 100644 (file)
@@ -1140,6 +1140,11 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
                if (flush)
                        goto requeue;
 
+               retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
+                                                rxs, &decrypt_error);
+               if (retval)
+                       goto requeue;
+
                rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
                if (rs.rs_tstamp > tsf_lower &&
                    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
@@ -1149,11 +1154,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
                    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
                        rxs->mactime += 0x100000000ULL;
 
-               retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
-                                                rxs, &decrypt_error);
-               if (retval)
-                       goto requeue;
-
                /* Ensure we always have an skb to requeue once we are done
                 * processing the current buffer's skb */
                requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);
index 501b72821b4d426e5138b7f3b84e9a962d617361..4dda14e3622781babd1d7e296e395ed18bca135c 100644 (file)
@@ -120,26 +120,14 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
        list_add_tail(&ac->list, &txq->axq_acq);
 }
 
-static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
-{
-       struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
-
-       spin_lock_bh(&txq->axq_lock);
-       tid->paused++;
-       spin_unlock_bh(&txq->axq_lock);
-}
-
 static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
        struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
 
-       BUG_ON(tid->paused <= 0);
-       spin_lock_bh(&txq->axq_lock);
-
-       tid->paused--;
+       WARN_ON(!tid->paused);
 
-       if (tid->paused > 0)
-               goto unlock;
+       spin_lock_bh(&txq->axq_lock);
+       tid->paused = false;
 
        if (list_empty(&tid->buf_q))
                goto unlock;
@@ -157,15 +145,10 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
        struct list_head bf_head;
        INIT_LIST_HEAD(&bf_head);
 
-       BUG_ON(tid->paused <= 0);
-       spin_lock_bh(&txq->axq_lock);
+       WARN_ON(!tid->paused);
 
-       tid->paused--;
-
-       if (tid->paused > 0) {
-               spin_unlock_bh(&txq->axq_lock);
-               return;
-       }
+       spin_lock_bh(&txq->axq_lock);
+       tid->paused = false;
 
        while (!list_empty(&tid->buf_q)) {
                bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
@@ -811,7 +794,7 @@ void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
        an = (struct ath_node *)sta->drv_priv;
        txtid = ATH_AN_2_TID(an, tid);
        txtid->state |= AGGR_ADDBA_PROGRESS;
-       ath_tx_pause_tid(sc, txtid);
+       txtid->paused = true;
        *ssn = txtid->seq_start;
 }
 
@@ -835,10 +818,9 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
                return;
        }
 
-       ath_tx_pause_tid(sc, txtid);
-
        /* drop all software retried frames and mark this TID */
        spin_lock_bh(&txq->axq_lock);
+       txtid->paused = true;
        while (!list_empty(&txtid->buf_q)) {
                bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
                if (!bf_isretried(bf)) {
@@ -1181,7 +1163,7 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
                          "Failed to stop TX DMA. Resetting hardware!\n");
 
                spin_lock_bh(&sc->sc_resetlock);
-               r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
+               r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
                if (r)
                        ath_print(common, ATH_DBG_FATAL,
                                  "Unable to reset hardware; reset status %d\n",
index 5bbff4c5a4895b51acc66351bfcf18b212eeea3d..a146240f7ddbf1d8f85f364ce3783d877789668c 100644 (file)
@@ -1924,6 +1924,10 @@ static int ipw2100_net_init(struct net_device *dev)
                bg_band->channels =
                        kzalloc(geo->bg_channels *
                                sizeof(struct ieee80211_channel), GFP_KERNEL);
+               if (!bg_band->channels) {
+                       ipw2100_down(priv);
+                       return -ENOMEM;
+               }
                /* translate geo->bg to bg_band.channels */
                for (i = 0; i < geo->bg_channels; i++) {
                        bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
index f052c6d09b374da05ca891f0e0a6b2ed8f78671f..d706b8afbe5abd7f54b570d1a414a7f9c3c9aac5 100644 (file)
@@ -980,7 +980,7 @@ ssize_t iwl_ucode_bt_stats_read(struct file *file,
                         le32_to_cpu(bt->lo_priority_tx_req_cnt),
                         accum_bt->lo_priority_tx_req_cnt);
        pos += scnprintf(buf + pos, bufsz - pos,
-                        "lo_priority_rx_denied_cnt:\t%u\t\t\t%u\n",
+                        "lo_priority_tx_denied_cnt:\t%u\t\t\t%u\n",
                         le32_to_cpu(bt->lo_priority_tx_denied_cnt),
                         accum_bt->lo_priority_tx_denied_cnt);
        pos += scnprintf(buf + pos, bufsz - pos,
index 35c86d22b14bc67e33748854b02a80719315fb9e..23e5c42e7d7eb31798aa72f7e995f3537d98a86f 100644 (file)
@@ -300,8 +300,9 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
                                      struct ieee80211_sta *sta)
 {
        int ret = -EAGAIN;
+       u32 load = rs_tl_get_load(lq_data, tid);
 
-       if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) {
+       if (load > IWL_AGG_LOAD_THRESHOLD) {
                IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
                                sta->addr, tid);
                ret = ieee80211_start_tx_ba_session(sta, tid);
@@ -311,12 +312,14 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
                         * this might be cause by reloading firmware
                         * stop the tx ba session here
                         */
-                       IWL_DEBUG_HT(priv, "Fail start Tx agg on tid: %d\n",
+                       IWL_ERR(priv, "Fail start Tx agg on tid: %d\n",
                                tid);
                        ieee80211_stop_tx_ba_session(sta, tid);
                }
-       } else
-               IWL_ERR(priv, "Fail finding valid aggregation tid: %d\n", tid);
+       } else {
+               IWL_ERR(priv, "Aggregation not enabled for tid %d "
+                       "because load = %u\n", tid, load);
+       }
        return ret;
 }
 
index 55a1b31fd09ae058bf805583d508715ab6a12d09..ecb953f28068808010eb1a1065612afe90899bf9 100644 (file)
@@ -1331,7 +1331,14 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
        tid = ba_resp->tid;
        agg = &priv->stations[sta_id].tid[tid].agg;
        if (unlikely(agg->txq_id != scd_flow)) {
-               IWL_ERR(priv, "BA scd_flow %d does not match txq_id %d\n",
+               /*
+                * FIXME: this is a uCode bug which need to be addressed,
+                * log the information and return for now!
+                * since it is possible happen very often and in order
+                * not to fill the syslog, don't enable the logging by default
+                */
+               IWL_DEBUG_TX_REPLY(priv,
+                       "BA scd_flow %d does not match txq_id %d\n",
                        scd_flow, agg->txq_id);
                return;
        }
index 8024d44ce4bb592551e72aaca266614b97d595ab..8ccb6d205b6d6219c5e23c38806113f8514ef3ac 100644 (file)
@@ -2000,6 +2000,7 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
                              struct ieee80211_vif *vif)
 {
        struct iwl_priv *priv = hw->priv;
+       bool scan_completed = false;
 
        IWL_DEBUG_MAC80211(priv, "enter\n");
 
@@ -2013,7 +2014,7 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
        if (priv->vif == vif) {
                priv->vif = NULL;
                if (priv->scan_vif == vif) {
-                       ieee80211_scan_completed(priv->hw, true);
+                       scan_completed = true;
                        priv->scan_vif = NULL;
                        priv->scan_request = NULL;
                }
@@ -2021,6 +2022,9 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
        }
        mutex_unlock(&priv->mutex);
 
+       if (scan_completed)
+               ieee80211_scan_completed(priv->hw, true);
+
        IWL_DEBUG_MAC80211(priv, "leave\n");
 
 }
index 5c2bcef5df0cdca8a328f2fc35b38b7c6aa71f2b..0b961a353ff6489143770b7e8b911d42b6c13891 100644 (file)
@@ -71,7 +71,7 @@ do {                                                                  \
 #define IWL_DEBUG(__priv, level, fmt, args...)
 #define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
-                                     void *p, u32 len)
+                                     const void *p, u32 len)
 {}
 #endif                         /* CONFIG_IWLWIFI_DEBUG */
 
index 822f8dc26e9c051d3b9ee34e79ef715b0f204067..71a101fb2e4ecad6639d9e878fcdac3e68224f34 100644 (file)
@@ -43,6 +43,8 @@ static DEFINE_PCI_DEVICE_TABLE(p54p_table) = {
        { PCI_DEVICE(0x1260, 0x3886) },
        /* Intersil PRISM Xbow Wireless LAN adapter (Symbol AP-300) */
        { PCI_DEVICE(0x1260, 0xffff) },
+       /* Standard Microsystems Corp SMC2802W Wireless PCI */
+       { PCI_DEVICE(0x10b8, 0x2802) },
        { },
 };
 
index abff8934db13a1e02f62e56b7ff0561c42891be5..9c38fc331dca7966a6d643d37aaef20079cdd8b6 100644 (file)
@@ -97,7 +97,6 @@ static iw_stats *ray_get_wireless_stats(struct net_device *dev);
 static const struct iw_handler_def ray_handler_def;
 
 /***** Prototypes for raylink functions **************************************/
-static int asc_to_int(char a);
 static void authenticate(ray_dev_t *local);
 static int build_auth_frame(ray_dev_t *local, UCHAR *dest, int auth_type);
 static void authenticate_timeout(u_long);
@@ -1716,24 +1715,6 @@ static void authenticate_timeout(u_long data)
        join_net((u_long) local);
 }
 
-/*===========================================================================*/
-static int asc_to_int(char a)
-{
-       if (a < '0')
-               return -1;
-       if (a <= '9')
-               return (a - '0');
-       if (a < 'A')
-               return -1;
-       if (a <= 'F')
-               return (10 + a - 'A');
-       if (a < 'a')
-               return -1;
-       if (a <= 'f')
-               return (10 + a - 'a');
-       return -1;
-}
-
 /*===========================================================================*/
 static int parse_addr(char *in_str, UCHAR *out)
 {
@@ -1754,14 +1735,14 @@ static int parse_addr(char *in_str, UCHAR *out)
        i = 5;
 
        while (j > 0) {
-               if ((k = asc_to_int(in_str[j--])) != -1)
+               if ((k = hex_to_bin(in_str[j--])) != -1)
                        out[i] = k;
                else
                        return 0;
 
                if (j == 0)
                        break;
-               if ((k = asc_to_int(in_str[j--])) != -1)
+               if ((k = hex_to_bin(in_str[j--])) != -1)
                        out[i] += k << 4;
                else
                        return 0;
index 19b262e1ddbe284beb0845934444f071afcba531..63c2cc408e154705967677b287e0b4da85a5e727 100644 (file)
@@ -240,16 +240,16 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
        struct rt2x00_dev *rt2x00dev;
        int retval;
 
-       retval = pci_request_regions(pci_dev, pci_name(pci_dev));
+       retval = pci_enable_device(pci_dev);
        if (retval) {
-               ERROR_PROBE("PCI request regions failed.\n");
+               ERROR_PROBE("Enable device failed.\n");
                return retval;
        }
 
-       retval = pci_enable_device(pci_dev);
+       retval = pci_request_regions(pci_dev, pci_name(pci_dev));
        if (retval) {
-               ERROR_PROBE("Enable device failed.\n");
-               goto exit_release_regions;
+               ERROR_PROBE("PCI request regions failed.\n");
+               goto exit_disable_device;
        }
 
        pci_set_master(pci_dev);
@@ -260,14 +260,14 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
        if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) {
                ERROR_PROBE("PCI DMA not supported.\n");
                retval = -EIO;
-               goto exit_disable_device;
+               goto exit_release_regions;
        }
 
        hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
        if (!hw) {
                ERROR_PROBE("Failed to allocate hardware.\n");
                retval = -ENOMEM;
-               goto exit_disable_device;
+               goto exit_release_regions;
        }
 
        pci_set_drvdata(pci_dev, hw);
@@ -300,13 +300,12 @@ exit_free_reg:
 exit_free_device:
        ieee80211_free_hw(hw);
 
-exit_disable_device:
-       if (retval != -EBUSY)
-               pci_disable_device(pci_dev);
-
 exit_release_regions:
        pci_release_regions(pci_dev);
 
+exit_disable_device:
+       pci_disable_device(pci_dev);
+
        pci_set_drvdata(pci_dev, NULL);
 
        return retval;
index 96d25fb50495eac40cc160619ab7676d1a933091..4cb99c541e2abb528f63492a3c3736691242be04 100644 (file)
@@ -160,9 +160,8 @@ static void wl1271_spi_init(struct wl1271 *wl)
        spi_message_add_tail(&t, &m);
 
        spi_sync(wl_to_spi(wl), &m);
-       kfree(cmd);
-
        wl1271_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
+       kfree(cmd);
 }
 
 #define WL1271_BUSY_WORD_TIMEOUT 1000
index 147bb1a69aba6bb42ac76cb4ccb935fa9645ef19..a75ed3083a6ae266d66b9630140b42db8d07266e 100644 (file)
@@ -295,7 +295,7 @@ claw_driver_group_store(struct device_driver *ddrv, const char *buf,
        int err;
        err = ccwgroup_create_from_string(claw_root_dev,
                                          claw_group_driver.driver_id,
-                                         &claw_ccw_driver, 3, buf);
+                                         &claw_ccw_driver, 2, buf);
        return err ? err : count;
 }
 
index d79892782a2b2cf1ef5b9a25eb281d87c6cbff47..d1257768be90053f1a6d93c4c048b2de0bb89bd4 100644 (file)
@@ -188,8 +188,7 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
                qeth_is_enabled6(c, f) : qeth_is_enabled(c, f))
 
 #define QETH_IDX_FUNC_LEVEL_OSD                 0x0101
-#define QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT 0x4108
-#define QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT 0x5108
+#define QETH_IDX_FUNC_LEVEL_IQD                 0x4108
 
 #define QETH_MODELLIST_ARRAY \
        {{0x1731, 0x01, 0x1732, QETH_CARD_TYPE_OSD, QETH_MAX_QUEUES, 0}, \
@@ -741,6 +740,7 @@ struct qeth_card {
        struct qeth_qdio_info qdio;
        struct qeth_perf_stats perf_stats;
        int use_hard_stop;
+       int read_or_write_problem;
        struct qeth_osn_info osn_info;
        struct qeth_discipline discipline;
        atomic_t force_alloc_skb;
@@ -748,6 +748,7 @@ struct qeth_card {
        struct qdio_ssqd_desc ssqd;
        debug_info_t *debug;
        struct mutex conf_mutex;
+       struct mutex discipline_mutex;
 };
 
 struct qeth_card_list_struct {
index b7019066c303af489d9cd4cfe52dcb50976b9dd4..3a5a18a0fc283de5a987365e80dead90a9b6a9ea 100644 (file)
@@ -262,6 +262,7 @@ static int qeth_issue_next_read(struct qeth_card *card)
                QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! "
                        "rc=%i\n", dev_name(&card->gdev->dev), rc);
                atomic_set(&card->read.irq_pending, 0);
+               card->read_or_write_problem = 1;
                qeth_schedule_recovery(card);
                wake_up(&card->wait_q);
        }
@@ -382,6 +383,7 @@ void qeth_clear_ipacmd_list(struct qeth_card *card)
                qeth_put_reply(reply);
        }
        spin_unlock_irqrestore(&card->lock, flags);
+       atomic_set(&card->write.irq_pending, 0);
 }
 EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
 
@@ -1076,6 +1078,7 @@ static int qeth_setup_card(struct qeth_card *card)
        card->state = CARD_STATE_DOWN;
        card->lan_online = 0;
        card->use_hard_stop = 0;
+       card->read_or_write_problem = 0;
        card->dev = NULL;
        spin_lock_init(&card->vlanlock);
        spin_lock_init(&card->mclock);
@@ -1084,6 +1087,7 @@ static int qeth_setup_card(struct qeth_card *card)
        spin_lock_init(&card->ip_lock);
        spin_lock_init(&card->thread_mask_lock);
        mutex_init(&card->conf_mutex);
+       mutex_init(&card->discipline_mutex);
        card->thread_start_mask = 0;
        card->thread_allowed_mask = 0;
        card->thread_running_mask = 0;
@@ -1383,12 +1387,7 @@ static void qeth_init_func_level(struct qeth_card *card)
 {
        switch (card->info.type) {
        case QETH_CARD_TYPE_IQD:
-               if (card->ipato.enabled)
-                       card->info.func_level =
-                               QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT;
-               else
-                       card->info.func_level =
-                               QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT;
+               card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
                break;
        case QETH_CARD_TYPE_OSD:
        case QETH_CARD_TYPE_OSN:
@@ -1662,6 +1661,10 @@ int qeth_send_control_data(struct qeth_card *card, int len,
 
        QETH_CARD_TEXT(card, 2, "sendctl");
 
+       if (card->read_or_write_problem) {
+               qeth_release_buffer(iob->channel, iob);
+               return -EIO;
+       }
        reply = qeth_alloc_reply(card);
        if (!reply) {
                return -ENOMEM;
@@ -1733,6 +1736,9 @@ time_err:
        spin_unlock_irqrestore(&reply->card->lock, flags);
        reply->rc = -ETIME;
        atomic_inc(&reply->received);
+       atomic_set(&card->write.irq_pending, 0);
+       qeth_release_buffer(iob->channel, iob);
+       card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO;
        wake_up(&reply->wait_q);
        rc = reply->rc;
        qeth_put_reply(reply);
@@ -1990,7 +1996,7 @@ static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
                QETH_DBF_TEXT(SETUP, 2, "olmlimit");
                dev_err(&card->gdev->dev, "A connection could not be "
                        "established because of an OLM limit\n");
-               rc = -EMLINK;
+               iob->rc = -EMLINK;
        }
        QETH_DBF_TEXT_(SETUP, 2, "  rc%d", iob->rc);
        return rc;
@@ -2489,6 +2495,10 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
        qeth_prepare_ipa_cmd(card, iob, prot_type);
        rc = qeth_send_control_data(card, IPA_CMD_LENGTH,
                                                iob, reply_cb, reply_param);
+       if (rc == -ETIME) {
+               qeth_clear_ipacmd_list(card);
+               qeth_schedule_recovery(card);
+       }
        return rc;
 }
 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
@@ -3413,7 +3423,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
 {
        struct qeth_ipa_cmd *cmd;
        struct qeth_set_access_ctrl *access_ctrl_req;
-       int rc;
 
        QETH_CARD_TEXT(card, 4, "setaccb");
 
@@ -3440,7 +3449,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
                        card->gdev->dev.kobj.name,
                        access_ctrl_req->subcmd_code,
                        cmd->data.setadapterparms.hdr.return_code);
-               rc = 0;
                break;
        }
        case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
@@ -3454,7 +3462,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
 
                /* ensure isolation mode is "none" */
                card->options.isolation = ISOLATION_MODE_NONE;
-               rc = -EOPNOTSUPP;
                break;
        }
        case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
@@ -3469,7 +3476,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
 
                /* ensure isolation mode is "none" */
                card->options.isolation = ISOLATION_MODE_NONE;
-               rc = -EOPNOTSUPP;
                break;
        }
        case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
@@ -3483,7 +3489,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
 
                /* ensure isolation mode is "none" */
                card->options.isolation = ISOLATION_MODE_NONE;
-               rc = -EPERM;
                break;
        }
        default:
@@ -3497,12 +3502,11 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
 
                /* ensure isolation mode is "none" */
                card->options.isolation = ISOLATION_MODE_NONE;
-               rc = 0;
                break;
        }
        }
        qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
-       return rc;
+       return 0;
 }
 
 static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
@@ -3744,15 +3748,10 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
        /* skip 4 bytes (data_len struct member) to get req_len */
        if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
                return -EFAULT;
-       ureq = kmalloc(req_len+sizeof(struct qeth_snmp_ureq_hdr), GFP_KERNEL);
-       if (!ureq) {
+       ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr));
+       if (IS_ERR(ureq)) {
                QETH_CARD_TEXT(card, 2, "snmpnome");
-               return -ENOMEM;
-       }
-       if (copy_from_user(ureq, udata,
-                       req_len + sizeof(struct qeth_snmp_ureq_hdr))) {
-               kfree(ureq);
-               return -EFAULT;
+               return PTR_ERR(ureq);
        }
        qinfo.udata_len = ureq->hdr.data_len;
        qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
@@ -3971,6 +3970,7 @@ retriable:
                else
                        goto retry;
        }
+       card->read_or_write_problem = 0;
        rc = qeth_mpc_initialize(card);
        if (rc) {
                QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
@@ -4353,16 +4353,18 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
        struct qeth_card *card = dev_get_drvdata(&gdev->dev);
 
        QETH_DBF_TEXT(SETUP, 2, "removedv");
-       if (card->discipline.ccwgdriver) {
-               card->discipline.ccwgdriver->remove(gdev);
-               qeth_core_free_discipline(card);
-       }
 
        if (card->info.type == QETH_CARD_TYPE_OSN) {
                qeth_core_remove_osn_attributes(&gdev->dev);
        } else {
                qeth_core_remove_device_attributes(&gdev->dev);
        }
+
+       if (card->discipline.ccwgdriver) {
+               card->discipline.ccwgdriver->remove(gdev);
+               qeth_core_free_discipline(card);
+       }
+
        debug_unregister(card->debug);
        write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
        list_del(&card->list);
index 2eb022ff2610f725bad556a6d67c47371dae13d8..42fa783a70c83b9c65bf0b91472e1f05e220b80f 100644 (file)
@@ -411,7 +411,7 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
        if (!card)
                return -EINVAL;
 
-       mutex_lock(&card->conf_mutex);
+       mutex_lock(&card->discipline_mutex);
        if (card->state != CARD_STATE_DOWN) {
                rc = -EPERM;
                goto out;
@@ -433,6 +433,7 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
        if (card->options.layer2 == newdis)
                goto out;
        else {
+               card->info.mac_bits  = 0;
                if (card->discipline.ccwgdriver) {
                        card->discipline.ccwgdriver->remove(card->gdev);
                        qeth_core_free_discipline(card);
@@ -445,7 +446,7 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
 
        rc = card->discipline.ccwgdriver->probe(card->gdev);
 out:
-       mutex_unlock(&card->conf_mutex);
+       mutex_unlock(&card->discipline_mutex);
        return rc ? rc : count;
 }
 
index 32d07c2dcc67b6c64e64e44cbff201185591710b..830d63524d612ff8bc665f918d6b01400e5c0c88 100644 (file)
@@ -860,8 +860,6 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
                unregister_netdev(card->dev);
                card->dev = NULL;
        }
-
-       qeth_l2_del_all_mc(card);
        return;
 }
 
@@ -935,6 +933,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
        enum qeth_card_states recover_flag;
 
        BUG_ON(!card);
+       mutex_lock(&card->discipline_mutex);
        mutex_lock(&card->conf_mutex);
        QETH_DBF_TEXT(SETUP, 2, "setonlin");
        QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
@@ -1012,6 +1011,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
        kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
 out:
        mutex_unlock(&card->conf_mutex);
+       mutex_unlock(&card->discipline_mutex);
        return 0;
 
 out_remove:
@@ -1025,6 +1025,7 @@ out_remove:
        else
                card->state = CARD_STATE_DOWN;
        mutex_unlock(&card->conf_mutex);
+       mutex_unlock(&card->discipline_mutex);
        return rc;
 }
 
@@ -1040,6 +1041,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
        int rc = 0, rc2 = 0, rc3 = 0;
        enum qeth_card_states recover_flag;
 
+       mutex_lock(&card->discipline_mutex);
        mutex_lock(&card->conf_mutex);
        QETH_DBF_TEXT(SETUP, 3, "setoffl");
        QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
@@ -1060,6 +1062,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
        /* let user_space know that device is offline */
        kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
        mutex_unlock(&card->conf_mutex);
+       mutex_unlock(&card->discipline_mutex);
        return 0;
 }
 
index 8447d233d0b33e97ef9e82511cecd1e08119a02f..e705b27ec7dc133c7920f6a50d7a71a2a2204ca2 100644 (file)
@@ -64,5 +64,6 @@ void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
                        const u8 *);
 int qeth_l3_set_large_send(struct qeth_card *, enum qeth_large_send_types);
 int qeth_l3_set_rx_csum(struct qeth_card *, enum qeth_checksum_types);
+int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
 
 #endif /* __QETH_L3_H__ */
index 61d348e51920a584add110bc23f39fee954836b6..e22ae248f613eea825eaf8fe48f02bbd078f9818 100644 (file)
@@ -195,7 +195,7 @@ static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
        }
 }
 
-static int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
+int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
                                                struct qeth_ipaddr *addr)
 {
        struct qeth_ipato_entry *ipatoe;
@@ -3354,6 +3354,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
 {
        struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
 
+       qeth_l3_remove_device_attributes(&cgdev->dev);
+
        qeth_set_allowed_threads(card, 0, 1);
        wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
 
@@ -3367,7 +3369,6 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
                card->dev = NULL;
        }
 
-       qeth_l3_remove_device_attributes(&cgdev->dev);
        qeth_l3_clear_ip_list(card, 0, 0);
        qeth_l3_clear_ipato_list(card);
        return;
@@ -3380,6 +3381,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
        enum qeth_card_states recover_flag;
 
        BUG_ON(!card);
+       mutex_lock(&card->discipline_mutex);
        mutex_lock(&card->conf_mutex);
        QETH_DBF_TEXT(SETUP, 2, "setonlin");
        QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
@@ -3461,6 +3463,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
        kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
 out:
        mutex_unlock(&card->conf_mutex);
+       mutex_unlock(&card->discipline_mutex);
        return 0;
 out_remove:
        card->use_hard_stop = 1;
@@ -3473,6 +3476,7 @@ out_remove:
        else
                card->state = CARD_STATE_DOWN;
        mutex_unlock(&card->conf_mutex);
+       mutex_unlock(&card->discipline_mutex);
        return rc;
 }
 
@@ -3488,6 +3492,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
        int rc = 0, rc2 = 0, rc3 = 0;
        enum qeth_card_states recover_flag;
 
+       mutex_lock(&card->discipline_mutex);
        mutex_lock(&card->conf_mutex);
        QETH_DBF_TEXT(SETUP, 3, "setoffl");
        QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
@@ -3508,6 +3513,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
        /* let user_space know that device is offline */
        kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
        mutex_unlock(&card->conf_mutex);
+       mutex_unlock(&card->discipline_mutex);
        return 0;
 }
 
index fb5318b30e99bfe849602fa127bbf1a1ffe83336..67cfa68dcf1b9781f45314521f357380a5a6568c 100644 (file)
@@ -479,6 +479,7 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
        struct qeth_card *card = dev_get_drvdata(dev);
+       struct qeth_ipaddr *tmpipa, *t;
        char *tmp;
        int rc = 0;
 
@@ -497,8 +498,21 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
                card->ipato.enabled = (card->ipato.enabled)? 0 : 1;
        } else if (!strcmp(tmp, "1")) {
                card->ipato.enabled = 1;
+               list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) {
+                       if ((tmpipa->type == QETH_IP_TYPE_NORMAL) &&
+                               qeth_l3_is_addr_covered_by_ipato(card, tmpipa))
+                               tmpipa->set_flags |=
+                                       QETH_IPA_SETIP_TAKEOVER_FLAG;
+               }
+
        } else if (!strcmp(tmp, "0")) {
                card->ipato.enabled = 0;
+               list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) {
+                       if (tmpipa->set_flags &
+                               QETH_IPA_SETIP_TAKEOVER_FLAG)
+                               tmpipa->set_flags &=
+                                       ~QETH_IPA_SETIP_TAKEOVER_FLAG;
+               }
        } else
                rc = -EINVAL;
 out:
index 7a104e2de3fa77d189bf9ae010cda3f0ee0dd706..f13e56babe4b53b7e3689e39c147535ea8daea66 100644 (file)
@@ -74,6 +74,22 @@ static int move_iovec_hdr(struct iovec *from, struct iovec *to,
        }
        return seg;
 }
+/* Copy iovec entries for len bytes from iovec. */
+static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
+                          size_t len, int iovcount)
+{
+       int seg = 0;
+       size_t size;
+       while (len && seg < iovcount) {
+               size = min(from->iov_len, len);
+               to->iov_base = from->iov_base;
+               to->iov_len = size;
+               len -= size;
+               ++from;
+               ++to;
+               ++seg;
+       }
+}
 
 /* Caller must have TX VQ lock */
 static void tx_poll_stop(struct vhost_net *net)
@@ -129,7 +145,7 @@ static void handle_tx(struct vhost_net *net)
 
        if (wmem < sock->sk->sk_sndbuf / 2)
                tx_poll_stop(net);
-       hdr_size = vq->hdr_size;
+       hdr_size = vq->vhost_hlen;
 
        for (;;) {
                head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
@@ -172,7 +188,7 @@ static void handle_tx(struct vhost_net *net)
                /* TODO: Check specific error and bomb out unless ENOBUFS? */
                err = sock->ops->sendmsg(NULL, sock, &msg, len);
                if (unlikely(err < 0)) {
-                       vhost_discard_vq_desc(vq);
+                       vhost_discard_vq_desc(vq, 1);
                        tx_poll_start(net, sock);
                        break;
                }
@@ -191,9 +207,82 @@ static void handle_tx(struct vhost_net *net)
        unuse_mm(net->dev.mm);
 }
 
+static int peek_head_len(struct sock *sk)
+{
+       struct sk_buff *head;
+       int len = 0;
+
+       lock_sock(sk);
+       head = skb_peek(&sk->sk_receive_queue);
+       if (head)
+               len = head->len;
+       release_sock(sk);
+       return len;
+}
+
+/* This is a multi-buffer version of vhost_get_desc, that works if
+ *     vq has read descriptors only.
+ * @vq         - the relevant virtqueue
+ * @datalen    - data length we'll be reading
+ * @iovcount   - returned count of io vectors we fill
+ * @log                - vhost log
+ * @log_num    - log offset
+ *     returns number of buffer heads allocated, negative on error
+ */
+static int get_rx_bufs(struct vhost_virtqueue *vq,
+                      struct vring_used_elem *heads,
+                      int datalen,
+                      unsigned *iovcount,
+                      struct vhost_log *log,
+                      unsigned *log_num)
+{
+       unsigned int out, in;
+       int seg = 0;
+       int headcount = 0;
+       unsigned d;
+       int r, nlogs = 0;
+
+       while (datalen > 0) {
+               if (unlikely(headcount >= VHOST_NET_MAX_SG)) {
+                       r = -ENOBUFS;
+                       goto err;
+               }
+               d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
+                                     ARRAY_SIZE(vq->iov) - seg, &out,
+                                     &in, log, log_num);
+               if (d == vq->num) {
+                       r = 0;
+                       goto err;
+               }
+               if (unlikely(out || in <= 0)) {
+                       vq_err(vq, "unexpected descriptor format for RX: "
+                               "out %d, in %d\n", out, in);
+                       r = -EINVAL;
+                       goto err;
+               }
+               if (unlikely(log)) {
+                       nlogs += *log_num;
+                       log += *log_num;
+               }
+               heads[headcount].id = d;
+               heads[headcount].len = iov_length(vq->iov + seg, in);
+               datalen -= heads[headcount].len;
+               ++headcount;
+               seg += in;
+       }
+       heads[headcount - 1].len += datalen;
+       *iovcount = seg;
+       if (unlikely(log))
+               *log_num = nlogs;
+       return headcount;
+err:
+       vhost_discard_vq_desc(vq, headcount);
+       return r;
+}
+
 /* Expects to be always run from workqueue - which acts as
  * read-size critical section for our kind of RCU. */
-static void handle_rx(struct vhost_net *net)
+static void handle_rx_big(struct vhost_net *net)
 {
        struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
        unsigned out, in, log, s;
@@ -223,7 +312,7 @@ static void handle_rx(struct vhost_net *net)
        use_mm(net->dev.mm);
        mutex_lock(&vq->mutex);
        vhost_disable_notify(vq);
-       hdr_size = vq->hdr_size;
+       hdr_size = vq->vhost_hlen;
 
        vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
                vq->log : NULL;
@@ -270,14 +359,14 @@ static void handle_rx(struct vhost_net *net)
                                         len, MSG_DONTWAIT | MSG_TRUNC);
                /* TODO: Check specific error and bomb out unless EAGAIN? */
                if (err < 0) {
-                       vhost_discard_vq_desc(vq);
+                       vhost_discard_vq_desc(vq, 1);
                        break;
                }
                /* TODO: Should check and handle checksum. */
                if (err > len) {
                        pr_debug("Discarded truncated rx packet: "
                                 " len %d > %zd\n", err, len);
-                       vhost_discard_vq_desc(vq);
+                       vhost_discard_vq_desc(vq, 1);
                        continue;
                }
                len = err;
@@ -302,54 +391,175 @@ static void handle_rx(struct vhost_net *net)
        unuse_mm(net->dev.mm);
 }
 
-static void handle_tx_kick(struct work_struct *work)
+/* Expects to be always run from workqueue - which acts as
+ * read-size critical section for our kind of RCU. */
+static void handle_rx_mergeable(struct vhost_net *net)
 {
-       struct vhost_virtqueue *vq;
-       struct vhost_net *net;
-       vq = container_of(work, struct vhost_virtqueue, poll.work);
-       net = container_of(vq->dev, struct vhost_net, dev);
+       struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
+       unsigned uninitialized_var(in), log;
+       struct vhost_log *vq_log;
+       struct msghdr msg = {
+               .msg_name = NULL,
+               .msg_namelen = 0,
+               .msg_control = NULL, /* FIXME: get and handle RX aux data. */
+               .msg_controllen = 0,
+               .msg_iov = vq->iov,
+               .msg_flags = MSG_DONTWAIT,
+       };
+
+       struct virtio_net_hdr_mrg_rxbuf hdr = {
+               .hdr.flags = 0,
+               .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
+       };
+
+       size_t total_len = 0;
+       int err, headcount;
+       size_t vhost_hlen, sock_hlen;
+       size_t vhost_len, sock_len;
+       struct socket *sock = rcu_dereference(vq->private_data);
+       if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
+               return;
+
+       use_mm(net->dev.mm);
+       mutex_lock(&vq->mutex);
+       vhost_disable_notify(vq);
+       vhost_hlen = vq->vhost_hlen;
+       sock_hlen = vq->sock_hlen;
+
+       vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
+               vq->log : NULL;
+
+       while ((sock_len = peek_head_len(sock->sk))) {
+               sock_len += sock_hlen;
+               vhost_len = sock_len + vhost_hlen;
+               headcount = get_rx_bufs(vq, vq->heads, vhost_len,
+                                       &in, vq_log, &log);
+               /* On error, stop handling until the next kick. */
+               if (unlikely(headcount < 0))
+                       break;
+               /* OK, now we need to know about added descriptors. */
+               if (!headcount) {
+                       if (unlikely(vhost_enable_notify(vq))) {
+                               /* They have slipped one in as we were
+                                * doing that: check again. */
+                               vhost_disable_notify(vq);
+                               continue;
+                       }
+                       /* Nothing new?  Wait for eventfd to tell us
+                        * they refilled. */
+                       break;
+               }
+               /* We don't need to be notified again. */
+               if (unlikely((vhost_hlen)))
+                       /* Skip header. TODO: support TSO. */
+                       move_iovec_hdr(vq->iov, vq->hdr, vhost_hlen, in);
+               else
+                       /* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF:
+                        * needed because sendmsg can modify msg_iov. */
+                       copy_iovec_hdr(vq->iov, vq->hdr, sock_hlen, in);
+               msg.msg_iovlen = in;
+               err = sock->ops->recvmsg(NULL, sock, &msg,
+                                        sock_len, MSG_DONTWAIT | MSG_TRUNC);
+               /* Userspace might have consumed the packet meanwhile:
+                * it's not supposed to do this usually, but might be hard
+                * to prevent. Discard data we got (if any) and keep going. */
+               if (unlikely(err != sock_len)) {
+                       pr_debug("Discarded rx packet: "
+                                " len %d, expected %zd\n", err, sock_len);
+                       vhost_discard_vq_desc(vq, headcount);
+                       continue;
+               }
+               if (unlikely(vhost_hlen) &&
+                   memcpy_toiovecend(vq->hdr, (unsigned char *)&hdr, 0,
+                                     vhost_hlen)) {
+                       vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
+                              vq->iov->iov_base);
+                       break;
+               }
+               /* TODO: Should check and handle checksum. */
+               if (vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF) &&
+                   memcpy_toiovecend(vq->hdr, (unsigned char *)&headcount,
+                                     offsetof(typeof(hdr), num_buffers),
+                                     sizeof hdr.num_buffers)) {
+                       vq_err(vq, "Failed num_buffers write");
+                       vhost_discard_vq_desc(vq, headcount);
+                       break;
+               }
+               vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
+                                           headcount);
+               if (unlikely(vq_log))
+                       vhost_log_write(vq, vq_log, log, vhost_len);
+               total_len += vhost_len;
+               if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
+                       vhost_poll_queue(&vq->poll);
+                       break;
+               }
+       }
+
+       mutex_unlock(&vq->mutex);
+       unuse_mm(net->dev.mm);
+}
+
+static void handle_rx(struct vhost_net *net)
+{
+       if (vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF))
+               handle_rx_mergeable(net);
+       else
+               handle_rx_big(net);
+}
+
+static void handle_tx_kick(struct vhost_work *work)
+{
+       struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+                                                 poll.work);
+       struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);
+
        handle_tx(net);
 }
 
-static void handle_rx_kick(struct work_struct *work)
+static void handle_rx_kick(struct vhost_work *work)
 {
-       struct vhost_virtqueue *vq;
-       struct vhost_net *net;
-       vq = container_of(work, struct vhost_virtqueue, poll.work);
-       net = container_of(vq->dev, struct vhost_net, dev);
+       struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+                                                 poll.work);
+       struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);
+
        handle_rx(net);
 }
 
-static void handle_tx_net(struct work_struct *work)
+static void handle_tx_net(struct vhost_work *work)
 {
-       struct vhost_net *net;
-       net = container_of(work, struct vhost_net, poll[VHOST_NET_VQ_TX].work);
+       struct vhost_net *net = container_of(work, struct vhost_net,
+                                            poll[VHOST_NET_VQ_TX].work);
        handle_tx(net);
 }
 
-static void handle_rx_net(struct work_struct *work)
+static void handle_rx_net(struct vhost_work *work)
 {
-       struct vhost_net *net;
-       net = container_of(work, struct vhost_net, poll[VHOST_NET_VQ_RX].work);
+       struct vhost_net *net = container_of(work, struct vhost_net,
+                                            poll[VHOST_NET_VQ_RX].work);
        handle_rx(net);
 }
 
 static int vhost_net_open(struct inode *inode, struct file *f)
 {
        struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
+       struct vhost_dev *dev;
        int r;
+
        if (!n)
                return -ENOMEM;
+
+       dev = &n->dev;
        n->vqs[VHOST_NET_VQ_TX].handle_kick = handle_tx_kick;
        n->vqs[VHOST_NET_VQ_RX].handle_kick = handle_rx_kick;
-       r = vhost_dev_init(&n->dev, n->vqs, VHOST_NET_VQ_MAX);
+       r = vhost_dev_init(dev, n->vqs, VHOST_NET_VQ_MAX);
        if (r < 0) {
                kfree(n);
                return r;
        }
 
-       vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT);
-       vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN);
+       vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
+       vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);
        n->tx_poll_state = VHOST_NET_POLL_DISABLED;
 
        f->private_data = n;
@@ -533,7 +743,6 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
                 vhost_net_enable_vq(n, vq);
        }
 
-done:
        mutex_unlock(&vq->mutex);
 
        if (oldsock) {
@@ -574,9 +783,21 @@ done:
 
 static int vhost_net_set_features(struct vhost_net *n, u64 features)
 {
-       size_t hdr_size = features & (1 << VHOST_NET_F_VIRTIO_NET_HDR) ?
-               sizeof(struct virtio_net_hdr) : 0;
+       size_t vhost_hlen, sock_hlen, hdr_len;
        int i;
+
+       hdr_len = (features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ?
+                       sizeof(struct virtio_net_hdr_mrg_rxbuf) :
+                       sizeof(struct virtio_net_hdr);
+       if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
+               /* vhost provides vnet_hdr */
+               vhost_hlen = hdr_len;
+               sock_hlen = 0;
+       } else {
+               /* socket provides vnet_hdr */
+               vhost_hlen = 0;
+               sock_hlen = hdr_len;
+       }
        mutex_lock(&n->dev.mutex);
        if ((features & (1 << VHOST_F_LOG_ALL)) &&
            !vhost_log_access_ok(&n->dev)) {
@@ -587,7 +808,8 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features)
        smp_wmb();
        for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
                mutex_lock(&n->vqs[i].mutex);
-               n->vqs[i].hdr_size = hdr_size;
+               n->vqs[i].vhost_hlen = vhost_hlen;
+               n->vqs[i].sock_hlen = sock_hlen;
                mutex_unlock(&n->vqs[i].mutex);
        }
        vhost_net_flush(n);
@@ -657,25 +879,13 @@ static struct miscdevice vhost_net_misc = {
 
 static int vhost_net_init(void)
 {
-       int r = vhost_init();
-       if (r)
-               goto err_init;
-       r = misc_register(&vhost_net_misc);
-       if (r)
-               goto err_reg;
-       return 0;
-err_reg:
-       vhost_cleanup();
-err_init:
-       return r;
-
+       return misc_register(&vhost_net_misc);
 }
 module_init(vhost_net_init);
 
 static void vhost_net_exit(void)
 {
        misc_deregister(&vhost_net_misc);
-       vhost_cleanup();
 }
 module_exit(vhost_net_exit);
 
index 248ed2db07110116419542f736812432a59ddab8..e05557d529992ec4deb1827a32574043bcd00925 100644 (file)
 #include <linux/mm.h>
 #include <linux/miscdevice.h>
 #include <linux/mutex.h>
-#include <linux/workqueue.h>
 #include <linux/rcupdate.h>
 #include <linux/poll.h>
 #include <linux/file.h>
 #include <linux/highmem.h>
 #include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/cgroup.h>
 
 #include <linux/net.h>
 #include <linux/if_packet.h>
@@ -37,8 +38,6 @@ enum {
        VHOST_MEMORY_F_LOG = 0x1,
 };
 
-static struct workqueue_struct *vhost_workqueue;
-
 static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
                            poll_table *pt)
 {
@@ -52,23 +51,31 @@ static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
 static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
                             void *key)
 {
-       struct vhost_poll *poll;
-       poll = container_of(wait, struct vhost_poll, wait);
+       struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
+
        if (!((unsigned long)key & poll->mask))
                return 0;
 
-       queue_work(vhost_workqueue, &poll->work);
+       vhost_poll_queue(poll);
        return 0;
 }
 
 /* Init poll structure */
-void vhost_poll_init(struct vhost_poll *poll, work_func_t func,
-                    unsigned long mask)
+void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
+                    unsigned long mask, struct vhost_dev *dev)
 {
-       INIT_WORK(&poll->work, func);
+       struct vhost_work *work = &poll->work;
+
        init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
        init_poll_funcptr(&poll->table, vhost_poll_func);
        poll->mask = mask;
+       poll->dev = dev;
+
+       INIT_LIST_HEAD(&work->node);
+       work->fn = fn;
+       init_waitqueue_head(&work->done);
+       work->flushing = 0;
+       work->queue_seq = work->done_seq = 0;
 }
 
 /* Start polling a file. We add ourselves to file's wait queue. The caller must
@@ -92,12 +99,40 @@ void vhost_poll_stop(struct vhost_poll *poll)
  * locks that are also used by the callback. */
 void vhost_poll_flush(struct vhost_poll *poll)
 {
-       flush_work(&poll->work);
+       struct vhost_work *work = &poll->work;
+       unsigned seq;
+       int left;
+       int flushing;
+
+       spin_lock_irq(&poll->dev->work_lock);
+       seq = work->queue_seq;
+       work->flushing++;
+       spin_unlock_irq(&poll->dev->work_lock);
+       wait_event(work->done, ({
+                  spin_lock_irq(&poll->dev->work_lock);
+                  left = seq - work->done_seq <= 0;
+                  spin_unlock_irq(&poll->dev->work_lock);
+                  left;
+       }));
+       spin_lock_irq(&poll->dev->work_lock);
+       flushing = --work->flushing;
+       spin_unlock_irq(&poll->dev->work_lock);
+       BUG_ON(flushing < 0);
 }
 
 void vhost_poll_queue(struct vhost_poll *poll)
 {
-       queue_work(vhost_workqueue, &poll->work);
+       struct vhost_dev *dev = poll->dev;
+       struct vhost_work *work = &poll->work;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->work_lock, flags);
+       if (list_empty(&work->node)) {
+               list_add_tail(&work->node, &dev->work_list);
+               work->queue_seq++;
+               wake_up_process(dev->worker);
+       }
+       spin_unlock_irqrestore(&dev->work_lock, flags);
 }
 
 static void vhost_vq_reset(struct vhost_dev *dev,
@@ -114,7 +149,8 @@ static void vhost_vq_reset(struct vhost_dev *dev,
        vq->used_flags = 0;
        vq->log_used = false;
        vq->log_addr = -1ull;
-       vq->hdr_size = 0;
+       vq->vhost_hlen = 0;
+       vq->sock_hlen = 0;
        vq->private_data = NULL;
        vq->log_base = NULL;
        vq->error_ctx = NULL;
@@ -125,10 +161,51 @@ static void vhost_vq_reset(struct vhost_dev *dev,
        vq->log_ctx = NULL;
 }
 
+static int vhost_worker(void *data)
+{
+       struct vhost_dev *dev = data;
+       struct vhost_work *work = NULL;
+       unsigned uninitialized_var(seq);
+
+       for (;;) {
+               /* mb paired w/ kthread_stop */
+               set_current_state(TASK_INTERRUPTIBLE);
+
+               spin_lock_irq(&dev->work_lock);
+               if (work) {
+                       work->done_seq = seq;
+                       if (work->flushing)
+                               wake_up_all(&work->done);
+               }
+
+               if (kthread_should_stop()) {
+                       spin_unlock_irq(&dev->work_lock);
+                       __set_current_state(TASK_RUNNING);
+                       return 0;
+               }
+               if (!list_empty(&dev->work_list)) {
+                       work = list_first_entry(&dev->work_list,
+                                               struct vhost_work, node);
+                       list_del_init(&work->node);
+                       seq = work->queue_seq;
+               } else
+                       work = NULL;
+               spin_unlock_irq(&dev->work_lock);
+
+               if (work) {
+                       __set_current_state(TASK_RUNNING);
+                       work->fn(work);
+               } else
+                       schedule();
+
+       }
+}
+
 long vhost_dev_init(struct vhost_dev *dev,
                    struct vhost_virtqueue *vqs, int nvqs)
 {
        int i;
+
        dev->vqs = vqs;
        dev->nvqs = nvqs;
        mutex_init(&dev->mutex);
@@ -136,6 +213,9 @@ long vhost_dev_init(struct vhost_dev *dev,
        dev->log_file = NULL;
        dev->memory = NULL;
        dev->mm = NULL;
+       spin_lock_init(&dev->work_lock);
+       INIT_LIST_HEAD(&dev->work_list);
+       dev->worker = NULL;
 
        for (i = 0; i < dev->nvqs; ++i) {
                dev->vqs[i].dev = dev;
@@ -143,9 +223,9 @@ long vhost_dev_init(struct vhost_dev *dev,
                vhost_vq_reset(dev, dev->vqs + i);
                if (dev->vqs[i].handle_kick)
                        vhost_poll_init(&dev->vqs[i].poll,
-                                       dev->vqs[i].handle_kick,
-                                       POLLIN);
+                                       dev->vqs[i].handle_kick, POLLIN, dev);
        }
+
        return 0;
 }
 
@@ -159,12 +239,36 @@ long vhost_dev_check_owner(struct vhost_dev *dev)
 /* Caller should have device mutex */
 static long vhost_dev_set_owner(struct vhost_dev *dev)
 {
+       struct task_struct *worker;
+       int err;
        /* Is there an owner already? */
-       if (dev->mm)
-               return -EBUSY;
+       if (dev->mm) {
+               err = -EBUSY;
+               goto err_mm;
+       }
        /* No owner, become one */
        dev->mm = get_task_mm(current);
+       worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
+       if (IS_ERR(worker)) {
+               err = PTR_ERR(worker);
+               goto err_worker;
+       }
+
+       dev->worker = worker;
+       err = cgroup_attach_task_current_cg(worker);
+       if (err)
+               goto err_cgroup;
+       wake_up_process(worker);        /* avoid contributing to loadavg */
+
        return 0;
+err_cgroup:
+       kthread_stop(worker);
+err_worker:
+       if (dev->mm)
+               mmput(dev->mm);
+       dev->mm = NULL;
+err_mm:
+       return err;
 }
 
 /* Caller should have device mutex */
@@ -217,6 +321,9 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
        if (dev->mm)
                mmput(dev->mm);
        dev->mm = NULL;
+
+       WARN_ON(!list_empty(&dev->work_list));
+       kthread_stop(dev->worker);
 }
 
 static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
@@ -995,9 +1102,9 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
 }
 
 /* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
-void vhost_discard_vq_desc(struct vhost_virtqueue *vq)
+void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
 {
-       vq->last_avail_idx--;
+       vq->last_avail_idx -= n;
 }
 
 /* After we've used one of their buffers, we tell them about it.  We'll then
@@ -1042,6 +1149,67 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
        return 0;
 }
 
+static int __vhost_add_used_n(struct vhost_virtqueue *vq,
+                           struct vring_used_elem *heads,
+                           unsigned count)
+{
+       struct vring_used_elem __user *used;
+       int start;
+
+       start = vq->last_used_idx % vq->num;
+       used = vq->used->ring + start;
+       if (copy_to_user(used, heads, count * sizeof *used)) {
+               vq_err(vq, "Failed to write used");
+               return -EFAULT;
+       }
+       if (unlikely(vq->log_used)) {
+               /* Make sure data is seen before log. */
+               smp_wmb();
+               /* Log used ring entry write. */
+               log_write(vq->log_base,
+                         vq->log_addr +
+                          ((void __user *)used - (void __user *)vq->used),
+                         count * sizeof *used);
+       }
+       vq->last_used_idx += count;
+       return 0;
+}
+
+/* After we've used one of their buffers, we tell them about it.  We'll then
+ * want to notify the guest, using eventfd. */
+int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
+                    unsigned count)
+{
+       int start, n, r;
+
+       start = vq->last_used_idx % vq->num;
+       n = vq->num - start;
+       if (n < count) {
+               r = __vhost_add_used_n(vq, heads, n);
+               if (r < 0)
+                       return r;
+               heads += n;
+               count -= n;
+       }
+       r = __vhost_add_used_n(vq, heads, count);
+
+       /* Make sure buffer is written before we update index. */
+       smp_wmb();
+       if (put_user(vq->last_used_idx, &vq->used->idx)) {
+               vq_err(vq, "Failed to increment used idx");
+               return -EFAULT;
+       }
+       if (unlikely(vq->log_used)) {
+               /* Log used index update. */
+               log_write(vq->log_base,
+                         vq->log_addr + offsetof(struct vring_used, idx),
+                         sizeof vq->used->idx);
+               if (vq->log_ctx)
+                       eventfd_signal(vq->log_ctx, 1);
+       }
+       return r;
+}
+
 /* This actually signals the guest, using eventfd. */
 void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 {
@@ -1076,6 +1244,15 @@ void vhost_add_used_and_signal(struct vhost_dev *dev,
        vhost_signal(dev, vq);
 }
 
+/* multi-buffer version of vhost_add_used_and_signal */
+void vhost_add_used_and_signal_n(struct vhost_dev *dev,
+                                struct vhost_virtqueue *vq,
+                                struct vring_used_elem *heads, unsigned count)
+{
+       vhost_add_used_n(vq, heads, count);
+       vhost_signal(dev, vq);
+}
+
 /* OK, now we need to know about added descriptors. */
 bool vhost_enable_notify(struct vhost_virtqueue *vq)
 {
@@ -1100,7 +1277,7 @@ bool vhost_enable_notify(struct vhost_virtqueue *vq)
                return false;
        }
 
-       return avail_idx != vq->last_avail_idx;
+       return avail_idx != vq->avail_idx;
 }
 
 /* We don't need to be notified again. */
@@ -1115,16 +1292,3 @@ void vhost_disable_notify(struct vhost_virtqueue *vq)
                vq_err(vq, "Failed to enable notification at %p: %d\n",
                       &vq->used->flags, r);
 }
-
-int vhost_init(void)
-{
-       vhost_workqueue = create_singlethread_workqueue("vhost");
-       if (!vhost_workqueue)
-               return -ENOMEM;
-       return 0;
-}
-
-void vhost_cleanup(void)
-{
-       destroy_workqueue(vhost_workqueue);
-}
index 11ee13dba0f7444a20313a1e15a66c3c8d029dfd..afd77295971ce3044117d0d6e5ea8f4e20f655fe 100644 (file)
@@ -5,13 +5,13 @@
 #include <linux/vhost.h>
 #include <linux/mm.h>
 #include <linux/mutex.h>
-#include <linux/workqueue.h>
 #include <linux/poll.h>
 #include <linux/file.h>
 #include <linux/skbuff.h>
 #include <linux/uio.h>
 #include <linux/virtio_config.h>
 #include <linux/virtio_ring.h>
+#include <asm/atomic.h>
 
 struct vhost_device;
 
@@ -20,19 +20,31 @@ enum {
        VHOST_NET_MAX_SG = MAX_SKB_FRAGS + 2,
 };
 
+struct vhost_work;
+typedef void (*vhost_work_fn_t)(struct vhost_work *work);
+
+struct vhost_work {
+       struct list_head          node;
+       vhost_work_fn_t           fn;
+       wait_queue_head_t         done;
+       int                       flushing;
+       unsigned                  queue_seq;
+       unsigned                  done_seq;
+};
+
 /* Poll a file (eventfd or socket) */
 /* Note: there's nothing vhost specific about this structure. */
 struct vhost_poll {
        poll_table                table;
        wait_queue_head_t        *wqh;
        wait_queue_t              wait;
-       /* struct which will handle all actual work. */
-       struct work_struct        work;
+       struct vhost_work         work;
        unsigned long             mask;
+       struct vhost_dev         *dev;
 };
 
-void vhost_poll_init(struct vhost_poll *poll, work_func_t func,
-                    unsigned long mask);
+void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
+                    unsigned long mask, struct vhost_dev *dev);
 void vhost_poll_start(struct vhost_poll *poll, struct file *file);
 void vhost_poll_stop(struct vhost_poll *poll);
 void vhost_poll_flush(struct vhost_poll *poll);
@@ -63,7 +75,7 @@ struct vhost_virtqueue {
        struct vhost_poll poll;
 
        /* The routine to call when the Guest pings us, or timeout. */
-       work_func_t handle_kick;
+       vhost_work_fn_t handle_kick;
 
        /* Last available index we saw. */
        u16 last_avail_idx;
@@ -84,13 +96,15 @@ struct vhost_virtqueue {
        struct iovec indirect[VHOST_NET_MAX_SG];
        struct iovec iov[VHOST_NET_MAX_SG];
        struct iovec hdr[VHOST_NET_MAX_SG];
-       size_t hdr_size;
+       size_t vhost_hlen;
+       size_t sock_hlen;
+       struct vring_used_elem heads[VHOST_NET_MAX_SG];
        /* We use a kind of RCU to access private pointer.
-        * All readers access it from workqueue, which makes it possible to
-        * flush the workqueue instead of synchronize_rcu. Therefore readers do
+        * All readers access it from worker, which makes it possible to
+        * flush the vhost_work instead of synchronize_rcu. Therefore readers do
         * not need to call rcu_read_lock/rcu_read_unlock: the beginning of
-        * work item execution acts instead of rcu_read_lock() and the end of
-        * work item execution acts instead of rcu_read_lock().
+        * vhost_work execution acts instead of rcu_read_lock() and the end of
+        * vhost_work execution acts instead of rcu_read_unlock().
         * Writers use virtqueue mutex. */
        void *private_data;
        /* Log write descriptors */
@@ -110,6 +124,9 @@ struct vhost_dev {
        int nvqs;
        struct file *log_file;
        struct eventfd_ctx *log_ctx;
+       spinlock_t work_lock;
+       struct list_head work_list;
+       struct task_struct *worker;
 };
 
 long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue *vqs, int nvqs);
@@ -124,21 +141,22 @@ int vhost_get_vq_desc(struct vhost_dev *, struct vhost_virtqueue *,
                      struct iovec iov[], unsigned int iov_count,
                      unsigned int *out_num, unsigned int *in_num,
                      struct vhost_log *log, unsigned int *log_num);
-void vhost_discard_vq_desc(struct vhost_virtqueue *);
+void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);
 
 int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
-void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
+int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
+                    unsigned count);
 void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
-                              unsigned int head, int len);
+                              unsigned int id, int len);
+void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
+                              struct vring_used_elem *heads, unsigned count);
+void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
 void vhost_disable_notify(struct vhost_virtqueue *);
 bool vhost_enable_notify(struct vhost_virtqueue *);
 
 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
                    unsigned int log_num, u64 len);
 
-int vhost_init(void);
-void vhost_cleanup(void);
-
 #define vq_err(vq, fmt, ...) do {                                  \
                pr_debug(pr_fmt(fmt), ##__VA_ARGS__);       \
                if ((vq)->error_ctx)                               \
@@ -149,7 +167,8 @@ enum {
        VHOST_FEATURES = (1 << VIRTIO_F_NOTIFY_ON_EMPTY) |
                         (1 << VIRTIO_RING_F_INDIRECT_DESC) |
                         (1 << VHOST_F_LOG_ALL) |
-                        (1 << VHOST_NET_F_VIRTIO_NET_HDR),
+                        (1 << VHOST_NET_F_VIRTIO_NET_HDR) |
+                        (1 << VIRTIO_NET_F_MRG_RXBUF),
 };
 
 static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
index 2fc8e14cc24a8b6ce36a39d4d929539d38a5710e..9aa9bcadf869ac5948e712532de794a52f52f529 100644 (file)
@@ -276,6 +276,7 @@ ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/kvm_para.h \
                  $(srctree)/include/asm-$(SRCARCH)/kvm_para.h),)
 unifdef-y += kvm_para.h
 endif
+unifdef-y += l2tp.h
 unifdef-y += llc.h
 unifdef-y += loop.h
 unifdef-y += lp.h
diff --git a/include/linux/can/platform/flexcan.h b/include/linux/can/platform/flexcan.h
new file mode 100644 (file)
index 0000000..72b713a
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2010 Marc Kleine-Budde <kernel@pengutronix.de>
+ *
+ * This file is released under the GPLv2
+ *
+ */
+
+#ifndef __CAN_PLATFORM_FLEXCAN_H
+#define __CAN_PLATFORM_FLEXCAN_H
+
+/**
+ * struct flexcan_platform_data - flex CAN controller platform data
+ * @transceiver_switch:         - called to power on/off the transceiver
+ *
+ */
+struct flexcan_platform_data {
+       void (*transceiver_switch)(int enable);
+};
+
+#endif /* __CAN_PLATFORM_FLEXCAN_H */
index 0c621604baa1d7ff8185029d4eaade8ccad82d64..e0aa067d1b11689b89989d3392ab0d0e6da7f4f6 100644 (file)
@@ -570,6 +570,7 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
 void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
 int cgroup_scan_tasks(struct cgroup_scanner *scan);
 int cgroup_attach_task(struct cgroup *, struct task_struct *);
+int cgroup_attach_task_current_cg(struct task_struct *);
 
 /*
  * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
@@ -626,6 +627,12 @@ static inline int cgroupstats_build(struct cgroupstats *stats,
        return -EINVAL;
 }
 
+/* No cgroups - nothing to do */
+static inline int cgroup_attach_task_current_cg(struct task_struct *t)
+{
+       return 0;
+}
+
 #endif /* !CONFIG_CGROUPS */
 
 #endif /* _LINUX_CGROUP_H */
index 3d7a6687d247c32577cf6bad2f7cb483fd434c51..848480bc2bf93846b14e43c0c94493cb2109f889 100644 (file)
@@ -126,6 +126,20 @@ static inline void random_ether_addr(u8 *addr)
        addr [0] |= 0x02;       /* set local assignment bit (IEEE802) */
 }
 
+/**
+ * dev_hw_addr_random - Create random MAC and set device flag
+ * @dev: pointer to net_device structure
+ * @hwaddr: Pointer to a six-byte array containing the Ethernet address
+ *
+ * Generate random MAC to be used by a device and set addr_assign_type
+ * so the state can be read by sysfs and be used by udev.
+ */
+static inline void dev_hw_addr_random(struct net_device *dev, u8 *hwaddr)
+{
+       dev->addr_assign_type |= NET_ADDR_RANDOM;
+       random_ether_addr(hwaddr);
+}
+
 /**
  * compare_ether_addr - Compare two Ethernet addresses
  * @addr1: Pointer to a six-byte array containing the Ethernet address
index e24ce6ea1fa31440bd8b34758770125a043cb24f..35280b302290459ff20293b2e4bcafd5aa6b9b2a 100644 (file)
@@ -72,6 +72,8 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
        }
 }
 
+extern void macvlan_common_setup(struct net_device *dev);
+
 extern int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
                                  struct nlattr *tb[], struct nlattr *data[],
                                  int (*receive)(struct sk_buff *skb),
index da0341b8ca0a3cf19e5b1ba6a9ea109cb9adbae9..14ba4452296e0bcdb3e896cb4145db3225a9455d 100644 (file)
  * struct ks8842_platform_data - Platform data of the KS8842 network driver
  * @macaddr:   The MAC address of the device, set to all 0:s to use the on in
  *             the chip.
+ * @rx_dma_channel:    The DMA channel to use for RX, -1 for none.
+ * @tx_dma_channel:    The DMA channel to use for TX, -1 for none.
  *
  */
 struct ks8842_platform_data {
        u8 macaddr[ETH_ALEN];
+       int rx_dma_channel;
+       int tx_dma_channel;
 };
 
 #endif
index b6262898ece06ffaf9de1ed0b80e6fae15c364f8..1bca6171b1aa2d03691af0de3c89216a1977d9a6 100644 (file)
@@ -66,6 +66,11 @@ struct wireless_dev;
 #define HAVE_FREE_NETDEV               /* free_netdev() */
 #define HAVE_NETDEV_PRIV               /* netdev_priv() */
 
+/* hardware address assignment types */
+#define NET_ADDR_PERM          0       /* address is permanent (default) */
+#define NET_ADDR_RANDOM                1       /* address is generated randomly */
+#define NET_ADDR_STOLEN                2       /* address is stolen from other device */
+
 /* Backlog congestion levels */
 #define NET_RX_SUCCESS         0       /* keep 'em coming, baby */
 #define NET_RX_DROP            1       /* packet dropped */
@@ -919,6 +924,7 @@ struct net_device {
 
        /* Interface address info. */
        unsigned char           perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
+       unsigned char           addr_assign_type; /* hw address assignment type */
        unsigned char           addr_len;       /* hardware address length      */
        unsigned short          dev_id;         /* for shared network cards */
 
index bb103f43afa00463a1e5c99644144f0f7f0bc243..edeeabdc1500bb51c4487a4dc8b8f132ed2cf974 100644 (file)
@@ -3,6 +3,7 @@ header-y += nf_conntrack_tuple_common.h
 header-y += nfnetlink_conntrack.h
 header-y += nfnetlink_log.h
 header-y += nfnetlink_queue.h
+header-y += xt_CHECKSUM.h
 header-y += xt_CLASSIFY.h
 header-y += xt_CONNMARK.h
 header-y += xt_CONNSECMARK.h
@@ -19,17 +20,19 @@ header-y += xt_TCPMSS.h
 header-y += xt_TCPOPTSTRIP.h
 header-y += xt_TEE.h
 header-y += xt_TPROXY.h
+header-y += xt_cluster.h
 header-y += xt_comment.h
 header-y += xt_connbytes.h
 header-y += xt_connlimit.h
 header-y += xt_connmark.h
 header-y += xt_conntrack.h
-header-y += xt_cluster.h
+header-y += xt_cpu.h
 header-y += xt_dccp.h
 header-y += xt_dscp.h
 header-y += xt_esp.h
 header-y += xt_hashlimit.h
 header-y += xt_iprange.h
+header-y += xt_ipvs.h
 header-y += xt_helper.h
 header-y += xt_length.h
 header-y += xt_limit.h
index 1d0b84aa1d4294624213b771b5f87f7d5962cb1d..ea9b8d3805272cdf0378374ef59b3ffaeea67876 100644 (file)
@@ -89,7 +89,7 @@ enum nfulnl_attr_config {
 #define NFULNL_COPY_NONE       0x00
 #define NFULNL_COPY_META       0x01
 #define NFULNL_COPY_PACKET     0x02
-#define NFULNL_COPY_DISABLED   0x03
+/* 0xff is reserved, don't use it for new copy modes. */
 
 #define NFULNL_CFG_F_SEQ       0x0001
 #define NFULNL_CFG_F_SEQ_GLOBAL        0x0002
diff --git a/include/linux/netfilter/xt_CHECKSUM.h b/include/linux/netfilter/xt_CHECKSUM.h
new file mode 100644 (file)
index 0000000..9a2e466
--- /dev/null
@@ -0,0 +1,20 @@
+/* Header file for iptables ipt_CHECKSUM target
+ *
+ * (C) 2002 by Harald Welte <laforge@gnumonks.org>
+ * (C) 2010 Red Hat Inc
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ *
+ * This software is distributed under GNU GPL v2, 1991
+*/
+#ifndef _XT_CHECKSUM_TARGET_H
+#define _XT_CHECKSUM_TARGET_H
+
+#include <linux/types.h>
+
+#define XT_CHECKSUM_OP_FILL    0x01    /* fill in checksum in IP header */
+
+struct xt_CHECKSUM_info {
+       __u8 operation; /* bitset of operations */
+};
+
+#endif /* _XT_CHECKSUM_TARGET_H */
diff --git a/include/linux/netfilter/xt_cpu.h b/include/linux/netfilter/xt_cpu.h
new file mode 100644 (file)
index 0000000..93c7f11
--- /dev/null
@@ -0,0 +1,11 @@
+#ifndef _XT_CPU_H
+#define _XT_CPU_H
+
+#include <linux/types.h>
+
+struct xt_cpu_info {
+       __u32   cpu;
+       __u32   invert;
+};
+
+#endif /*_XT_CPU_H*/
diff --git a/include/linux/netfilter/xt_ipvs.h b/include/linux/netfilter/xt_ipvs.h
new file mode 100644 (file)
index 0000000..1167aeb
--- /dev/null
@@ -0,0 +1,27 @@
+#ifndef _XT_IPVS_H
+#define _XT_IPVS_H
+
+enum {
+       XT_IPVS_IPVS_PROPERTY = 1 << 0, /* all other options imply this one */
+       XT_IPVS_PROTO =         1 << 1,
+       XT_IPVS_VADDR =         1 << 2,
+       XT_IPVS_VPORT =         1 << 3,
+       XT_IPVS_DIR =           1 << 4,
+       XT_IPVS_METHOD =        1 << 5,
+       XT_IPVS_VPORTCTL =      1 << 6,
+       XT_IPVS_MASK =          (1 << 7) - 1,
+       XT_IPVS_ONCE_MASK =     XT_IPVS_MASK & ~XT_IPVS_IPVS_PROPERTY
+};
+
+struct xt_ipvs_mtinfo {
+       union nf_inet_addr      vaddr, vmask;
+       __be16                  vport;
+       __u8                    l4proto;
+       __u8                    fwd_method;
+       __be16                  vportctl;
+
+       __u8                    invert;
+       __u8                    bitmask;
+};
+
+#endif /* _XT_IPVS_H */
index 8dc89dfc1361761578ef2c73fd6dcdce74327f0f..b0d28c659ab75c7a87aa9e4cb956c516bf5aeaf1 100644 (file)
@@ -11,9 +11,9 @@ struct xt_quota_priv;
 struct xt_quota_info {
        u_int32_t               flags;
        u_int32_t               pad;
+       aligned_u64             quota;
 
        /* Used internally by the kernel */
-       aligned_u64             quota;
        struct xt_quota_priv    *master;
 };
 
index ae66851870becb2e496d7994f7a11a548aac16d8..384c2a25db1f0fb849b006ecbd50d585f5b90201 100644 (file)
 #define PCI_DEVICE_ID_SBE_WANXL100     0x0301
 #define PCI_DEVICE_ID_SBE_WANXL200     0x0302
 #define PCI_DEVICE_ID_SBE_WANXL400     0x0104
+#define PCI_SUBDEVICE_ID_SBE_T3E3      0x0009
+#define PCI_SUBDEVICE_ID_SBE_2T3E3_P0  0x0901
+#define PCI_SUBDEVICE_ID_SBE_2T3E3_P1  0x0902
 
 #define PCI_VENDOR_ID_TOSHIBA          0x1179
 #define PCI_DEVICE_ID_TOSHIBA_PICCOLO_1        0x0101
 #define PCI_DEVICE_ID_NX2_57711E       0x1650
 #define PCI_DEVICE_ID_TIGON3_5705      0x1653
 #define PCI_DEVICE_ID_TIGON3_5705_2    0x1654
-#define PCI_DEVICE_ID_TIGON3_5720      0x1658
 #define PCI_DEVICE_ID_TIGON3_5721      0x1659
 #define PCI_DEVICE_ID_TIGON3_5722      0x165a
 #define PCI_DEVICE_ID_TIGON3_5723      0x165b
 #define PCI_DEVICE_ID_TIGON3_5754M     0x1672
 #define PCI_DEVICE_ID_TIGON3_5755M     0x1673
 #define PCI_DEVICE_ID_TIGON3_5756      0x1674
-#define PCI_DEVICE_ID_TIGON3_5750      0x1676
 #define PCI_DEVICE_ID_TIGON3_5751      0x1677
 #define PCI_DEVICE_ID_TIGON3_5715      0x1678
 #define PCI_DEVICE_ID_TIGON3_5715S     0x1679
 #define PCI_DEVICE_ID_TIGON3_5754      0x167a
 #define PCI_DEVICE_ID_TIGON3_5755      0x167b
-#define PCI_DEVICE_ID_TIGON3_5750M     0x167c
 #define PCI_DEVICE_ID_TIGON3_5751M     0x167d
 #define PCI_DEVICE_ID_TIGON3_5751F     0x167e
 #define PCI_DEVICE_ID_TIGON3_5787F     0x167f
index fbc8cb0d48c336d4e71f458ad3f8b8d842a1c918..58d44491880fa79ec30d7c863f609547c4ed7439 100644 (file)
@@ -282,6 +282,7 @@ enum rtattr_type_t {
        RTA_SESSION, /* no longer used */
        RTA_MP_ALGO, /* no longer used */
        RTA_TABLE,
+       RTA_MARK,
        __RTA_MAX
 };
 
index f5aa87e1e0c8c8a8ef4ec34f50129f51a013de4a..d20d9e7a9bbda130e4499813c10c8c3a82c1bebc 100644 (file)
@@ -169,6 +169,7 @@ struct skb_shared_hwtstamps {
  * @software:          generate software time stamp
  * @in_progress:       device driver is going to provide
  *                     hardware time stamp
+ * @prevent_sk_orphan: make sk reference available on driver level
  * @flags:             all shared_tx flags
  *
  * These flags are attached to packets as part of the
@@ -178,7 +179,8 @@ union skb_shared_tx {
        struct {
                __u8    hardware:1,
                        software:1,
-                       in_progress:1;
+                       in_progress:1,
+                       prevent_sk_orphan:1;
        };
        __u8 flags;
 };
@@ -202,10 +204,11 @@ struct skb_shared_info {
         */
        atomic_t        dataref;
 
-       skb_frag_t      frags[MAX_SKB_FRAGS];
        /* Intermediate layers must ensure that destructor_arg
         * remains valid until skb destructor */
        void *          destructor_arg;
+       /* must be last field, see pskb_expand_head() */
+       skb_frag_t      frags[MAX_SKB_FRAGS];
 };
 
 /* We divide dataref into two halves.  The higher 16 bits hold references
index fe82b1e10a296b4892d5ccb9c4ad7ba94b8f82a9..a4747a0f7303ab73b1dffa7cd9a66740c0ef0ffe 100644 (file)
@@ -632,10 +632,22 @@ extern struct ip_vs_conn *ip_vs_ct_in_get
 (int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
  const union nf_inet_addr *d_addr, __be16 d_port);
 
+struct ip_vs_conn * ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
+                                           struct ip_vs_protocol *pp,
+                                           const struct ip_vs_iphdr *iph,
+                                           unsigned int proto_off,
+                                           int inverse);
+
 extern struct ip_vs_conn *ip_vs_conn_out_get
 (int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
  const union nf_inet_addr *d_addr, __be16 d_port);
 
+struct ip_vs_conn * ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
+                                            struct ip_vs_protocol *pp,
+                                            const struct ip_vs_iphdr *iph,
+                                            unsigned int proto_off,
+                                            int inverse);
+
 /* put back the conn without restarting its timer */
 static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
 {
@@ -736,8 +748,6 @@ extern void ip_vs_app_inc_put(struct ip_vs_app *inc);
 
 extern int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb);
 extern int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb);
-extern int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri,
-                            char *o_buf, int o_len, char *n_buf, int n_len);
 extern int ip_vs_app_init(void);
 extern void ip_vs_app_cleanup(void);
 
index 7e582061b230dd9dce935059d959572cbba7cc9a..3bed61d379a8aa1de70fe25935810c830cd70ac2 100644 (file)
@@ -53,10 +53,6 @@ typedef __u32 magic_t;
 #ifndef IRDA_ALIGN
 #  define IRDA_ALIGN __attribute__((aligned))
 #endif
-#ifndef IRDA_PACK
-#  define IRDA_PACK __attribute__((packed))
-#endif
-
 
 #ifdef CONFIG_IRDA_DEBUG
 
index 641f88e848bd3d57ff446517afb1d596652ebf22..6b1dc4f8eca58a231ff481ee6fd5be8ac1a8f15c 100644 (file)
@@ -85,7 +85,7 @@ struct discovery_t;
 struct disc_frame {
        __u8 caddr;          /* Connection address */
        __u8 control;
-} IRDA_PACK;
+} __packed;
 
 struct xid_frame {
        __u8  caddr; /* Connection address */
@@ -96,41 +96,41 @@ struct xid_frame {
        __u8  flags; /* Discovery flags */
        __u8  slotnr;
        __u8  version;
-} IRDA_PACK;
+} __packed;
 
 struct test_frame {
        __u8 caddr;          /* Connection address */
        __u8 control;
        __le32 saddr;         /* Source device address */
        __le32 daddr;         /* Destination device address */
-} IRDA_PACK;
+} __packed;
 
 struct ua_frame {
        __u8 caddr;
        __u8 control;
        __le32 saddr; /* Source device address */
        __le32 daddr; /* Dest device address */
-} IRDA_PACK;
+} __packed;
 
 struct dm_frame {
        __u8 caddr;          /* Connection address */
        __u8 control;
-} IRDA_PACK;
+} __packed;
 
 struct rd_frame {
        __u8 caddr;          /* Connection address */
        __u8 control;
-} IRDA_PACK;
+} __packed;
 
 struct rr_frame {
        __u8 caddr;          /* Connection address */
        __u8 control;
-} IRDA_PACK;
+} __packed;
 
 struct i_frame {
        __u8 caddr;
        __u8 control;
-} IRDA_PACK;
+} __packed;
 
 struct snrm_frame {
        __u8  caddr;
@@ -138,7 +138,7 @@ struct snrm_frame {
        __le32 saddr;
        __le32 daddr;
        __u8  ncaddr;
-} IRDA_PACK;
+} __packed;
 
 void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb);
 void irlap_send_discovery_xid_frame(struct irlap_cb *, int S, __u8 s, 
index f85fc8a140dc174f2b50f6d8f1382ad636571a20..b0787a1dea904c69c3ca5f29c0674b54053c41bb 100644 (file)
@@ -419,7 +419,7 @@ struct ieee80211_tx_rate {
        s8 idx;
        u8 count;
        u8 flags;
-} __attribute__((packed));
+} __packed;
 
 /**
  * struct ieee80211_tx_info - skb transmit information
index 32d15bd6efa3c573030e899bb979d98dcc811ee9..0772d296dfdb12b26a4bd8d97fecdd914c088536 100644 (file)
@@ -28,9 +28,14 @@ struct nf_ct_ext {
        char data[0];
 };
 
-static inline int nf_ct_ext_exist(const struct nf_conn *ct, u8 id)
+static inline bool __nf_ct_ext_exist(const struct nf_ct_ext *ext, u8 id)
 {
-       return (ct->ext && ct->ext->offset[id]);
+       return !!ext->offset[id];
+}
+
+static inline bool nf_ct_ext_exist(const struct nf_conn *ct, u8 id)
+{
+       return (ct->ext && __nf_ct_ext_exist(ct->ext, id));
 }
 
 static inline void *__nf_ct_ext_find(const struct nf_conn *ct, u8 id)
index c398017ccfa3d364e1d43b5b541405f0a1f575e4..df17bac46bf5ef81cc4a1f3114e99eb54a913758 100644 (file)
@@ -27,9 +27,9 @@ struct nf_nat_protocol {
 
        /* Alter the per-proto part of the tuple (depending on
           maniptype), to give a unique tuple in the given range if
-          possible; return false if not.  Per-protocol part of tuple
-          is initialized to the incoming packet. */
-       bool (*unique_tuple)(struct nf_conntrack_tuple *tuple,
+          possible.  Per-protocol part of tuple is initialized to the
+          incoming packet. */
+       void (*unique_tuple)(struct nf_conntrack_tuple *tuple,
                             const struct nf_nat_range *range,
                             enum nf_nat_manip_type maniptype,
                             const struct nf_conn *ct);
@@ -63,7 +63,7 @@ extern bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple,
                                  const union nf_conntrack_man_proto *min,
                                  const union nf_conntrack_man_proto *max);
 
-extern bool nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
+extern void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
                                      const struct nf_nat_range *range,
                                      enum nf_nat_manip_type maniptype,
                                      const struct nf_conn *ct,
index b0569ff0775ed649fd07fa460bfcbd43cc91f746..e2dec42c2db2b5c07afc98165b140b76aa599f56 100644 (file)
@@ -10,5 +10,7 @@ nfulnl_log_packet(u_int8_t pf,
                  const struct nf_loginfo *li_user,
                  const char *prefix);
 
+#define NFULNL_COPY_DISABLED    0xff
+
 #endif /* _KER_NFNETLINK_LOG_H */
 
index ceac661cdfd5fdddb29314389f1e3e4bbca320ba..cfe2943690ff298cd3e938ecef2ddb8d49dfc229 100644 (file)
@@ -9,6 +9,7 @@ struct tcf_mirred {
        int                     tcfm_ifindex;
        int                     tcfm_ok_push;
        struct net_device       *tcfm_dev;
+       struct list_head        tcfm_list;
 };
 #define to_mirred(pc) \
        container_of(pc, struct tcf_mirred, common)
index 422cb19f156ef9b7a6f5ab8968ade8a618ea5d88..37642ad9cca8e351e0deff696436701f2b62c6c0 100644 (file)
@@ -1788,6 +1788,29 @@ out:
        return retval;
 }
 
+/**
+ * cgroup_attach_task_current_cg - attach task 'tsk' to current task's cgroup
+ * @tsk: the task to be attached
+ */
+int cgroup_attach_task_current_cg(struct task_struct *tsk)
+{
+       struct cgroupfs_root *root;
+       struct cgroup *cur_cg;
+       int retval = 0;
+
+       cgroup_lock();
+       for_each_active_root(root) {
+               cur_cg = task_cgroup_from_root(current, root);
+               retval = cgroup_attach_task(cur_cg, tsk);
+               if (retval)
+                       break;
+       }
+       cgroup_unlock();
+
+       return retval;
+}
+EXPORT_SYMBOL_GPL(cgroup_attach_task_current_cg);
+
 /*
  * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex
  * held. May take task_lock of task
index b3250944cde9257ec7e31c1f736bea6f72c350fe..e24fa0873f32efff6fb48ffa06354b14f7e8ba63 100644 (file)
@@ -32,7 +32,7 @@ config WANT_COMPAT_NETLINK_MESSAGES
 config COMPAT_NETLINK_MESSAGES
        def_bool y
        depends on COMPAT
-       depends on WIRELESS_EXT || WANT_COMPAT_NETLINK_MESSAGES
+       depends on WEXT_CORE || WANT_COMPAT_NETLINK_MESSAGES
        help
          This option makes it possible to send different netlink messages
          to tasks depending on whether the task is a compat task or not. To
index 075c435ad22d85810fbeecbae807309cca3582bb..cf09fe591fc20cc73a2fdd20d13860f88c33134d 100644 (file)
@@ -22,7 +22,7 @@
 #include <asm/uaccess.h>
 #include "br_private.h"
 
-/* net device transmit always called with no BH (preempt_disabled) */
+/* net device transmit always called with BH disabled */
 netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct net_bridge *br = netdev_priv(dev);
@@ -48,13 +48,16 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
        skb_reset_mac_header(skb);
        skb_pull(skb, ETH_HLEN);
 
+       rcu_read_lock();
        if (is_multicast_ether_addr(dest)) {
                if (unlikely(netpoll_tx_running(dev))) {
                        br_flood_deliver(br, skb);
                        goto out;
                }
-               if (br_multicast_rcv(br, NULL, skb))
+               if (br_multicast_rcv(br, NULL, skb)) {
+                       kfree_skb(skb);
                        goto out;
+               }
 
                mdst = br_mdb_get(br, skb);
                if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb))
@@ -67,6 +70,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
                br_flood_deliver(br, skb);
 
 out:
+       rcu_read_unlock();
        return NETDEV_TX_OK;
 }
 
index a744296fc675a9b79d65f07eef2a113c834a8607..90512ccfd3e973c19adade047de7eda77ffd2963 100644 (file)
@@ -214,7 +214,7 @@ void br_fdb_delete_by_port(struct net_bridge *br,
        spin_unlock_bh(&br->hash_lock);
 }
 
-/* No locking or refcounting, assumes caller has no preempt (rcu_read_lock) */
+/* No locking or refcounting, assumes caller has rcu_read_lock */
 struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
                                          const unsigned char *addr)
 {
index 5fc1c5b1c36054dcd4b44c521691429c1bc6b167..826cd5221536cadd3b0d13dfec2e9d3639b92a93 100644 (file)
@@ -39,7 +39,7 @@ static int br_pass_frame_up(struct sk_buff *skb)
                       netif_receive_skb);
 }
 
-/* note: already called with rcu_read_lock (preempt_disabled) */
+/* note: already called with rcu_read_lock */
 int br_handle_frame_finish(struct sk_buff *skb)
 {
        const unsigned char *dest = eth_hdr(skb)->h_dest;
@@ -110,7 +110,7 @@ drop:
        goto out;
 }
 
-/* note: already called with rcu_read_lock (preempt_disabled) */
+/* note: already called with rcu_read_lock */
 static int br_handle_local_finish(struct sk_buff *skb)
 {
        struct net_bridge_port *p = br_port_get_rcu(skb->dev);
@@ -133,8 +133,7 @@ static inline int is_link_local(const unsigned char *dest)
 
 /*
  * Return NULL if skb is handled
- * note: already called with rcu_read_lock (preempt_disabled) from
- * netif_receive_skb
+ * note: already called with rcu_read_lock
  */
 struct sk_buff *br_handle_frame(struct sk_buff *skb)
 {
index 85afcdab4921c1b220da6391a581e7bf2199b5ca..eb5b256ffc8801ff7e187c64ee3dde5da268f570 100644 (file)
@@ -1728,13 +1728,9 @@ unlock:
 int br_multicast_toggle(struct net_bridge *br, unsigned long val)
 {
        struct net_bridge_port *port;
-       int err = -ENOENT;
+       int err = 0;
 
        spin_lock(&br->multicast_lock);
-       if (!netif_running(br->dev))
-               goto unlock;
-
-       err = 0;
        if (br->multicast_disabled == !val)
                goto unlock;
 
@@ -1742,6 +1738,9 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
        if (br->multicast_disabled)
                goto unlock;
 
+       if (!netif_running(br->dev))
+               goto unlock;
+
        if (br->mdb) {
                if (br->mdb->old) {
                        err = -EEXIST;
index 70aecb48fb69b80e608e65af295645f984b3acd4..35cf27087b561d6e9955fd75b4b03213a6e9e8d8 100644 (file)
@@ -131,7 +131,7 @@ void br_send_tcn_bpdu(struct net_bridge_port *p)
 /*
  * Called from llc.
  *
- * NO locks, but rcu_read_lock (preempt_disabled)
+ * NO locks, but rcu_read_lock
  */
 void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
                struct net_device *dev)
index 4b04d25b6a3f2a1c9d93e75d65dc035ae38fc1c1..eb1602022ac0643af4e4ad655c3de4ae42a200f4 100644 (file)
@@ -193,7 +193,7 @@ out:
 
 static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
 {
-       caif_assert(!cfpkt_getlen(pkt) < rfml->fragment_size);
+       caif_assert(cfpkt_getlen(pkt) >= rfml->fragment_size);
 
        /* Add info for MUX-layer to route the packet out. */
        cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;
index ccfe633eec8e0df4701cd9ff4ad8b57975cb8f88..a10e3338f084aaf15eb73b40d46f1f28e96aaaf7 100644 (file)
@@ -650,6 +650,10 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
        err = sock_tx_timestamp(msg, sk, skb_tx(skb));
        if (err < 0)
                goto free_skb;
+
+       /* to be able to check the received tx sock reference in raw_rcv() */
+       skb_tx(skb)->prevent_sk_orphan = 1;
+
        skb->dev = dev;
        skb->sk  = sk;
 
index 6e1b4370781cc433570a467822c6d7da66863aa2..e1c1cdcc2bb0429d65fea408625e9dec0c317a59 100644 (file)
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/stat.h>
-#include <linux/if_bridge.h>
-#include <linux/if_macvlan.h>
 #include <net/dst.h>
 #include <net/pkt_sched.h>
 #include <net/checksum.h>
@@ -1484,6 +1482,7 @@ static inline void net_timestamp_check(struct sk_buff *skb)
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 {
        skb_orphan(skb);
+       nf_reset(skb);
 
        if (!(dev->flags & IFF_UP) ||
            (skb->len > (dev->mtu + dev->hard_header_len))) {
@@ -1592,9 +1591,7 @@ EXPORT_SYMBOL(__netif_schedule);
 
 void dev_kfree_skb_irq(struct sk_buff *skb)
 {
-       if (!skb->destructor)
-               dev_kfree_skb(skb);
-       else if (atomic_dec_and_test(&skb->users)) {
+       if (atomic_dec_and_test(&skb->users)) {
                struct softnet_data *sd;
                unsigned long flags;
 
@@ -2645,10 +2642,10 @@ static int ing_filter(struct sk_buff *skb)
        int result = TC_ACT_OK;
        struct Qdisc *q;
 
-       if (MAX_RED_LOOP < ttl++) {
-               printk(KERN_WARNING
-                      "Redir loop detected Dropping packet (%d->%d)\n",
-                      skb->skb_iif, dev->ifindex);
+       if (unlikely(MAX_RED_LOOP < ttl++)) {
+               if (net_ratelimit())
+                       pr_warning( "Redir loop detected Dropping packet (%d->%d)\n",
+                              skb->skb_iif, dev->ifindex);
                return TC_ACT_SHOT;
        }
 
index 646ef3bc72005ad2d692c88af576e312b8425cb1..36e603c78ce9fd3a5f989716d59bff20a2699293 100644 (file)
@@ -347,9 +347,9 @@ static struct notifier_block dropmon_net_notifier = {
 
 static int __init init_net_drop_monitor(void)
 {
-       int cpu;
-       int rc, i, ret;
        struct per_cpu_dm_data *data;
+       int cpu, rc;
+
        printk(KERN_INFO "Initalizing network drop monitor service\n");
 
        if (sizeof(void *) > 8) {
@@ -357,21 +357,12 @@ static int __init init_net_drop_monitor(void)
                return -ENOSPC;
        }
 
-       if (genl_register_family(&net_drop_monitor_family) < 0) {
+       rc = genl_register_family_with_ops(&net_drop_monitor_family,
+                                          dropmon_ops,
+                                          ARRAY_SIZE(dropmon_ops));
+       if (rc) {
                printk(KERN_ERR "Could not create drop monitor netlink family\n");
-               return -EFAULT;
-       }
-
-       rc = -EFAULT;
-
-       for (i = 0; i < ARRAY_SIZE(dropmon_ops); i++) {
-               ret = genl_register_ops(&net_drop_monitor_family,
-                                       &dropmon_ops[i]);
-               if (ret) {
-                       printk(KERN_CRIT "Failed to register operation %d\n",
-                               dropmon_ops[i].cmd);
-                       goto out_unreg;
-               }
+               return rc;
        }
 
        rc = register_netdevice_notifier(&dropmon_net_notifier);
index d2b596537d416f2b0f3fd3daaae5e9f9cc931be4..af4dfbadf2a09ea496653990ae8e1da823a059e1 100644 (file)
@@ -95,6 +95,7 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
 }
 
 NETDEVICE_SHOW(dev_id, fmt_hex);
+NETDEVICE_SHOW(addr_assign_type, fmt_dec);
 NETDEVICE_SHOW(addr_len, fmt_dec);
 NETDEVICE_SHOW(iflink, fmt_dec);
 NETDEVICE_SHOW(ifindex, fmt_dec);
@@ -295,6 +296,7 @@ static ssize_t show_ifalias(struct device *dev,
 }
 
 static struct device_attribute net_class_attributes[] = {
+       __ATTR(addr_assign_type, S_IRUGO, show_addr_assign_type, NULL),
        __ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
        __ATTR(dev_id, S_IRUGO, show_dev_id, NULL),
        __ATTR(ifalias, S_IRUGO | S_IWUSR, show_ifalias, store_ifalias),
index c2b7a8bed8f64e4474a75a563cc930c1f9eb326d..537e01afd81baf1e9bc7269e0c97ca3fb3844ffe 100644 (file)
@@ -49,6 +49,7 @@ static atomic_t trapped;
                (MAX_UDP_CHUNK + sizeof(struct udphdr) + \
                                sizeof(struct iphdr) + sizeof(struct ethhdr))
 
+static void zap_completion_queue(void);
 static void arp_reply(struct sk_buff *skb);
 
 static unsigned int carrier_timeout = 4;
@@ -196,6 +197,7 @@ void netpoll_poll_dev(struct net_device *dev)
 
        service_arp_queue(dev->npinfo);
 
+       zap_completion_queue();
 }
 EXPORT_SYMBOL(netpoll_poll_dev);
 
@@ -221,11 +223,40 @@ static void refill_skbs(void)
        spin_unlock_irqrestore(&skb_pool.lock, flags);
 }
 
+static void zap_completion_queue(void)
+{
+       unsigned long flags;
+       struct softnet_data *sd = &get_cpu_var(softnet_data);
+
+       if (sd->completion_queue) {
+               struct sk_buff *clist;
+
+               local_irq_save(flags);
+               clist = sd->completion_queue;
+               sd->completion_queue = NULL;
+               local_irq_restore(flags);
+
+               while (clist != NULL) {
+                       struct sk_buff *skb = clist;
+                       clist = clist->next;
+                       if (skb->destructor) {
+                               atomic_inc(&skb->users);
+                               dev_kfree_skb_any(skb); /* put this one back */
+                       } else {
+                               __kfree_skb(skb);
+                       }
+               }
+       }
+
+       put_cpu_var(softnet_data);
+}
+
 static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
 {
        int count = 0;
        struct sk_buff *skb;
 
+       zap_completion_queue();
        refill_skbs();
 repeat:
 
index 24a19debda1b3254370690e61d8fc73939b0297b..10a1ea72010d329295ce3936228aa5bffe7dc745 100644 (file)
@@ -1434,18 +1434,12 @@ static ssize_t pktgen_if_write(struct file *file,
                i += len;
 
                for (*m = 0; *v && m < pkt_dev->dst_mac + 6; v++) {
-                       if (*v >= '0' && *v <= '9') {
-                               *m *= 16;
-                               *m += *v - '0';
-                       }
-                       if (*v >= 'A' && *v <= 'F') {
-                               *m *= 16;
-                               *m += *v - 'A' + 10;
-                       }
-                       if (*v >= 'a' && *v <= 'f') {
-                               *m *= 16;
-                               *m += *v - 'a' + 10;
-                       }
+                       int value;
+
+                       value = hex_to_bin(*v);
+                       if (value >= 0)
+                               *m = *m * 16 + value;
+
                        if (*v == ':') {
                                m++;
                                *m = 0;
@@ -1476,18 +1470,12 @@ static ssize_t pktgen_if_write(struct file *file,
                i += len;
 
                for (*m = 0; *v && m < pkt_dev->src_mac + 6; v++) {
-                       if (*v >= '0' && *v <= '9') {
-                               *m *= 16;
-                               *m += *v - '0';
-                       }
-                       if (*v >= 'A' && *v <= 'F') {
-                               *m *= 16;
-                               *m += *v - 'A' + 10;
-                       }
-                       if (*v >= 'a' && *v <= 'f') {
-                               *m *= 16;
-                               *m += *v - 'a' + 10;
-                       }
+                       int value;
+
+                       value = hex_to_bin(*v);
+                       if (value >= 0)
+                               *m = *m * 16 + value;
+
                        if (*v == ':') {
                                m++;
                                *m = 0;
index 76d33ca5f037a5c886052e9e7aff206ee266c654..3a2513f0d0c3036bf6c3f688f7e94b3e1d8f197c 100644 (file)
@@ -817,7 +817,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
        memcpy(data + nhead, skb->head, skb->tail - skb->head);
 #endif
        memcpy(data + size, skb_end_pointer(skb),
-              sizeof(struct skb_shared_info));
+              offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                get_page(skb_shinfo(skb)->frags[i].page);
@@ -843,7 +843,9 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
        skb->network_header   += off;
        if (skb_mac_header_was_set(skb))
                skb->mac_header += off;
-       skb->csum_start       += nhead;
+       /* Only adjust this if it actually is csum_start rather than csum */
+       if (skb->ip_summed == CHECKSUM_PARTIAL)
+               skb->csum_start += nhead;
        skb->cloned   = 0;
        skb->hdr_len  = 0;
        skb->nohdr    = 0;
@@ -930,7 +932,8 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
        copy_skb_header(n, skb);
 
        off                  = newheadroom - oldheadroom;
-       n->csum_start       += off;
+       if (n->ip_summed == CHECKSUM_PARTIAL)
+               n->csum_start += off;
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
        n->transport_header += off;
        n->network_header   += off;
index 6652bd9da676bbb83573de7f1156ead2658337ee..04b69896df5fc743021efd4d4a1705b7de146333 100644 (file)
@@ -446,7 +446,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
        int ptr;
        struct net_device *dev;
        struct sk_buff *skb2;
-       unsigned int mtu, hlen, left, len, ll_rs, pad;
+       unsigned int mtu, hlen, left, len, ll_rs;
        int offset;
        __be16 not_last_frag;
        struct rtable *rt = skb_rtable(skb);
@@ -585,9 +585,7 @@ slow_path:
        /* for bridged IP traffic encapsulated inside f.e. a vlan header,
         * we need to make room for the encapsulating header
         */
-       pad = nf_bridge_pad(skb);
-       ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, pad);
-       mtu -= pad;
+       ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, nf_bridge_pad(skb));
 
        /*
         *      Fragment the datagram.
index 16c0ba0a272840c499d6e45b482133d1634d22b3..6bccba31d13208d03f042002f5808c3396c1f37f 100644 (file)
@@ -283,16 +283,13 @@ unsigned int arpt_do_table(struct sk_buff *skb,
        arp = arp_hdr(skb);
        do {
                const struct arpt_entry_target *t;
-               int hdr_len;
 
                if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) {
                        e = arpt_next_entry(e);
                        continue;
                }
 
-               hdr_len = sizeof(*arp) + (2 * sizeof(struct in_addr)) +
-                       (2 * skb->dev->addr_len);
-               ADD_COUNTER(e->counters, hdr_len, 1);
+               ADD_COUNTER(e->counters, arp_hdr_len(skb->dev), 1);
 
                t = arpt_get_target_c(e);
 
@@ -713,7 +710,7 @@ static void get_counters(const struct xt_table_info *t,
        struct arpt_entry *iter;
        unsigned int cpu;
        unsigned int i;
-       unsigned int curcpu;
+       unsigned int curcpu = get_cpu();
 
        /* Instead of clearing (by a previous call to memset())
         * the counters and using adds, we set the counters
@@ -723,14 +720,16 @@ static void get_counters(const struct xt_table_info *t,
         * if new softirq were to run and call ipt_do_table
         */
        local_bh_disable();
-       curcpu = smp_processor_id();
-
        i = 0;
        xt_entry_foreach(iter, t->entries[curcpu], t->size) {
                SET_COUNTER(counters[i], iter->counters.bcnt,
                            iter->counters.pcnt);
                ++i;
        }
+       local_bh_enable();
+       /* Processing counters from other cpus, we can let bottom half enabled,
+        * (preemption is disabled)
+        */
 
        for_each_possible_cpu(cpu) {
                if (cpu == curcpu)
@@ -744,7 +743,7 @@ static void get_counters(const struct xt_table_info *t,
                }
                xt_info_wrunlock(cpu);
        }
-       local_bh_enable();
+       put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
index b38c11810c65938513282a9e8a0699317dad231a..c439721b165a6369acd1bd2ea1d64d4b0580b1bb 100644 (file)
@@ -364,7 +364,7 @@ ipt_do_table(struct sk_buff *skb,
                                goto no_match;
                }
 
-               ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
+               ADD_COUNTER(e->counters, skb->len, 1);
 
                t = ipt_get_target(e);
                IP_NF_ASSERT(t->u.kernel.target);
@@ -884,7 +884,7 @@ get_counters(const struct xt_table_info *t,
        struct ipt_entry *iter;
        unsigned int cpu;
        unsigned int i;
-       unsigned int curcpu;
+       unsigned int curcpu = get_cpu();
 
        /* Instead of clearing (by a previous call to memset())
         * the counters and using adds, we set the counters
@@ -894,14 +894,16 @@ get_counters(const struct xt_table_info *t,
         * if new softirq were to run and call ipt_do_table
         */
        local_bh_disable();
-       curcpu = smp_processor_id();
-
        i = 0;
        xt_entry_foreach(iter, t->entries[curcpu], t->size) {
                SET_COUNTER(counters[i], iter->counters.bcnt,
                            iter->counters.pcnt);
                ++i;
        }
+       local_bh_enable();
+       /* Processing counters from other cpus, we can let bottom half enabled,
+        * (preemption is disabled)
+        */
 
        for_each_possible_cpu(cpu) {
                if (cpu == curcpu)
@@ -915,7 +917,7 @@ get_counters(const struct xt_table_info *t,
                }
                xt_info_wrunlock(cpu);
        }
-       local_bh_enable();
+       put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
index 64d0875f519245762748055f40be0cc291be7e3c..3a43cf36db8701f9b8a99317a60da0e3ac2ad8f4 100644 (file)
@@ -469,7 +469,7 @@ struct arp_payload {
        __be32 src_ip;
        u_int8_t dst_hw[ETH_ALEN];
        __be32 dst_ip;
-} __attribute__ ((packed));
+} __packed;
 
 #ifdef DEBUG
 static void arp_print(struct arp_payload *payload)
index bbbd2736c549ca2652e7b64b856667eef60d3616..b254dafaf4294548b7d36d9b54ce29832f8658e3 100644 (file)
@@ -95,10 +95,11 @@ static void send_reset(struct sk_buff *oldskb, int hook)
        }
 
        tcph->rst       = 1;
-       tcph->check     = tcp_v4_check(sizeof(struct tcphdr),
-                                      niph->saddr, niph->daddr,
-                                      csum_partial(tcph,
-                                                   sizeof(struct tcphdr), 0));
+       tcph->check = ~tcp_v4_check(sizeof(struct tcphdr), niph->saddr,
+                                   niph->daddr, 0);
+       nskb->ip_summed = CHECKSUM_PARTIAL;
+       nskb->csum_start = (unsigned char *)tcph - nskb->head;
+       nskb->csum_offset = offsetof(struct tcphdr, check);
 
        addr_type = RTN_UNSPEC;
        if (hook != NF_INET_FORWARD
@@ -115,7 +116,6 @@ static void send_reset(struct sk_buff *oldskb, int hook)
                goto free_nskb;
 
        niph->ttl       = dst_metric(skb_dst(nskb), RTAX_HOPLIMIT);
-       nskb->ip_summed = CHECKSUM_NONE;
 
        /* "Never happens" */
        if (nskb->len > dst_mtu(skb_dst(nskb)))
index c7719b283ada4c0e9628dbfd2fbc8a29e8da3fdd..8c8632d9b93cead0cd115945a9566d1e57829667 100644 (file)
@@ -261,14 +261,9 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
        rcu_read_lock();
        proto = __nf_nat_proto_find(orig_tuple->dst.protonum);
 
-       /* Change protocol info to have some randomization */
-       if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) {
-               proto->unique_tuple(tuple, range, maniptype, ct);
-               goto out;
-       }
-
        /* Only bother mapping if it's not already in range and unique */
-       if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
+       if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM) &&
+           (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
             proto->in_range(tuple, maniptype, &range->min, &range->max)) &&
            !nf_nat_used_tuple(tuple, ct))
                goto out;
@@ -440,7 +435,7 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
        if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
                return 0;
 
-       inside = (void *)skb->data + ip_hdrlen(skb);
+       inside = (void *)skb->data + hdrlen;
 
        /* We're actually going to mangle it beyond trivial checksum
           adjustment, so make sure the current checksum is correct. */
@@ -470,12 +465,10 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
        /* rcu_read_lock()ed by nf_hook_slow */
        l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol);
 
-       if (!nf_ct_get_tuple(skb,
-                            ip_hdrlen(skb) + sizeof(struct icmphdr),
-                            (ip_hdrlen(skb) +
+       if (!nf_ct_get_tuple(skb, hdrlen + sizeof(struct icmphdr),
+                            (hdrlen +
                              sizeof(struct icmphdr) + inside->ip.ihl * 4),
-                            (u_int16_t)AF_INET,
-                            inside->ip.protocol,
+                            (u_int16_t)AF_INET, inside->ip.protocol,
                             &inner, l3proto, l4proto))
                return 0;
 
@@ -484,15 +477,13 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
           pass all hooks (locally-generated ICMP).  Consider incoming
           packet: PREROUTING (DST manip), routing produces ICMP, goes
           through POSTROUTING (which must correct the DST manip). */
-       if (!manip_pkt(inside->ip.protocol, skb,
-                      ip_hdrlen(skb) + sizeof(inside->icmp),
-                      &ct->tuplehash[!dir].tuple,
-                      !manip))
+       if (!manip_pkt(inside->ip.protocol, skb, hdrlen + sizeof(inside->icmp),
+                      &ct->tuplehash[!dir].tuple, !manip))
                return 0;
 
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
                /* Reloading "inside" here since manip_pkt inner. */
-               inside = (void *)skb->data + ip_hdrlen(skb);
+               inside = (void *)skb->data + hdrlen;
                inside->icmp.checksum = 0;
                inside->icmp.checksum =
                        csum_fold(skb_checksum(skb, hdrlen,
index 6c4f11f514461a5a244ba1d70180f42ad82940b4..3e61faf23a9a0c8636433dd9a1805274dfce4691 100644 (file)
@@ -34,7 +34,7 @@ bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple,
 }
 EXPORT_SYMBOL_GPL(nf_nat_proto_in_range);
 
-bool nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
+void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
                               const struct nf_nat_range *range,
                               enum nf_nat_manip_type maniptype,
                               const struct nf_conn *ct,
@@ -53,7 +53,7 @@ bool nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
        if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
                /* If it's dst rewrite, can't change port */
                if (maniptype == IP_NAT_MANIP_DST)
-                       return false;
+                       return;
 
                if (ntohs(*portptr) < 1024) {
                        /* Loose convention: >> 512 is credential passing */
@@ -81,15 +81,15 @@ bool nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
        else
                off = *rover;
 
-       for (i = 0; i < range_size; i++, off++) {
+       for (i = 0; ; ++off) {
                *portptr = htons(min + off % range_size);
-               if (nf_nat_used_tuple(tuple, ct))
+               if (++i != range_size && nf_nat_used_tuple(tuple, ct))
                        continue;
                if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM))
                        *rover = off;
-               return true;
+               return;
        }
-       return false;
+       return;
 }
 EXPORT_SYMBOL_GPL(nf_nat_proto_unique_tuple);
 
index 22485ce306d41f2d175786bdc9fc7f20160306e2..570faf2667b26e70f3f7ddc6e2f9c89cdbfdcfd9 100644 (file)
 
 static u_int16_t dccp_port_rover;
 
-static bool
+static void
 dccp_unique_tuple(struct nf_conntrack_tuple *tuple,
                  const struct nf_nat_range *range,
                  enum nf_nat_manip_type maniptype,
                  const struct nf_conn *ct)
 {
-       return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
-                                        &dccp_port_rover);
+       nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
+                                 &dccp_port_rover);
 }
 
 static bool
index d7e89201351e90d01bbbac1666e913f90e036dbf..bc8d83a31c73ae4abe371e74c7b0df3e6068c03a 100644 (file)
@@ -37,7 +37,7 @@ MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
 MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE");
 
 /* generate unique tuple ... */
-static bool
+static void
 gre_unique_tuple(struct nf_conntrack_tuple *tuple,
                 const struct nf_nat_range *range,
                 enum nf_nat_manip_type maniptype,
@@ -50,7 +50,7 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple,
        /* If there is no master conntrack we are not PPTP,
           do not change tuples */
        if (!ct->master)
-               return false;
+               return;
 
        if (maniptype == IP_NAT_MANIP_SRC)
                keyptr = &tuple->src.u.gre.key;
@@ -68,14 +68,14 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple,
 
        pr_debug("min = %u, range_size = %u\n", min, range_size);
 
-       for (i = 0; i < range_size; i++, key++) {
+       for (i = 0; ; ++key) {
                *keyptr = htons(min + key % range_size);
-               if (!nf_nat_used_tuple(tuple, ct))
-                       return true;
+               if (++i == range_size || !nf_nat_used_tuple(tuple, ct))
+                       return;
        }
 
        pr_debug("%p: no NAT mapping\n", ct);
-       return false;
+       return;
 }
 
 /* manipulate a GRE packet according to maniptype */
index 19a8b0b07d8e7d80620446939e80aa3966c84ace..5744c3ec847cc26440204b7584fdd60d857f7964 100644 (file)
@@ -27,7 +27,7 @@ icmp_in_range(const struct nf_conntrack_tuple *tuple,
               ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);
 }
 
-static bool
+static void
 icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
                  const struct nf_nat_range *range,
                  enum nf_nat_manip_type maniptype,
@@ -42,13 +42,13 @@ icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
        if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED))
                range_size = 0xFFFF;
 
-       for (i = 0; i < range_size; i++, id++) {
+       for (i = 0; ; ++id) {
                tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) +
                                             (id % range_size));
-               if (!nf_nat_used_tuple(tuple, ct))
-                       return true;
+               if (++i == range_size || !nf_nat_used_tuple(tuple, ct))
+                       return;
        }
-       return false;
+       return;
 }
 
 static bool
index 3fc598eeeb1a57cc8fc76f1c6ed7c1220ce21326..756331d42661cf12b2964256a8204b7a588f92f8 100644 (file)
 
 static u_int16_t nf_sctp_port_rover;
 
-static bool
+static void
 sctp_unique_tuple(struct nf_conntrack_tuple *tuple,
                  const struct nf_nat_range *range,
                  enum nf_nat_manip_type maniptype,
                  const struct nf_conn *ct)
 {
-       return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
-                                        &nf_sctp_port_rover);
+       nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
+                                 &nf_sctp_port_rover);
 }
 
 static bool
index 399e2cfa263b72eaa8a2c49b078983cd67399914..aa460a595d5d930e019f9bd36eb010251a5dd33e 100644 (file)
 
 static u_int16_t tcp_port_rover;
 
-static bool
+static void
 tcp_unique_tuple(struct nf_conntrack_tuple *tuple,
                 const struct nf_nat_range *range,
                 enum nf_nat_manip_type maniptype,
                 const struct nf_conn *ct)
 {
-       return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
-                                        &tcp_port_rover);
+       nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, &tcp_port_rover);
 }
 
 static bool
index 9e61c79492e4beb0d02a56c0f1bb4b42226630b7..dfe65c7e292586521ad9d1f804a3f82eade1a755 100644 (file)
 
 static u_int16_t udp_port_rover;
 
-static bool
+static void
 udp_unique_tuple(struct nf_conntrack_tuple *tuple,
                 const struct nf_nat_range *range,
                 enum nf_nat_manip_type maniptype,
                 const struct nf_conn *ct)
 {
-       return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
-                                        &udp_port_rover);
+       nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, &udp_port_rover);
 }
 
 static bool
index 440a229bbd87df929816d53d02764d7879478ba1..3cc8c8af39ef2fe82905fdad69b0b64af0f4e770 100644 (file)
 
 static u_int16_t udplite_port_rover;
 
-static bool
+static void
 udplite_unique_tuple(struct nf_conntrack_tuple *tuple,
                     const struct nf_nat_range *range,
                     enum nf_nat_manip_type maniptype,
                     const struct nf_conn *ct)
 {
-       return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
-                                        &udplite_port_rover);
+       nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
+                                 &udplite_port_rover);
 }
 
 static bool
index 14381c62acea676e8553530b32d1f4220d90e6a1..a50f2bc1c7328805e755a204ca66253844dba702 100644 (file)
@@ -26,14 +26,14 @@ static bool unknown_in_range(const struct nf_conntrack_tuple *tuple,
        return true;
 }
 
-static bool unknown_unique_tuple(struct nf_conntrack_tuple *tuple,
+static void unknown_unique_tuple(struct nf_conntrack_tuple *tuple,
                                 const struct nf_nat_range *range,
                                 enum nf_nat_manip_type maniptype,
                                 const struct nf_conn *ct)
 {
        /* Sorry: we can't help you; if it's not unique, we can't frob
           anything. */
-       return false;
+       return;
 }
 
 static bool
index 562ce92de2a65370f70550e683f8a27e3742e348..3f56b6e6c6aab583d65902e7190bbf1a6eaa60b6 100644 (file)
@@ -2878,6 +2878,9 @@ static int rt_fill_info(struct net *net,
        if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0)
                goto nla_put_failure;
 
+       if (rt->fl.mark)
+               NLA_PUT_BE32(skb, RTA_MARK, rt->fl.mark);
+
        error = rt->dst.error;
        expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
        if (rt->peer) {
@@ -2933,6 +2936,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
        __be32 src = 0;
        u32 iif;
        int err;
+       int mark;
        struct sk_buff *skb;
 
        err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
@@ -2960,6 +2964,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
        src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
        dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
        iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
+       mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
 
        if (iif) {
                struct net_device *dev;
@@ -2972,6 +2977,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
 
                skb->protocol   = htons(ETH_P_IP);
                skb->dev        = dev;
+               skb->mark       = mark;
                local_bh_disable();
                err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
                local_bh_enable();
@@ -2989,6 +2995,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
                                },
                        },
                        .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
+                       .mark = mark,
                };
                err = ip_route_output_key(net, &rt, &fl);
        }
index 86b9f67abede2cbaa262865585d752fc9303013c..176e11aaea771795b21c0be6b1453b46c6349f0c 100644 (file)
@@ -2187,6 +2187,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                                      GFP_KERNEL);
                        if (cvp == NULL)
                                return -ENOMEM;
+
+                       kref_init(&cvp->kref);
                }
                lock_sock(sk);
                tp->rx_opt.cookie_in_always =
@@ -2201,12 +2203,11 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                                 */
                                kref_put(&tp->cookie_values->kref,
                                         tcp_cookie_values_release);
-                               kref_init(&cvp->kref);
-                               tp->cookie_values = cvp;
                        } else {
                                cvp = tp->cookie_values;
                        }
                }
+
                if (cvp != NULL) {
                        cvp->cookie_desired = ctd.tcpct_cookie_desired;
 
@@ -2220,6 +2221,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                                cvp->s_data_desired = ctd.tcpct_s_data_desired;
                                cvp->s_data_constant = 0; /* false */
                        }
+
+                       tp->cookie_values = cvp;
                }
                release_sock(sk);
                return err;
@@ -2601,6 +2604,12 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
                        return -EFAULT;
                return 0;
        }
+       case TCP_THIN_LINEAR_TIMEOUTS:
+               val = tp->thin_lto;
+               break;
+       case TCP_THIN_DUPACK:
+               val = tp->thin_dupack;
+               break;
        default:
                return -ENOPROTOOPT;
        }
index e81155d2f251c94295c514660552469c25dff384..ab70a3fbcafafee9a080ea0f8c6eab2ce04fc37e 100644 (file)
@@ -1763,7 +1763,10 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
 
        idev = ipv6_find_idev(dev);
        if (!idev)
-               return NULL;
+               return ERR_PTR(-ENOBUFS);
+
+       if (idev->cnf.disable_ipv6)
+               return ERR_PTR(-EACCES);
 
        /* Add default multicast route */
        addrconf_add_mroute(dev);
@@ -2132,8 +2135,9 @@ static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
        if (!dev)
                return -ENODEV;
 
-       if ((idev = addrconf_add_dev(dev)) == NULL)
-               return -ENOBUFS;
+       idev = addrconf_add_dev(dev);
+       if (IS_ERR(idev))
+               return PTR_ERR(idev);
 
        scope = ipv6_addr_scope(pfx);
 
@@ -2380,7 +2384,7 @@ static void addrconf_dev_config(struct net_device *dev)
        }
 
        idev = addrconf_add_dev(dev);
-       if (idev == NULL)
+       if (IS_ERR(idev))
                return;
 
        memset(&addr, 0, sizeof(struct in6_addr));
@@ -2471,7 +2475,7 @@ static void addrconf_ip6_tnl_config(struct net_device *dev)
        ASSERT_RTNL();
 
        idev = addrconf_add_dev(dev);
-       if (!idev) {
+       if (IS_ERR(idev)) {
                printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n");
                return;
        }
index dc41d6d3c6c6a6fb6e74b57fe12575362123aedf..5359ef4daac5230e4c691c2100d8016dcb69e102 100644 (file)
@@ -387,9 +387,7 @@ ip6t_do_table(struct sk_buff *skb,
                                goto no_match;
                }
 
-               ADD_COUNTER(e->counters,
-                           ntohs(ipv6_hdr(skb)->payload_len) +
-                           sizeof(struct ipv6hdr), 1);
+               ADD_COUNTER(e->counters, skb->len, 1);
 
                t = ip6t_get_target_c(e);
                IP_NF_ASSERT(t->u.kernel.target);
@@ -899,7 +897,7 @@ get_counters(const struct xt_table_info *t,
        struct ip6t_entry *iter;
        unsigned int cpu;
        unsigned int i;
-       unsigned int curcpu;
+       unsigned int curcpu = get_cpu();
 
        /* Instead of clearing (by a previous call to memset())
         * the counters and using adds, we set the counters
@@ -909,14 +907,16 @@ get_counters(const struct xt_table_info *t,
         * if new softirq were to run and call ipt_do_table
         */
        local_bh_disable();
-       curcpu = smp_processor_id();
-
        i = 0;
        xt_entry_foreach(iter, t->entries[curcpu], t->size) {
                SET_COUNTER(counters[i], iter->counters.bcnt,
                            iter->counters.pcnt);
                ++i;
        }
+       local_bh_enable();
+       /* Processing counters from other cpus, we can let bottom half enabled,
+        * (preemption is disabled)
+        */
 
        for_each_possible_cpu(cpu) {
                if (cpu == curcpu)
@@ -930,7 +930,7 @@ get_counters(const struct xt_table_info *t,
                }
                xt_info_wrunlock(cpu);
        }
-       local_bh_enable();
+       put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
index 9254008602d4901d0d6f6b6813c439b1e7ef84db..098a050a20b09817095f8241dfa6395fd14b43d2 100644 (file)
@@ -269,6 +269,11 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
         * in the chain of fragments so far.  We must know where to put
         * this fragment, right?
         */
+       prev = fq->q.fragments_tail;
+       if (!prev || NFCT_FRAG6_CB(prev)->offset < offset) {
+               next = NULL;
+               goto found;
+       }
        prev = NULL;
        for (next = fq->q.fragments; next != NULL; next = next->next) {
                if (NFCT_FRAG6_CB(next)->offset >= offset)
@@ -276,6 +281,7 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
                prev = next;
        }
 
+found:
        /* We found where to put this one.  Check for overlap with
         * preceding fragment, and, if needed, align things so that
         * any overlaps are eliminated.
@@ -341,6 +347,8 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
 
        /* Insert this fragment in the chain of fragments. */
        skb->next = next;
+       if (!next)
+               fq->q.fragments_tail = skb;
        if (prev)
                prev->next = skb;
        else
@@ -464,6 +472,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
                                          head->csum);
 
        fq->q.fragments = NULL;
+       fq->q.fragments_tail = NULL;
 
        /* all original skbs are linked into the NFCT_FRAG6_CB(head).orig */
        fp = skb_shinfo(head)->frag_list;
index dab6b8efe5fa56e5ea11252e7fa192e5bbe79d05..29ac8e1a509e48ec1170f61f668d8d3d5712d501 100644 (file)
@@ -627,7 +627,7 @@ static void ieee80211_send_layer2_update(struct sta_info *sta)
        skb->dev = sta->sdata->dev;
        skb->protocol = eth_type_trans(skb, sta->sdata->dev);
        memset(skb->cb, 0, sizeof(skb->cb));
-       netif_rx(skb);
+       netif_rx_ni(skb);
 }
 
 static void sta_apply_parameters(struct ieee80211_local *local,
index 7cc4f913a43112cfd789bf841163f174aae059ba..798a91b100cc277d154bde089eb6f4745f47b6b6 100644 (file)
@@ -685,10 +685,12 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 
        return 0;
 
+#ifdef CONFIG_INET
  fail_ifa:
        pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY,
                               &local->network_latency_notifier);
        rtnl_lock();
+#endif
  fail_pm_qos:
        ieee80211_led_exit(local);
        ieee80211_remove_interfaces(local);
index 41f20fb7e67083fa8635ba50de021babe7778e8c..872d7b6ef6b34f6dabdfbd0c491d94306d2fdf04 100644 (file)
@@ -400,19 +400,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
        else
                __set_bit(SCAN_SW_SCANNING, &local->scanning);
 
-       /*
-        * Kicking off the scan need not be protected,
-        * only the scan variable stuff, since now
-        * local->scan_req is assigned and other callers
-        * will abort their scan attempts.
-        *
-        * This avoids too many locking dependencies
-        * so that the scan completed calls have more
-        * locking freedom.
-        */
-
        ieee80211_recalc_idle(local);
-       mutex_unlock(&local->scan_mtx);
 
        if (local->ops->hw_scan) {
                WARN_ON(!ieee80211_prep_hw_scan(local));
@@ -420,8 +408,6 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
        } else
                rc = ieee80211_start_sw_scan(local);
 
-       mutex_lock(&local->scan_mtx);
-
        if (rc) {
                kfree(local->hw_scan_req);
                local->hw_scan_req = NULL;
index aa2f106347e4889ad0e03337ed19e10b1dfff823..43288259f4a145d31374a2c0d629ba3b0d16db0e 100644 (file)
@@ -326,6 +326,22 @@ config NETFILTER_XT_CONNMARK
 
 comment "Xtables targets"
 
+config NETFILTER_XT_TARGET_CHECKSUM
+       tristate "CHECKSUM target support"
+       depends on IP_NF_MANGLE || IP6_NF_MANGLE
+       depends on NETFILTER_ADVANCED
+       ---help---
+         This option adds a `CHECKSUM' target, which can be used in the iptables mangle
+         table.
+
+         You can use this target to compute and fill in the checksum in
+         a packet that lacks a checksum.  This is particularly useful,
+         if you need to work around old applications such as dhcp clients,
+         that do not work well with checksum offloads, but don't want to disable
+         checksum offload in your device.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
 config NETFILTER_XT_TARGET_CLASSIFY
        tristate '"CLASSIFY" target support'
        depends on NETFILTER_ADVANCED
@@ -647,6 +663,15 @@ config NETFILTER_XT_MATCH_CONNTRACK
 
          To compile it as a module, choose M here.  If unsure, say N.
 
+config NETFILTER_XT_MATCH_CPU
+       tristate '"cpu" match support'
+       depends on NETFILTER_ADVANCED
+       help
+         CPU matching allows you to match packets based on the CPU
+         currently handling the packet.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
 config NETFILTER_XT_MATCH_DCCP
        tristate '"dccp" protocol match support'
        depends on NETFILTER_ADVANCED
@@ -726,6 +751,16 @@ config NETFILTER_XT_MATCH_IPRANGE
 
        If unsure, say M.
 
+config NETFILTER_XT_MATCH_IPVS
+       tristate '"ipvs" match support'
+       depends on IP_VS
+       depends on NETFILTER_ADVANCED
+       depends on NF_CONNTRACK
+       help
+         This option allows you to match against IPVS properties of a packet.
+
+         If unsure, say N.
+
 config NETFILTER_XT_MATCH_LENGTH
        tristate '"length" match support'
        depends on NETFILTER_ADVANCED
index e28420aac5efa793196b2ae77698b4cfefb8d9b5..441050f31111ae92ddda5db747d99afe8dad6293 100644 (file)
@@ -45,6 +45,7 @@ obj-$(CONFIG_NETFILTER_XT_MARK) += xt_mark.o
 obj-$(CONFIG_NETFILTER_XT_CONNMARK) += xt_connmark.o
 
 # targets
+obj-$(CONFIG_NETFILTER_XT_TARGET_CHECKSUM) += xt_CHECKSUM.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o
@@ -69,6 +70,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_COMMENT) += xt_comment.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_CONNBYTES) += xt_connbytes.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_CONNLIMIT) += xt_connlimit.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_CPU) += xt_cpu.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
@@ -76,6 +78,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_IPRANGE) += xt_iprange.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_IPVS) += xt_ipvs.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_LENGTH) += xt_length.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_LIMIT) += xt_limit.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_MAC) += xt_mac.o
index 712ccad133447953fdc86e8e07127f582c9a98dc..46a77d5c3887dc7c874b327a77afb539c8d92777 100644 (file)
@@ -3,7 +3,7 @@
 #
 menuconfig IP_VS
        tristate "IP virtual server support"
-       depends on NET && INET && NETFILTER
+       depends on NET && INET && NETFILTER && NF_CONNTRACK
        ---help---
          IP Virtual Server support will let you build a high-performance
          virtual server based on cluster of two or more real servers. This
@@ -26,7 +26,7 @@ if IP_VS
 
 config IP_VS_IPV6
        bool "IPv6 support for IPVS"
-       depends on EXPERIMENTAL && (IPV6 = y || IP_VS = IPV6)
+       depends on IPV6 = y || IP_VS = IPV6
        ---help---
          Add IPv6 support to IPVS. This is incomplete and might be dangerous.
 
@@ -87,19 +87,16 @@ config      IP_VS_PROTO_UDP
          protocol. Say Y if unsure.
 
 config IP_VS_PROTO_AH_ESP
-       bool
-       depends on UNDEFINED
+       def_bool IP_VS_PROTO_ESP || IP_VS_PROTO_AH
 
 config IP_VS_PROTO_ESP
        bool "ESP load balancing support"
-       select IP_VS_PROTO_AH_ESP
        ---help---
          This option enables support for load balancing ESP (Encapsulation
          Security Payload) transport protocol. Say Y if unsure.
 
 config IP_VS_PROTO_AH
        bool "AH load balancing support"
-       select IP_VS_PROTO_AH_ESP
        ---help---
          This option enables support for load balancing AH (Authentication
          Header) transport protocol. Say Y if unsure.
@@ -238,7 +235,7 @@ comment 'IPVS application helper'
 
 config IP_VS_FTP
        tristate "FTP protocol helper"
-        depends on IP_VS_PROTO_TCP
+        depends on IP_VS_PROTO_TCP && NF_NAT
        ---help---
          FTP is a protocol that transfers IP address and/or port number in
          the payload. In the virtual server via Network Address Translation,
index 1cb0e834f8ff36c784e735eab7c3aae36ea7329f..e76f87f4aca80fdc4e9f0557c0b491dcdeff7556 100644 (file)
@@ -569,49 +569,6 @@ static const struct file_operations ip_vs_app_fops = {
 };
 #endif
 
-
-/*
- *     Replace a segment of data with a new segment
- */
-int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri,
-                     char *o_buf, int o_len, char *n_buf, int n_len)
-{
-       int diff;
-       int o_offset;
-       int o_left;
-
-       EnterFunction(9);
-
-       diff = n_len - o_len;
-       o_offset = o_buf - (char *)skb->data;
-       /* The length of left data after o_buf+o_len in the skb data */
-       o_left = skb->len - (o_offset + o_len);
-
-       if (diff <= 0) {
-               memmove(o_buf + n_len, o_buf + o_len, o_left);
-               memcpy(o_buf, n_buf, n_len);
-               skb_trim(skb, skb->len + diff);
-       } else if (diff <= skb_tailroom(skb)) {
-               skb_put(skb, diff);
-               memmove(o_buf + n_len, o_buf + o_len, o_left);
-               memcpy(o_buf, n_buf, n_len);
-       } else {
-               if (pskb_expand_head(skb, skb_headroom(skb), diff, pri))
-                       return -ENOMEM;
-               skb_put(skb, diff);
-               memmove(skb->data + o_offset + n_len,
-                       skb->data + o_offset + o_len, o_left);
-               skb_copy_to_linear_data_offset(skb, o_offset, n_buf, n_len);
-       }
-
-       /* must update the iph total length here */
-       ip_hdr(skb)->tot_len = htons(skb->len);
-
-       LeaveFunction(9);
-       return 0;
-}
-
-
 int __init ip_vs_app_init(void)
 {
        /* we will replace it with proc_net_ipvs_create() soon */
index 654544e722643deaac0304dd76a103fcb78eced0..b71c69a2db138ac30aadb13e02a697322d54ac90 100644 (file)
@@ -271,6 +271,29 @@ struct ip_vs_conn *ip_vs_conn_in_get
        return cp;
 }
 
+struct ip_vs_conn *
+ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
+                       struct ip_vs_protocol *pp,
+                       const struct ip_vs_iphdr *iph,
+                       unsigned int proto_off, int inverse)
+{
+       __be16 _ports[2], *pptr;
+
+       pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
+       if (pptr == NULL)
+               return NULL;
+
+       if (likely(!inverse))
+               return ip_vs_conn_in_get(af, iph->protocol,
+                                        &iph->saddr, pptr[0],
+                                        &iph->daddr, pptr[1]);
+       else
+               return ip_vs_conn_in_get(af, iph->protocol,
+                                        &iph->daddr, pptr[1],
+                                        &iph->saddr, pptr[0]);
+}
+EXPORT_SYMBOL_GPL(ip_vs_conn_in_get_proto);
+
 /* Get reference to connection template */
 struct ip_vs_conn *ip_vs_ct_in_get
 (int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
@@ -356,6 +379,28 @@ struct ip_vs_conn *ip_vs_conn_out_get
        return ret;
 }
 
+struct ip_vs_conn *
+ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
+                        struct ip_vs_protocol *pp,
+                        const struct ip_vs_iphdr *iph,
+                        unsigned int proto_off, int inverse)
+{
+       __be16 _ports[2], *pptr;
+
+       pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
+       if (pptr == NULL)
+               return NULL;
+
+       if (likely(!inverse))
+               return ip_vs_conn_out_get(af, iph->protocol,
+                                         &iph->saddr, pptr[0],
+                                         &iph->daddr, pptr[1]);
+       else
+               return ip_vs_conn_out_get(af, iph->protocol,
+                                         &iph->daddr, pptr[1],
+                                         &iph->saddr, pptr[0]);
+}
+EXPORT_SYMBOL_GPL(ip_vs_conn_out_get_proto);
 
 /*
  *      Put back the conn and restart its timer with its timeout
index 50907d8472a31720ab1e0e61dbfa8a8aeddeb939..4f8ddba480110167674fa36f6854e53f673e8d68 100644 (file)
@@ -54,7 +54,6 @@
 
 EXPORT_SYMBOL(register_ip_vs_scheduler);
 EXPORT_SYMBOL(unregister_ip_vs_scheduler);
-EXPORT_SYMBOL(ip_vs_skb_replace);
 EXPORT_SYMBOL(ip_vs_proto_name);
 EXPORT_SYMBOL(ip_vs_conn_new);
 EXPORT_SYMBOL(ip_vs_conn_in_get);
@@ -536,26 +535,6 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
        return NF_DROP;
 }
 
-
-/*
- *      It is hooked before NF_IP_PRI_NAT_SRC at the NF_INET_POST_ROUTING
- *      chain, and is used for VS/NAT.
- *      It detects packets for VS/NAT connections and sends the packets
- *      immediately. This can avoid that iptable_nat mangles the packets
- *      for VS/NAT.
- */
-static unsigned int ip_vs_post_routing(unsigned int hooknum,
-                                      struct sk_buff *skb,
-                                      const struct net_device *in,
-                                      const struct net_device *out,
-                                      int (*okfn)(struct sk_buff *))
-{
-       if (!skb->ipvs_property)
-               return NF_ACCEPT;
-       /* The packet was sent from IPVS, exit this chain */
-       return NF_STOP;
-}
-
 __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset)
 {
        return csum_fold(skb_checksum(skb, offset, skb->len - offset, 0));
@@ -1499,14 +1478,6 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
                .hooknum        = NF_INET_FORWARD,
                .priority       = 99,
        },
-       /* Before the netfilter connection tracking, exit from POST_ROUTING */
-       {
-               .hook           = ip_vs_post_routing,
-               .owner          = THIS_MODULE,
-               .pf             = PF_INET,
-               .hooknum        = NF_INET_POST_ROUTING,
-               .priority       = NF_IP_PRI_NAT_SRC-1,
-       },
 #ifdef CONFIG_IP_VS_IPV6
        /* After packet filtering, forward packet through VS/DR, VS/TUN,
         * or VS/NAT(change destination), so that filtering rules can be
@@ -1535,14 +1506,6 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
                .hooknum        = NF_INET_FORWARD,
                .priority       = 99,
        },
-       /* Before the netfilter connection tracking, exit from POST_ROUTING */
-       {
-               .hook           = ip_vs_post_routing,
-               .owner          = THIS_MODULE,
-               .pf             = PF_INET6,
-               .hooknum        = NF_INET_POST_ROUTING,
-               .priority       = NF_IP6_PRI_NAT_SRC-1,
-       },
 #endif
 };
 
index 2ae747a376a597296652be7b1051924aa5598bd8..f228a17ec6499b1440cae8a9dd9a1d8e312c4a00 100644 (file)
  *
  * Author:     Wouter Gadeyne
  *
+ *
+ * Code for ip_vs_expect_related and ip_vs_expect_callback is taken from
+ * http://www.ssi.bg/~ja/nfct/:
+ *
+ * ip_vs_nfct.c:       Netfilter connection tracking support for IPVS
+ *
+ * Portions Copyright (C) 2001-2002
+ * Antefacto Ltd, 181 Parnell St, Dublin 1, Ireland.
+ *
+ * Portions Copyright (C) 2003-2008
+ * Julian Anastasov
  */
 
 #define KMSG_COMPONENT "IPVS"
@@ -32,6 +43,9 @@
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/netfilter.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_nat_helper.h>
 #include <linux/gfp.h>
 #include <net/protocol.h>
 #include <net/tcp.h>
 #define SERVER_STRING "227 Entering Passive Mode ("
 #define CLIENT_STRING "PORT "
 
+#define FMT_TUPLE      "%pI4:%u->%pI4:%u/%u"
+#define ARG_TUPLE(T)   &(T)->src.u3.ip, ntohs((T)->src.u.all), \
+                       &(T)->dst.u3.ip, ntohs((T)->dst.u.all), \
+                       (T)->dst.protonum
+
+#define FMT_CONN       "%pI4:%u->%pI4:%u->%pI4:%u/%u:%u"
+#define ARG_CONN(C)    &((C)->caddr.ip), ntohs((C)->cport), \
+                       &((C)->vaddr.ip), ntohs((C)->vport), \
+                       &((C)->daddr.ip), ntohs((C)->dport), \
+                       (C)->protocol, (C)->state
 
 /*
  * List of ports (up to IP_VS_APP_MAX_PORTS) to be handled by helper
@@ -123,6 +147,119 @@ static int ip_vs_ftp_get_addrport(char *data, char *data_limit,
        return 1;
 }
 
+/*
+ * Called from init_conntrack() as expectfn handler.
+ */
+static void
+ip_vs_expect_callback(struct nf_conn *ct,
+                     struct nf_conntrack_expect *exp)
+{
+       struct nf_conntrack_tuple *orig, new_reply;
+       struct ip_vs_conn *cp;
+
+       if (exp->tuple.src.l3num != PF_INET)
+               return;
+
+       /*
+        * We assume that no NF locks are held before this callback.
+        * ip_vs_conn_out_get and ip_vs_conn_in_get should match their
+        * expectations even if they use wildcard values, now we provide the
+        * actual values from the newly created original conntrack direction.
+        * The conntrack is confirmed when packet reaches IPVS hooks.
+        */
+
+       /* RS->CLIENT */
+       orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+       cp = ip_vs_conn_out_get(exp->tuple.src.l3num, orig->dst.protonum,
+                               &orig->src.u3, orig->src.u.tcp.port,
+                               &orig->dst.u3, orig->dst.u.tcp.port);
+       if (cp) {
+               /* Change reply CLIENT->RS to CLIENT->VS */
+               new_reply = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+               IP_VS_DBG(7, "%s(): ct=%p, status=0x%lX, tuples=" FMT_TUPLE ", "
+                         FMT_TUPLE ", found inout cp=" FMT_CONN "\n",
+                         __func__, ct, ct->status,
+                         ARG_TUPLE(orig), ARG_TUPLE(&new_reply),
+                         ARG_CONN(cp));
+               new_reply.dst.u3 = cp->vaddr;
+               new_reply.dst.u.tcp.port = cp->vport;
+               IP_VS_DBG(7, "%s(): ct=%p, new tuples=" FMT_TUPLE ", " FMT_TUPLE
+                         ", inout cp=" FMT_CONN "\n",
+                         __func__, ct,
+                         ARG_TUPLE(orig), ARG_TUPLE(&new_reply),
+                         ARG_CONN(cp));
+               goto alter;
+       }
+
+       /* CLIENT->VS */
+       cp = ip_vs_conn_in_get(exp->tuple.src.l3num, orig->dst.protonum,
+                              &orig->src.u3, orig->src.u.tcp.port,
+                              &orig->dst.u3, orig->dst.u.tcp.port);
+       if (cp) {
+               /* Change reply VS->CLIENT to RS->CLIENT */
+               new_reply = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+               IP_VS_DBG(7, "%s(): ct=%p, status=0x%lX, tuples=" FMT_TUPLE ", "
+                         FMT_TUPLE ", found outin cp=" FMT_CONN "\n",
+                         __func__, ct, ct->status,
+                         ARG_TUPLE(orig), ARG_TUPLE(&new_reply),
+                         ARG_CONN(cp));
+               new_reply.src.u3 = cp->daddr;
+               new_reply.src.u.tcp.port = cp->dport;
+               IP_VS_DBG(7, "%s(): ct=%p, new tuples=" FMT_TUPLE ", "
+                         FMT_TUPLE ", outin cp=" FMT_CONN "\n",
+                         __func__, ct,
+                         ARG_TUPLE(orig), ARG_TUPLE(&new_reply),
+                         ARG_CONN(cp));
+               goto alter;
+       }
+
+       IP_VS_DBG(7, "%s(): ct=%p, status=0x%lX, tuple=" FMT_TUPLE
+                 " - unknown expect\n",
+                 __func__, ct, ct->status, ARG_TUPLE(orig));
+       return;
+
+alter:
+       /* Never alter conntrack for non-NAT conns */
+       if (IP_VS_FWD_METHOD(cp) == IP_VS_CONN_F_MASQ)
+               nf_conntrack_alter_reply(ct, &new_reply);
+       ip_vs_conn_put(cp);
+       return;
+}
+
+/*
+ * Create NF conntrack expectation with wildcard (optional) source port.
+ * Then the default callback function will alter the reply and will confirm
+ * the conntrack entry when the first packet comes.
+ */
+static void
+ip_vs_expect_related(struct sk_buff *skb, struct nf_conn *ct,
+                    struct ip_vs_conn *cp, u_int8_t proto,
+                    const __be16 *port, int from_rs)
+{
+       struct nf_conntrack_expect *exp;
+
+       BUG_ON(!ct || ct == &nf_conntrack_untracked);
+
+       exp = nf_ct_expect_alloc(ct);
+       if (!exp)
+               return;
+
+       if (from_rs)
+               nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
+                                 nf_ct_l3num(ct), &cp->daddr, &cp->caddr,
+                                 proto, port, &cp->cport);
+       else
+               nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
+                                 nf_ct_l3num(ct), &cp->caddr, &cp->vaddr,
+                                 proto, port, &cp->vport);
+
+       exp->expectfn = ip_vs_expect_callback;
+
+       IP_VS_DBG(7, "%s(): ct=%p, expect tuple=" FMT_TUPLE "\n",
+                 __func__, ct, ARG_TUPLE(&exp->tuple));
+       nf_ct_expect_related(exp);
+       nf_ct_expect_put(exp);
+}
 
 /*
  * Look at outgoing ftp packets to catch the response to a PASV command
@@ -149,7 +286,9 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
        struct ip_vs_conn *n_cp;
        char buf[24];           /* xxx.xxx.xxx.xxx,ppp,ppp\000 */
        unsigned buf_len;
-       int ret;
+       int ret = 0;
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct;
 
 #ifdef CONFIG_IP_VS_IPV6
        /* This application helper doesn't work with IPv6 yet,
@@ -219,19 +358,26 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
 
                buf_len = strlen(buf);
 
+               ct = nf_ct_get(skb, &ctinfo);
+               if (ct && !nf_ct_is_untracked(ct)) {
+                       /* If mangling fails this function will return 0
+                        * which will cause the packet to be dropped.
+                        * Mangling can only fail under memory pressure,
+                        * hopefully it will succeed on the retransmitted
+                        * packet.
+                        */
+                       ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
+                                                      start-data, end-start,
+                                                      buf, buf_len);
+                       if (ret)
+                               ip_vs_expect_related(skb, ct, n_cp,
+                                                    IPPROTO_TCP, NULL, 0);
+               }
+
                /*
-                * Calculate required delta-offset to keep TCP happy
+                * Not setting 'diff' is intentional, otherwise the sequence
+                * would be adjusted twice.
                 */
-               *diff = buf_len - (end-start);
-
-               if (*diff == 0) {
-                       /* simply replace it with new passive address */
-                       memcpy(start, buf, buf_len);
-                       ret = 1;
-               } else {
-                       ret = !ip_vs_skb_replace(skb, GFP_ATOMIC, start,
-                                         end-start, buf, buf_len);
-               }
 
                cp->app_data = NULL;
                ip_vs_tcp_conn_listen(n_cp);
@@ -263,6 +409,7 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
        union nf_inet_addr to;
        __be16 port;
        struct ip_vs_conn *n_cp;
+       struct nf_conn *ct;
 
 #ifdef CONFIG_IP_VS_IPV6
        /* This application helper doesn't work with IPv6 yet,
@@ -349,6 +496,11 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
                ip_vs_control_add(n_cp, cp);
        }
 
+       ct = (struct nf_conn *)skb->nfct;
+       if (ct && ct != &nf_conntrack_untracked)
+               ip_vs_expect_related(skb, ct, n_cp,
+                                    IPPROTO_TCP, &n_cp->dport, 1);
+
        /*
         *      Move tunnel to listen state
         */
index 2d3d5e4b35f8f42f59c0cc0a838fab3903446a28..027f654799feb969fe5a64de8b262e37287e8f48 100644 (file)
@@ -98,6 +98,7 @@ struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto)
 
        return NULL;
 }
+EXPORT_SYMBOL(ip_vs_proto_get);
 
 
 /*
index c9a3f7a21d53b2de0ff597d18b0a37061e57f8a3..4c0855cb006ee93c721d53ff0b95cef5ec1853bf 100644 (file)
@@ -8,55 +8,6 @@
 #include <net/sctp/checksum.h>
 #include <net/ip_vs.h>
 
-
-static struct ip_vs_conn *
-sctp_conn_in_get(int af,
-                const struct sk_buff *skb,
-                struct ip_vs_protocol *pp,
-                const struct ip_vs_iphdr *iph,
-                unsigned int proto_off,
-                int inverse)
-{
-       __be16 _ports[2], *pptr;
-
-       pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
-       if (pptr == NULL)
-               return NULL;
-
-       if (likely(!inverse)) 
-               return ip_vs_conn_in_get(af, iph->protocol,
-                                        &iph->saddr, pptr[0],
-                                        &iph->daddr, pptr[1]);
-       else 
-               return ip_vs_conn_in_get(af, iph->protocol,
-                                        &iph->daddr, pptr[1],
-                                        &iph->saddr, pptr[0]);
-}
-
-static struct ip_vs_conn *
-sctp_conn_out_get(int af,
-                 const struct sk_buff *skb,
-                 struct ip_vs_protocol *pp,
-                 const struct ip_vs_iphdr *iph,
-                 unsigned int proto_off,
-                 int inverse)
-{
-       __be16 _ports[2], *pptr;
-
-       pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
-       if (pptr == NULL)
-               return NULL;
-
-       if (likely(!inverse)) 
-               return ip_vs_conn_out_get(af, iph->protocol,
-                                         &iph->saddr, pptr[0],
-                                         &iph->daddr, pptr[1]);
-       else 
-               return ip_vs_conn_out_get(af, iph->protocol,
-                                         &iph->daddr, pptr[1],
-                                         &iph->saddr, pptr[0]);
-}
-
 static int
 sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
                   int *verdict, struct ip_vs_conn **cpp)
@@ -173,7 +124,7 @@ sctp_dnat_handler(struct sk_buff *skb,
                        return 0;
 
                /* Call application helper if needed */
-               if (!ip_vs_app_pkt_out(cp, skb))
+               if (!ip_vs_app_pkt_in(cp, skb))
                        return 0;
        }
 
@@ -1169,8 +1120,8 @@ struct ip_vs_protocol ip_vs_protocol_sctp = {
        .register_app = sctp_register_app,
        .unregister_app = sctp_unregister_app,
        .conn_schedule = sctp_conn_schedule,
-       .conn_in_get = sctp_conn_in_get,
-       .conn_out_get = sctp_conn_out_get,
+       .conn_in_get = ip_vs_conn_in_get_proto,
+       .conn_out_get = ip_vs_conn_out_get_proto,
        .snat_handler = sctp_snat_handler,
        .dnat_handler = sctp_dnat_handler,
        .csum_check = sctp_csum_check,
index 91d28e073742db770bb5df2ee8ee123fb7afbc0a..282d24de8592e659466657533b10ad6eadf4bd5c 100644 (file)
 
 #include <net/ip_vs.h>
 
-
-static struct ip_vs_conn *
-tcp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
-               const struct ip_vs_iphdr *iph, unsigned int proto_off,
-               int inverse)
-{
-       __be16 _ports[2], *pptr;
-
-       pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
-       if (pptr == NULL)
-               return NULL;
-
-       if (likely(!inverse)) {
-               return ip_vs_conn_in_get(af, iph->protocol,
-                                        &iph->saddr, pptr[0],
-                                        &iph->daddr, pptr[1]);
-       } else {
-               return ip_vs_conn_in_get(af, iph->protocol,
-                                        &iph->daddr, pptr[1],
-                                        &iph->saddr, pptr[0]);
-       }
-}
-
-static struct ip_vs_conn *
-tcp_conn_out_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
-                const struct ip_vs_iphdr *iph, unsigned int proto_off,
-                int inverse)
-{
-       __be16 _ports[2], *pptr;
-
-       pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
-       if (pptr == NULL)
-               return NULL;
-
-       if (likely(!inverse)) {
-               return ip_vs_conn_out_get(af, iph->protocol,
-                                         &iph->saddr, pptr[0],
-                                         &iph->daddr, pptr[1]);
-       } else {
-               return ip_vs_conn_out_get(af, iph->protocol,
-                                         &iph->daddr, pptr[1],
-                                         &iph->saddr, pptr[0]);
-       }
-}
-
-
 static int
 tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
                  int *verdict, struct ip_vs_conn **cpp)
@@ -721,8 +675,8 @@ struct ip_vs_protocol ip_vs_protocol_tcp = {
        .register_app =         tcp_register_app,
        .unregister_app =       tcp_unregister_app,
        .conn_schedule =        tcp_conn_schedule,
-       .conn_in_get =          tcp_conn_in_get,
-       .conn_out_get =         tcp_conn_out_get,
+       .conn_in_get =          ip_vs_conn_in_get_proto,
+       .conn_out_get =         ip_vs_conn_out_get_proto,
        .snat_handler =         tcp_snat_handler,
        .dnat_handler =         tcp_dnat_handler,
        .csum_check =           tcp_csum_check,
index e7a6885e0167cc2a55d1fd4b71f2144c2ba46e54..8553231b5d412ca557f8699ee998e05351152213 100644 (file)
 #include <net/ip.h>
 #include <net/ip6_checksum.h>
 
-static struct ip_vs_conn *
-udp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
-               const struct ip_vs_iphdr *iph, unsigned int proto_off,
-               int inverse)
-{
-       struct ip_vs_conn *cp;
-       __be16 _ports[2], *pptr;
-
-       pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
-       if (pptr == NULL)
-               return NULL;
-
-       if (likely(!inverse)) {
-               cp = ip_vs_conn_in_get(af, iph->protocol,
-                                      &iph->saddr, pptr[0],
-                                      &iph->daddr, pptr[1]);
-       } else {
-               cp = ip_vs_conn_in_get(af, iph->protocol,
-                                      &iph->daddr, pptr[1],
-                                      &iph->saddr, pptr[0]);
-       }
-
-       return cp;
-}
-
-
-static struct ip_vs_conn *
-udp_conn_out_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
-                const struct ip_vs_iphdr *iph, unsigned int proto_off,
-                int inverse)
-{
-       struct ip_vs_conn *cp;
-       __be16 _ports[2], *pptr;
-
-       pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
-       if (pptr == NULL)
-               return NULL;
-
-       if (likely(!inverse)) {
-               cp = ip_vs_conn_out_get(af, iph->protocol,
-                                       &iph->saddr, pptr[0],
-                                       &iph->daddr, pptr[1]);
-       } else {
-               cp = ip_vs_conn_out_get(af, iph->protocol,
-                                       &iph->daddr, pptr[1],
-                                       &iph->saddr, pptr[0]);
-       }
-
-       return cp;
-}
-
-
 static int
 udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
                  int *verdict, struct ip_vs_conn **cpp)
@@ -520,8 +468,8 @@ struct ip_vs_protocol ip_vs_protocol_udp = {
        .init =                 udp_init,
        .exit =                 udp_exit,
        .conn_schedule =        udp_conn_schedule,
-       .conn_in_get =          udp_conn_in_get,
-       .conn_out_get =         udp_conn_out_get,
+       .conn_in_get =          ip_vs_conn_in_get_proto,
+       .conn_out_get =         ip_vs_conn_out_get_proto,
        .snat_handler =         udp_snat_handler,
        .dnat_handler =         udp_dnat_handler,
        .csum_check =           udp_csum_check,
index 02b078e11cf333f530052a3cbcbf8313cb5d23e9..21e1a5e9b9d3cd354d74808e44094ffc67f671da 100644 (file)
@@ -28,6 +28,7 @@
 #include <net/ip6_route.h>
 #include <linux/icmpv6.h>
 #include <linux/netfilter.h>
+#include <net/netfilter/nf_conntrack.h>
 #include <linux/netfilter_ipv4.h>
 
 #include <net/ip_vs.h>
@@ -348,6 +349,30 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 }
 #endif
 
+static void
+ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp)
+{
+       struct nf_conn *ct = (struct nf_conn *)skb->nfct;
+       struct nf_conntrack_tuple new_tuple;
+
+       if (ct == NULL || nf_ct_is_untracked(ct) || nf_ct_is_confirmed(ct))
+               return;
+
+       /*
+        * The connection is not yet in the hashtable, so we update it.
+        * CIP->VIP will remain the same, so leave the tuple in
+        * IP_CT_DIR_ORIGINAL untouched.  When the reply comes back from the
+        * real-server we will see RIP->DIP.
+        */
+       new_tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+       new_tuple.src.u3 = cp->daddr;
+       /*
+        * This will also take care of UDP and other protocols.
+        */
+       new_tuple.src.u.tcp.port = cp->dport;
+       nf_conntrack_alter_reply(ct, &new_tuple);
+}
+
 /*
  *      NAT transmitter (only for outside-to-inside nat forwarding)
  *      Not used for related ICMP
@@ -403,6 +428,8 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
 
+       ip_vs_update_conntrack(skb, cp);
+
        /* FIXME: when application helper enlarges the packet and the length
           is larger than the MTU of outgoing device, there will be still
           MTU problem. */
@@ -479,6 +506,8 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
 
+       ip_vs_update_conntrack(skb, cp);
+
        /* FIXME: when application helper enlarges the packet and the length
           is larger than the MTU of outgoing device, there will be still
           MTU problem. */
index 16b41b4e2a3c18a4b7979f19d51a91a9427761ae..df3eedb142ff809b9ce8f9dd49c41782f91aba42 100644 (file)
@@ -966,8 +966,7 @@ acct:
                if (acct) {
                        spin_lock_bh(&ct->lock);
                        acct[CTINFO2DIR(ctinfo)].packets++;
-                       acct[CTINFO2DIR(ctinfo)].bytes +=
-                               skb->len - skb_network_offset(skb);
+                       acct[CTINFO2DIR(ctinfo)].bytes += skb->len;
                        spin_unlock_bh(&ct->lock);
                }
        }
index fdc8fb4ae10f0346424b112752581068ccdbc340..7dcf7a404190e6aa3fa06e642f54279e2f30fba9 100644 (file)
@@ -23,9 +23,10 @@ void __nf_ct_ext_destroy(struct nf_conn *ct)
 {
        unsigned int i;
        struct nf_ct_ext_type *t;
+       struct nf_ct_ext *ext = ct->ext;
 
        for (i = 0; i < NF_CT_EXT_NUM; i++) {
-               if (!nf_ct_ext_exist(ct, i))
+               if (!__nf_ct_ext_exist(ext, i))
                        continue;
 
                rcu_read_lock();
@@ -73,44 +74,45 @@ static void __nf_ct_ext_free_rcu(struct rcu_head *head)
 
 void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
 {
-       struct nf_ct_ext *new;
+       struct nf_ct_ext *old, *new;
        int i, newlen, newoff;
        struct nf_ct_ext_type *t;
 
        /* Conntrack must not be confirmed to avoid races on reallocation. */
        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
 
-       if (!ct->ext)
+       old = ct->ext;
+       if (!old)
                return nf_ct_ext_create(&ct->ext, id, gfp);
 
-       if (nf_ct_ext_exist(ct, id))
+       if (__nf_ct_ext_exist(old, id))
                return NULL;
 
        rcu_read_lock();
        t = rcu_dereference(nf_ct_ext_types[id]);
        BUG_ON(t == NULL);
 
-       newoff = ALIGN(ct->ext->len, t->align);
+       newoff = ALIGN(old->len, t->align);
        newlen = newoff + t->len;
        rcu_read_unlock();
 
-       new = __krealloc(ct->ext, newlen, gfp);
+       new = __krealloc(old, newlen, gfp);
        if (!new)
                return NULL;
 
-       if (new != ct->ext) {
+       if (new != old) {
                for (i = 0; i < NF_CT_EXT_NUM; i++) {
-                       if (!nf_ct_ext_exist(ct, i))
+                       if (!__nf_ct_ext_exist(old, i))
                                continue;
 
                        rcu_read_lock();
                        t = rcu_dereference(nf_ct_ext_types[i]);
                        if (t && t->move)
                                t->move((void *)new + new->offset[i],
-                                       (void *)ct->ext + ct->ext->offset[i]);
+                                       (void *)old + old->offset[i]);
                        rcu_read_unlock();
                }
-               call_rcu(&ct->ext->rcu, __nf_ct_ext_free_rcu);
+               call_rcu(&old->rcu, __nf_ct_ext_free_rcu);
                ct->ext = new;
        }
 
index 802dbffae8b42bfeeeeb805e079547cf263b2aad..c4c885dca3bd358a320f1861d8ea9e52bd8166d3 100644 (file)
@@ -585,8 +585,16 @@ static bool tcp_in_window(const struct nf_conn *ct,
                         * Let's try to use the data from the packet.
                         */
                        sender->td_end = end;
+                       win <<= sender->td_scale;
                        sender->td_maxwin = (win == 0 ? 1 : win);
                        sender->td_maxend = end + sender->td_maxwin;
+                       /*
+                        * We haven't seen traffic in the other direction yet
+                        * but we have to tweak window tracking to pass III
+                        * and IV until that happens.
+                        */
+                       if (receiver->td_maxwin == 0)
+                               receiver->td_end = receiver->td_maxend = sack;
                }
        } else if (((state->state == TCP_CONNTRACK_SYN_SENT
                     && dir == IP_CT_DIR_ORIGINAL)
@@ -680,7 +688,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
                /*
                 * Update receiver data.
                 */
-               if (after(end, sender->td_maxend))
+               if (receiver->td_maxwin != 0 && after(end, sender->td_maxend))
                        receiver->td_maxwin += end - sender->td_maxend;
                if (after(sack + win, receiver->td_maxend - 1)) {
                        receiver->td_maxend = sack + win;
diff --git a/net/netfilter/xt_CHECKSUM.c b/net/netfilter/xt_CHECKSUM.c
new file mode 100644 (file)
index 0000000..0f642ef
--- /dev/null
@@ -0,0 +1,70 @@
+/* iptables module for the packet checksum mangling
+ *
+ * (C) 2002 by Harald Welte <laforge@netfilter.org>
+ * (C) 2010 Red Hat, Inc.
+ *
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/skbuff.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_CHECKSUM.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michael S. Tsirkin <mst@redhat.com>");
+MODULE_DESCRIPTION("Xtables: checksum modification");
+MODULE_ALIAS("ipt_CHECKSUM");
+MODULE_ALIAS("ip6t_CHECKSUM");
+
+static unsigned int
+checksum_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+       if (skb->ip_summed == CHECKSUM_PARTIAL)
+               skb_checksum_help(skb);
+
+       return XT_CONTINUE;
+}
+
+static int checksum_tg_check(const struct xt_tgchk_param *par)
+{
+       const struct xt_CHECKSUM_info *einfo = par->targinfo;
+
+       if (einfo->operation & ~XT_CHECKSUM_OP_FILL) {
+               pr_info("unsupported CHECKSUM operation %x\n", einfo->operation);
+               return -EINVAL;
+       }
+       if (!einfo->operation) {
+               pr_info("no CHECKSUM operation enabled\n");
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static struct xt_target checksum_tg_reg __read_mostly = {
+       .name           = "CHECKSUM",
+       .family         = NFPROTO_UNSPEC,
+       .target         = checksum_tg,
+       .targetsize     = sizeof(struct xt_CHECKSUM_info),
+       .table          = "mangle",
+       .checkentry     = checksum_tg_check,
+       .me             = THIS_MODULE,
+};
+
+static int __init checksum_tg_init(void)
+{
+       return xt_register_target(&checksum_tg_reg);
+}
+
+static void __exit checksum_tg_exit(void)
+{
+       xt_unregister_target(&checksum_tg_reg);
+}
+
+module_init(checksum_tg_init);
+module_exit(checksum_tg_exit);
index e1a0dedac2580b1aeb5fe1d52d6e5f60f082005b..c61294d85fdafbcb4696b704a6754f9f1c9476c1 100644 (file)
@@ -37,8 +37,10 @@ tproxy_tg(struct sk_buff *skb, const struct xt_action_param *par)
                return NF_DROP;
 
        sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), iph->protocol,
-                                  iph->saddr, tgi->laddr ? tgi->laddr : iph->daddr,
-                                  hp->source, tgi->lport ? tgi->lport : hp->dest,
+                                  iph->saddr,
+                                  tgi->laddr ? tgi->laddr : iph->daddr,
+                                  hp->source,
+                                  tgi->lport ? tgi->lport : hp->dest,
                                   par->in, true);
 
        /* NOTE: assign_sock consumes our sk reference */
diff --git a/net/netfilter/xt_cpu.c b/net/netfilter/xt_cpu.c
new file mode 100644 (file)
index 0000000..b39db8a
--- /dev/null
@@ -0,0 +1,63 @@
+/* Kernel module to match running CPU */
+
+/*
+ * Might be used to distribute connections on several daemons, if
+ * RPS (Receive Packet Steering) is enabled or NIC is multiqueue capable,
+ * each RX queue IRQ affined to one CPU (1:1 mapping)
+ *
+ */
+
+/* (C) 2010 Eric Dumazet
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter/xt_cpu.h>
+#include <linux/netfilter/x_tables.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Eric Dumazet <eric.dumazet@gmail.com>");
+MODULE_DESCRIPTION("Xtables: CPU match");
+
+/*
+ * Rule sanity check: ->invert is used as a boolean, so any bit other
+ * than bit 0 is invalid.  Returns 0 on success, -EINVAL otherwise.
+ */
+static int cpu_mt_check(const struct xt_mtchk_param *par)
+{
+       const struct xt_cpu_info *info = par->matchinfo;
+
+       if (info->invert & ~1)
+               return -EINVAL;
+       return 0;
+}
+
+/*
+ * Match iff the packet is being processed on the configured CPU,
+ * XOR-ed with the invert flag.  NOTE(review): smp_processor_id()
+ * presumes the caller runs with preemption disabled (netfilter hook
+ * context) — relied upon, not enforced in this file.
+ */
+static bool cpu_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+       const struct xt_cpu_info *info = par->matchinfo;
+
+       return (info->cpu == smp_processor_id()) ^ info->invert;
+}
+
+/*
+ * Registration data: family NFPROTO_UNSPEC makes the "cpu" match
+ * usable from any x_tables family.
+ */
+static struct xt_match cpu_mt_reg __read_mostly = {
+       .name       = "cpu",
+       .revision   = 0,
+       .family     = NFPROTO_UNSPEC,
+       .checkentry = cpu_mt_check,
+       .match      = cpu_mt,
+       .matchsize  = sizeof(struct xt_cpu_info),
+       .me         = THIS_MODULE,
+};
+
+/* Module init: register the "cpu" match with x_tables. */
+static int __init cpu_mt_init(void)
+{
+       return xt_register_match(&cpu_mt_reg);
+}
+
+/* Module exit: unregister the match. */
+static void __exit cpu_mt_exit(void)
+{
+       xt_unregister_match(&cpu_mt_reg);
+}
+
+module_init(cpu_mt_init);
+module_exit(cpu_mt_exit);
diff --git a/net/netfilter/xt_ipvs.c b/net/netfilter/xt_ipvs.c
new file mode 100644 (file)
index 0000000..7a4d66d
--- /dev/null
@@ -0,0 +1,189 @@
+/*
+ *     xt_ipvs - kernel module to match IPVS connection properties
+ *
+ *     Author: Hannes Eder <heder@google.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#ifdef CONFIG_IP_VS_IPV6
+#include <net/ipv6.h>
+#endif
+#include <linux/ip_vs.h>
+#include <linux/types.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_ipvs.h>
+#include <net/netfilter/nf_conntrack.h>
+
+#include <net/ip_vs.h>
+
+MODULE_AUTHOR("Hannes Eder <heder@google.com>");
+MODULE_DESCRIPTION("Xtables: match IPVS connection properties");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_ipvs");
+MODULE_ALIAS("ip6t_ipvs");
+
+/* borrowed from xt_conntrack */
+static bool ipvs_mt_addrcmp(const union nf_inet_addr *kaddr,
+                           const union nf_inet_addr *uaddr,
+                           const union nf_inet_addr *umask,
+                           unsigned int l3proto)
+{
+       if (l3proto == NFPROTO_IPV4)
+               return ((kaddr->ip ^ uaddr->ip) & umask->ip) == 0;
+#ifdef CONFIG_IP_VS_IPV6
+       else if (l3proto == NFPROTO_IPV6)
+               return ipv6_masked_addr_cmp(&kaddr->in6, &umask->in6,
+                      &uaddr->in6) == 0;
+#endif
+       else
+               return false;
+}
+
+/*
+ * Main match routine: compares the skb's IPVS connection state against
+ * the bitmask/invert pairs in struct xt_ipvs_mtinfo.  Each failed
+ * sub-test clears 'match' and bails out; the connection reference taken
+ * by pp->conn_out_get() is dropped at the out_put_cp label.
+ */
+static bool
+ipvs_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+       const struct xt_ipvs_mtinfo *data = par->matchinfo;
+       /* ipvs_mt_check ensures that family is only NFPROTO_IPV[46]. */
+       const u_int8_t family = par->family;
+       struct ip_vs_iphdr iph;
+       struct ip_vs_protocol *pp;
+       struct ip_vs_conn *cp;
+       bool match = true;
+
+       /* Fast path: only the "is this an IPVS packet" property is asked. */
+       if (data->bitmask == XT_IPVS_IPVS_PROPERTY) {
+               match = skb->ipvs_property ^
+                       !!(data->invert & XT_IPVS_IPVS_PROPERTY);
+               goto out;
+       }
+
+       /* other flags than XT_IPVS_IPVS_PROPERTY are set */
+       if (!skb->ipvs_property) {
+               match = false;
+               goto out;
+       }
+
+       ip_vs_fill_iphdr(family, skb_network_header(skb), &iph);
+
+       if (data->bitmask & XT_IPVS_PROTO)
+               if ((iph.protocol == data->l4proto) ^
+                   !(data->invert & XT_IPVS_PROTO)) {
+                       match = false;
+                       goto out;
+               }
+
+       pp = ip_vs_proto_get(iph.protocol);
+       if (unlikely(!pp)) {
+               match = false;
+               goto out;
+       }
+
+       /*
+        * Check if the packet belongs to an existing entry
+        */
+       cp = pp->conn_out_get(family, skb, pp, &iph, iph.len, 1 /* inverse */);
+       if (unlikely(cp == NULL)) {
+               match = false;
+               goto out;
+       }
+
+       /*
+        * We found a connection, i.e. ct != 0, make sure to call
+        * __ip_vs_conn_put before returning.  In our case jump to out_put_con.
+        */
+
+       if (data->bitmask & XT_IPVS_VPORT)
+               if ((cp->vport == data->vport) ^
+                   !(data->invert & XT_IPVS_VPORT)) {
+                       match = false;
+                       goto out_put_cp;
+               }
+
+       if (data->bitmask & XT_IPVS_VPORTCTL)
+               if ((cp->control != NULL &&
+                    cp->control->vport == data->vportctl) ^
+                   !(data->invert & XT_IPVS_VPORTCTL)) {
+                       match = false;
+                       goto out_put_cp;
+               }
+
+       /* Direction test needs the conntrack entry; untracked skbs fail. */
+       if (data->bitmask & XT_IPVS_DIR) {
+               enum ip_conntrack_info ctinfo;
+               struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+
+               if (ct == NULL || nf_ct_is_untracked(ct)) {
+                       match = false;
+                       goto out_put_cp;
+               }
+
+               if ((ctinfo >= IP_CT_IS_REPLY) ^
+                   !!(data->invert & XT_IPVS_DIR)) {
+                       match = false;
+                       goto out_put_cp;
+               }
+       }
+
+       if (data->bitmask & XT_IPVS_METHOD)
+               if (((cp->flags & IP_VS_CONN_F_FWD_MASK) == data->fwd_method) ^
+                   !(data->invert & XT_IPVS_METHOD)) {
+                       match = false;
+                       goto out_put_cp;
+               }
+
+       if (data->bitmask & XT_IPVS_VADDR) {
+               if (ipvs_mt_addrcmp(&cp->vaddr, &data->vaddr,
+                                   &data->vmask, family) ^
+                   !(data->invert & XT_IPVS_VADDR)) {
+                       match = false;
+                       goto out_put_cp;
+               }
+       }
+
+out_put_cp:
+       __ip_vs_conn_put(cp);
+out:
+       pr_debug("match=%d\n", match);
+       return match;
+}
+
+/*
+ * Rule sanity check: only NFPROTO_IPV4 (and NFPROTO_IPV6 when
+ * CONFIG_IP_VS_IPV6 is set) are supported; anything else is rejected
+ * with -EINVAL.
+ */
+static int ipvs_mt_check(const struct xt_mtchk_param *par)
+{
+       if (par->family != NFPROTO_IPV4
+#ifdef CONFIG_IP_VS_IPV6
+           && par->family != NFPROTO_IPV6
+#endif
+               ) {
+               pr_info("protocol family %u not supported\n", par->family);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/*
+ * Registration data: registered as NFPROTO_UNSPEC, but ipvs_mt_check
+ * narrows usable families to IPv4/IPv6 at rule-insertion time.
+ */
+static struct xt_match xt_ipvs_mt_reg __read_mostly = {
+       .name       = "ipvs",
+       .revision   = 0,
+       .family     = NFPROTO_UNSPEC,
+       .match      = ipvs_mt,
+       .checkentry = ipvs_mt_check,
+       .matchsize  = XT_ALIGN(sizeof(struct xt_ipvs_mtinfo)),
+       .me         = THIS_MODULE,
+};
+
+/* Module init: register the "ipvs" match with x_tables. */
+static int __init ipvs_mt_init(void)
+{
+       return xt_register_match(&xt_ipvs_mt_reg);
+}
+
+/* Module exit: unregister the match. */
+static void __exit ipvs_mt_exit(void)
+{
+       xt_unregister_match(&xt_ipvs_mt_reg);
+}
+
+module_init(ipvs_mt_init);
+module_exit(ipvs_mt_exit);
index b4f7dfea59805f3c705859163bfcf57fcb6b4da8..70eb2b4984ddb277e052458f325599910e57d875 100644 (file)
@@ -11,7 +11,8 @@
 #include <linux/netfilter/xt_quota.h>
 
 struct xt_quota_priv {
-       uint64_t quota;
+       spinlock_t      lock;
+       uint64_t        quota;
 };
 
 MODULE_LICENSE("GPL");
@@ -20,8 +21,6 @@ MODULE_DESCRIPTION("Xtables: countdown quota match");
 MODULE_ALIAS("ipt_quota");
 MODULE_ALIAS("ip6t_quota");
 
-static DEFINE_SPINLOCK(quota_lock);
-
 static bool
 quota_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
@@ -29,7 +28,7 @@ quota_mt(const struct sk_buff *skb, struct xt_action_param *par)
        struct xt_quota_priv *priv = q->master;
        bool ret = q->flags & XT_QUOTA_INVERT;
 
-       spin_lock_bh(&quota_lock);
+       spin_lock_bh(&priv->lock);
        if (priv->quota >= skb->len) {
                priv->quota -= skb->len;
                ret = !ret;
@@ -37,9 +36,7 @@ quota_mt(const struct sk_buff *skb, struct xt_action_param *par)
                /* we do not allow even small packets from now on */
                priv->quota = 0;
        }
-       /* Copy quota back to matchinfo so that iptables can display it */
-       q->quota = priv->quota;
-       spin_unlock_bh(&quota_lock);
+       spin_unlock_bh(&priv->lock);
 
        return ret;
 }
@@ -55,6 +52,7 @@ static int quota_mt_check(const struct xt_mtchk_param *par)
        if (q->master == NULL)
                return -ENOMEM;
 
+       spin_lock_init(&q->master->lock);
        q->master->quota = q->quota;
        return 0;
 }
index 8648a9922aabace0231de8eec18b88e30239fd5f..2cbf380377d5e009fa972d85af18ded9971a5248 100644 (file)
@@ -1406,7 +1406,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
        struct netlink_sock *nlk = nlk_sk(sk);
        int noblock = flags&MSG_DONTWAIT;
        size_t copied;
-       struct sk_buff *skb, *frag __maybe_unused = NULL;
+       struct sk_buff *skb;
        int err;
 
        if (flags&MSG_OOB)
@@ -1441,7 +1441,21 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
                        kfree_skb(skb);
                        skb = compskb;
                } else {
-                       frag = skb_shinfo(skb)->frag_list;
+                       /*
+                        * Before setting frag_list to NULL, we must get a
+                        * private copy of skb if shared (because of MSG_PEEK)
+                        */
+                       if (skb_shared(skb)) {
+                               struct sk_buff *nskb;
+
+                               nskb = pskb_copy(skb, GFP_KERNEL);
+                               kfree_skb(skb);
+                               skb = nskb;
+                               err = -ENOMEM;
+                               if (!skb)
+                                       goto out;
+                       }
+                       kfree_skb(skb_shinfo(skb)->frag_list);
                        skb_shinfo(skb)->frag_list = NULL;
                }
        }
@@ -1478,10 +1492,6 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
        if (flags & MSG_TRUNC)
                copied = skb->len;
 
-#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
-       skb_shinfo(skb)->frag_list = frag;
-#endif
-
        skb_free_datagram(sk, skb);
 
        if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
index aa4308afcc7f07168d9317235e795218d878acdd..26ed3e8587c20f13a22c7aa2e2abf91dedf3d62a 100644 (file)
@@ -303,6 +303,7 @@ int genl_register_ops(struct genl_family *family, struct genl_ops *ops)
 errout:
        return err;
 }
+EXPORT_SYMBOL(genl_register_ops);
 
 /**
  * genl_unregister_ops - unregister generic netlink operations
@@ -337,6 +338,7 @@ int genl_unregister_ops(struct genl_family *family, struct genl_ops *ops)
 
        return -ENOENT;
 }
+EXPORT_SYMBOL(genl_unregister_ops);
 
 /**
  * genl_register_family - register a generic netlink family
@@ -405,6 +407,7 @@ errout_locked:
 errout:
        return err;
 }
+EXPORT_SYMBOL(genl_register_family);
 
 /**
  * genl_register_family_with_ops - register a generic netlink family
@@ -485,6 +488,7 @@ int genl_unregister_family(struct genl_family *family)
 
        return -ENOENT;
 }
+EXPORT_SYMBOL(genl_unregister_family);
 
 static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
@@ -873,11 +877,7 @@ static int __init genl_init(void)
        for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
                INIT_LIST_HEAD(&family_ht[i]);
 
-       err = genl_register_family(&genl_ctrl);
-       if (err < 0)
-               goto problem;
-
-       err = genl_register_ops(&genl_ctrl, &genl_ctrl_ops);
+       err = genl_register_family_with_ops(&genl_ctrl, &genl_ctrl_ops, 1);
        if (err < 0)
                goto problem;
 
@@ -899,11 +899,6 @@ problem:
 
 subsys_initcall(genl_init);
 
-EXPORT_SYMBOL(genl_register_ops);
-EXPORT_SYMBOL(genl_unregister_ops);
-EXPORT_SYMBOL(genl_register_family);
-EXPORT_SYMBOL(genl_unregister_family);
-
 static int genlmsg_mcast(struct sk_buff *skb, u32 pid, unsigned long group,
                         gfp_t flags)
 {
index cbc244a128bd1611829f6e943ae7a669cc81f2d3..b4fdaac233f77ec3dbe4bd86b0f8d5bc178bb9f7 100644 (file)
@@ -109,7 +109,9 @@ static int __must_check rose_add_node(struct rose_route_struct *rose_route,
                init_timer(&rose_neigh->t0timer);
 
                if (rose_route->ndigis != 0) {
-                       if ((rose_neigh->digipeat = kmalloc(sizeof(ax25_digi), GFP_KERNEL)) == NULL) {
+                       rose_neigh->digipeat =
+                               kmalloc(sizeof(ax25_digi), GFP_ATOMIC);
+                       if (rose_neigh->digipeat == NULL) {
                                kfree(rose_neigh);
                                res = -ENOMEM;
                                goto out;
index a16b0175f890686025c5b6684ef13cc878acd6f3..11f195af2da0732aaf362380928e298f7f35a199 100644 (file)
@@ -33,6 +33,7 @@
 static struct tcf_common *tcf_mirred_ht[MIRRED_TAB_MASK + 1];
 static u32 mirred_idx_gen;
 static DEFINE_RWLOCK(mirred_lock);
+static LIST_HEAD(mirred_list);
 
 static struct tcf_hashinfo mirred_hash_info = {
        .htab   =       tcf_mirred_ht,
@@ -47,7 +48,9 @@ static inline int tcf_mirred_release(struct tcf_mirred *m, int bind)
                        m->tcf_bindcnt--;
                m->tcf_refcnt--;
                if(!m->tcf_bindcnt && m->tcf_refcnt <= 0) {
-                       dev_put(m->tcfm_dev);
+                       list_del(&m->tcfm_list);
+                       if (m->tcfm_dev)
+                               dev_put(m->tcfm_dev);
                        tcf_hash_destroy(&m->common, &mirred_hash_info);
                        return 1;
                }
@@ -134,8 +137,10 @@ static int tcf_mirred_init(struct nlattr *nla, struct nlattr *est,
                m->tcfm_ok_push = ok_push;
        }
        spin_unlock_bh(&m->tcf_lock);
-       if (ret == ACT_P_CREATED)
+       if (ret == ACT_P_CREATED) {
+               list_add(&m->tcfm_list, &mirred_list);
                tcf_hash_insert(pc, &mirred_hash_info);
+       }
 
        return ret;
 }
@@ -164,9 +169,14 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a,
        m->tcf_bstats.packets++;
 
        dev = m->tcfm_dev;
+       if (!dev) {
+               printk_once(KERN_NOTICE "tc mirred: target device is gone\n");
+               goto out;
+       }
+
        if (!(dev->flags & IFF_UP)) {
                if (net_ratelimit())
-                       pr_notice("tc mirred to Houston: device %s is gone!\n",
+                       pr_notice("tc mirred to Houston: device %s is down\n",
                                  dev->name);
                goto out;
        }
@@ -230,6 +240,28 @@ nla_put_failure:
        return -1;
 }
 
+static int mirred_device_event(struct notifier_block *unused,
+                              unsigned long event, void *ptr)
+{
+       struct net_device *dev = ptr;
+       struct tcf_mirred *m;
+
+       if (event == NETDEV_UNREGISTER)
+               list_for_each_entry(m, &mirred_list, tcfm_list) {
+                       if (m->tcfm_dev == dev) {
+                               dev_put(dev);
+                               m->tcfm_dev = NULL;
+                       }
+               }
+
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block mirred_device_notifier = {
+       .notifier_call = mirred_device_event,
+};
+
+
 static struct tc_action_ops act_mirred_ops = {
        .kind           =       "mirred",
        .hinfo          =       &mirred_hash_info,
@@ -250,12 +282,17 @@ MODULE_LICENSE("GPL");
 
 static int __init mirred_init_module(void)
 {
+       int err = register_netdevice_notifier(&mirred_device_notifier);
+       if (err)
+               return err;
+
        pr_info("Mirror/redirect action on\n");
        return tcf_register_action(&act_mirred_ops);
 }
 
 static void __exit mirred_cleanup_module(void)
 {
+       unregister_netdevice_notifier(&mirred_device_notifier);
        tcf_unregister_action(&act_mirred_ops);
 }
 
index 24e614c495f26091d5acdc8af3a8f951d8d984c2..d0386a413e8dc9406d470e66af196efc40f4e226 100644 (file)
@@ -218,6 +218,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
                if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph)))
                        goto drop;
 
+               icmph = (void *)(skb_network_header(skb) + ihl);
                iph = (void *)(icmph + 1);
                if (egress)
                        addr = iph->daddr;
@@ -246,7 +247,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
                        iph->saddr = new_addr;
 
                inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr,
-                                        1);
+                                        0);
                break;
        }
        default:
index 4f522143811e467fe70e7e4b13db47f979b11b8a..7416a5c73b2a993550991ac66eca7cc254c6f2e6 100644 (file)
@@ -134,10 +134,12 @@ next_knode:
 #endif
 
                for (i = n->sel.nkeys; i>0; i--, key++) {
-                       unsigned int toff;
+                       int toff = off + key->off + (off2 & key->offmask);
                        __be32 *data, _data;
 
-                       toff = off + key->off + (off2 & key->offmask);
+                       if (skb_headroom(skb) + toff < 0)
+                               goto out;
+
                        data = skb_header_pointer(skb, toff, 4, &_data);
                        if (!data)
                                goto out;