net-next-2.6.git - commitdiff
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
author David S. Miller <davem@davemloft.net>
Fri, 19 Nov 2010 21:13:47 +0000 (13:13 -0800)
committer David S. Miller <davem@davemloft.net>
Fri, 19 Nov 2010 21:13:47 +0000 (13:13 -0800)
Conflicts:
drivers/net/bonding/bond_main.c
net/core/net-sysfs.c
net/ipv6/addrconf.c

380 files changed:
Documentation/networking/ip-sysctl.txt
Documentation/networking/stmmac.txt
arch/arm/mach-omap2/board-omap3pandora.c
drivers/atm/fore200e.c
drivers/block/aoe/aoecmd.c
drivers/infiniband/core/addr.c
drivers/isdn/hardware/mISDN/mISDNinfineon.c
drivers/isdn/hardware/mISDN/mISDNisar.c
drivers/isdn/hisax/avm_pci.c
drivers/isdn/hisax/callc.c
drivers/isdn/hisax/hfc_2bds0.c
drivers/isdn/hisax/hfc_2bs0.c
drivers/isdn/hisax/hfc_pci.c
drivers/isdn/hisax/hfc_sx.c
drivers/isdn/hisax/hisax.h
drivers/isdn/hisax/ipacx.c
drivers/isdn/hisax/isar.c
drivers/isdn/hisax/isdnl1.h
drivers/isdn/hisax/isdnl3.c
drivers/isdn/hisax/netjet.c
drivers/isdn/hisax/st5481_d.c
drivers/isdn/i4l/isdn_concap.c
drivers/isdn/i4l/isdn_net.c
drivers/isdn/i4l/isdn_ppp.c
drivers/isdn/mISDN/layer1.c
drivers/isdn/mISDN/layer2.c
drivers/isdn/mISDN/tei.c
drivers/net/3c507.c
drivers/net/3c515.c
drivers/net/82596.c
drivers/net/Kconfig
drivers/net/arm/w90p910_ether.c
drivers/net/at1700.c
drivers/net/atarilance.c
drivers/net/ax88796.c
drivers/net/bnx2x/bnx2x_link.c
drivers/net/bnx2x/bnx2x_main.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bonding.h
drivers/net/can/mscan/mscan.c
drivers/net/can/pch_can.c
drivers/net/can/sja1000/sja1000_of_platform.c
drivers/net/cnic.c
drivers/net/cxgb4vf/adapter.h
drivers/net/cxgb4vf/cxgb4vf_main.c
drivers/net/cxgb4vf/sge.c
drivers/net/cxgb4vf/t4vf_hw.c
drivers/net/dm9000.c
drivers/net/e1000e/82571.c
drivers/net/e1000e/defines.h
drivers/net/e1000e/netdev.c
drivers/net/eepro.c
drivers/net/enic/enic.h
drivers/net/enic/enic_main.c
drivers/net/fec_mpc52xx.c
drivers/net/igbvf/Makefile
drivers/net/igbvf/defines.h
drivers/net/igbvf/ethtool.c
drivers/net/igbvf/igbvf.h
drivers/net/igbvf/mbx.c
drivers/net/igbvf/mbx.h
drivers/net/igbvf/netdev.c
drivers/net/igbvf/regs.h
drivers/net/igbvf/vf.c
drivers/net/igbvf/vf.h
drivers/net/iseries_veth.c
drivers/net/ixgbe/Makefile
drivers/net/ixgbe/ixgbe.h
drivers/net/ixgbe/ixgbe_82598.c
drivers/net/ixgbe/ixgbe_82599.c
drivers/net/ixgbe/ixgbe_common.c
drivers/net/ixgbe/ixgbe_common.h
drivers/net/ixgbe/ixgbe_dcb.c
drivers/net/ixgbe/ixgbe_dcb.h
drivers/net/ixgbe/ixgbe_dcb_82598.c
drivers/net/ixgbe/ixgbe_dcb_82599.c
drivers/net/ixgbe/ixgbe_dcb_nl.c
drivers/net/ixgbe/ixgbe_ethtool.c
drivers/net/ixgbe/ixgbe_fcoe.c
drivers/net/ixgbe/ixgbe_main.c
drivers/net/ixgbe/ixgbe_mbx.c
drivers/net/ixgbe/ixgbe_mbx.h
drivers/net/ixgbe/ixgbe_phy.c
drivers/net/ixgbe/ixgbe_phy.h
drivers/net/ixgbe/ixgbe_sriov.c
drivers/net/ixgbe/ixgbe_type.h
drivers/net/ixgbe/ixgbe_x540.c [new file with mode: 0644]
drivers/net/ixgbevf/Makefile
drivers/net/ixgbevf/defines.h
drivers/net/ixgbevf/ixgbevf.h
drivers/net/ixgbevf/ixgbevf_main.c
drivers/net/ixgbevf/mbx.c
drivers/net/ixgbevf/mbx.h
drivers/net/ixgbevf/regs.h
drivers/net/ixgbevf/vf.c
drivers/net/ixgbevf/vf.h
drivers/net/ks8851.c
drivers/net/lance.c
drivers/net/lib82596.c
drivers/net/lib8390.c
drivers/net/macvlan.c
drivers/net/ne-h8300.c
drivers/net/pcmcia/axnet_cs.c
drivers/net/ppp_generic.c
drivers/net/qla3xxx.c
drivers/net/qlcnic/qlcnic.h
drivers/net/qlcnic/qlcnic_ctx.c
drivers/net/qlcnic/qlcnic_ethtool.c
drivers/net/qlge/qlge.h
drivers/net/qlge/qlge_dbg.c
drivers/net/qlge/qlge_ethtool.c
drivers/net/qlge/qlge_main.c
drivers/net/qlge/qlge_mpi.c
drivers/net/s2io.c
drivers/net/sh_eth.c
drivers/net/usb/hso.c
drivers/net/usb/ipheth.c
drivers/net/usb/pegasus.c
drivers/net/vxge/vxge-config.c
drivers/net/vxge/vxge-config.h
drivers/net/vxge/vxge-ethtool.c
drivers/net/vxge/vxge-main.c
drivers/net/vxge/vxge-main.h
drivers/net/vxge/vxge-reg.h
drivers/net/vxge/vxge-traffic.h
drivers/net/vxge/vxge-version.h
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath5k/debug.c
drivers/net/wireless/ath/ath5k/desc.h
drivers/net/wireless/ath/ath5k/phy.c
drivers/net/wireless/ath/ath9k/ar5008_phy.c
drivers/net/wireless/ath/ath9k/ar9002_mac.c
drivers/net/wireless/ath/ath9k/ar9002_phy.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
drivers/net/wireless/ath/ath9k/ar9003_mac.c
drivers/net/wireless/ath/ath9k/ar9003_mac.h
drivers/net/wireless/ath/ath9k/ar9003_phy.c
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/beacon.c
drivers/net/wireless/ath/ath9k/common.c
drivers/net/wireless/ath/ath9k/common.h
drivers/net/wireless/ath/ath9k/debug.c
drivers/net/wireless/ath/ath9k/debug.h
drivers/net/wireless/ath/ath9k/eeprom.h
drivers/net/wireless/ath/ath9k/eeprom_4k.c
drivers/net/wireless/ath/ath9k/eeprom_9287.c
drivers/net/wireless/ath/ath9k/eeprom_def.c
drivers/net/wireless/ath/ath9k/gpio.c
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/hw.h
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/ath/ath9k/mac.c
drivers/net/wireless/ath/ath9k/mac.h
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/pci.c
drivers/net/wireless/ath/ath9k/rc.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/ath9k/virtual.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/carl9170/carl9170.h
drivers/net/wireless/ath/carl9170/fwcmd.h
drivers/net/wireless/ath/carl9170/hw.h
drivers/net/wireless/ath/carl9170/mac.c
drivers/net/wireless/ath/carl9170/main.c
drivers/net/wireless/ath/carl9170/phy.c
drivers/net/wireless/ath/carl9170/phy.h
drivers/net/wireless/ath/carl9170/tx.c
drivers/net/wireless/ath/carl9170/usb.c
drivers/net/wireless/ath/carl9170/version.h
drivers/net/wireless/ath/debug.h
drivers/net/wireless/ath/key.c
drivers/net/wireless/b43/b43.h
drivers/net/wireless/b43/dma.c
drivers/net/wireless/b43/phy_n.c
drivers/net/wireless/b43/radio_2055.c
drivers/net/wireless/b43/radio_2056.c
drivers/net/wireless/b43/radio_2056.h
drivers/net/wireless/b43legacy/rfkill.c
drivers/net/wireless/iwlwifi/Kconfig
drivers/net/wireless/iwlwifi/Makefile
drivers/net/wireless/iwlwifi/iwl-1000.c
drivers/net/wireless/iwlwifi/iwl-3945.c
drivers/net/wireless/iwlwifi/iwl-3945.h
drivers/net/wireless/iwlwifi/iwl-4965.c
drivers/net/wireless/iwlwifi/iwl-5000.c
drivers/net/wireless/iwlwifi/iwl-6000.c
drivers/net/wireless/iwlwifi/iwl-agn-calib.c
drivers/net/wireless/iwlwifi/iwl-agn-lib.c
drivers/net/wireless/iwlwifi/iwl-agn-rxon.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/iwl-agn-sta.c
drivers/net/wireless/iwlwifi/iwl-agn.c
drivers/net/wireless/iwlwifi/iwl-agn.h
drivers/net/wireless/iwlwifi/iwl-core.c
drivers/net/wireless/iwlwifi/iwl-core.h
drivers/net/wireless/iwlwifi/iwl-csr.h
drivers/net/wireless/iwlwifi/iwl-dev.h
drivers/net/wireless/iwlwifi/iwl-led.c
drivers/net/wireless/iwlwifi/iwl-legacy.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/iwl-legacy.h [new file with mode: 0644]
drivers/net/wireless/iwlwifi/iwl-power.c
drivers/net/wireless/iwlwifi/iwl-power.h
drivers/net/wireless/iwlwifi/iwl-rx.c
drivers/net/wireless/iwlwifi/iwl-scan.c
drivers/net/wireless/iwlwifi/iwl-tx.c
drivers/net/wireless/iwlwifi/iwl3945-base.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mwl8k.c
drivers/net/wireless/rt2x00/rt2400pci.c
drivers/net/wireless/rt2x00/rt2400pci.h
drivers/net/wireless/rt2x00/rt2500pci.c
drivers/net/wireless/rt2x00/rt2500pci.h
drivers/net/wireless/rt2x00/rt2500usb.c
drivers/net/wireless/rt2x00/rt2800.h
drivers/net/wireless/rt2x00/rt2800lib.c
drivers/net/wireless/rt2x00/rt2800pci.c
drivers/net/wireless/rt2x00/rt2800pci.h
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/wireless/rt2x00/rt2800usb.h
drivers/net/wireless/rt2x00/rt2x00.h
drivers/net/wireless/rt2x00/rt2x00config.c
drivers/net/wireless/rt2x00/rt2x00debug.c
drivers/net/wireless/rt2x00/rt2x00dev.c
drivers/net/wireless/rt2x00/rt2x00lib.h
drivers/net/wireless/rt2x00/rt2x00link.c
drivers/net/wireless/rt2x00/rt2x00mac.c
drivers/net/wireless/rt2x00/rt2x00pci.c
drivers/net/wireless/rt2x00/rt2x00queue.c
drivers/net/wireless/rt2x00/rt2x00queue.h
drivers/net/wireless/rt2x00/rt2x00reg.h
drivers/net/wireless/rt2x00/rt2x00usb.c
drivers/net/wireless/rt2x00/rt61pci.c
drivers/net/wireless/rt2x00/rt61pci.h
drivers/net/wireless/rt2x00/rt73usb.c
drivers/net/wireless/rt2x00/rt73usb.h
drivers/net/wireless/rtl818x/rtl8187_dev.c
drivers/net/wireless/rtl818x/rtl8187_rtl8225.c
drivers/net/wireless/wl1251/main.c
drivers/net/wireless/wl1251/sdio.c
drivers/net/wireless/wl1251/spi.c
drivers/net/wireless/wl1251/wl1251.h
drivers/net/wireless/wl12xx/Kconfig
drivers/net/wireless/wl12xx/wl1271.h
drivers/net/wireless/wl12xx/wl1271_acx.c
drivers/net/wireless/wl12xx/wl1271_acx.h
drivers/net/wireless/wl12xx/wl1271_boot.c
drivers/net/wireless/wl12xx/wl1271_debugfs.c
drivers/net/wireless/wl12xx/wl1271_event.c
drivers/net/wireless/wl12xx/wl1271_main.c
drivers/net/wireless/wl12xx/wl1271_rx.c
drivers/net/wireless/wl12xx/wl1271_rx.h
drivers/net/wireless/wl12xx/wl1271_scan.c
drivers/net/wireless/wl12xx/wl1271_testmode.c
drivers/net/wireless/wl12xx/wl1271_tx.c
drivers/net/wireless/wl12xx/wl1271_tx.h
drivers/net/wireless/zd1201.c
drivers/net/wireless/zd1211rw/zd_usb.c
drivers/net/xilinx_emaclite.c
drivers/net/znet.c
drivers/ssb/pcihost_wrapper.c
include/linux/bitops.h
include/linux/dccp.h
include/linux/filter.h
include/linux/if_bridge.h
include/linux/if_link.h
include/linux/if_macvlan.h
include/linux/igmp.h
include/linux/inetdevice.h
include/linux/netdevice.h
include/linux/netfilter.h
include/linux/nl80211.h
include/linux/rfkill.h
include/linux/wl12xx.h
include/net/caif/cfctrl.h
include/net/cfg80211.h
include/net/dn_dev.h
include/net/dn_route.h
include/net/dst.h
include/net/flow.h
include/net/inet_sock.h
include/net/neighbour.h
include/net/netlink.h
include/net/route.h
include/net/rtnetlink.h
include/net/sock.h
include/net/xfrm.h
lib/nlattr.c
net/8021q/vlan.c
net/8021q/vlan.h
net/8021q/vlan_core.c
net/8021q/vlan_dev.c
net/8021q/vlan_netlink.c
net/8021q/vlanproc.c
net/atm/br2684.c
net/atm/clip.c
net/atm/lec.c
net/bridge/br.c
net/bridge/br_fdb.c
net/bridge/br_forward.c
net/bridge/br_if.c
net/bridge/br_input.c
net/bridge/br_multicast.c
net/bridge/br_netfilter.c
net/bridge/br_netlink.c
net/bridge/br_notify.c
net/bridge/br_private.h
net/bridge/br_stp_bpdu.c
net/bridge/netfilter/ebtable_broute.c
net/bridge/netfilter/ebtables.c
net/core/dev.c
net/core/filter.c
net/core/net-sysfs.c
net/core/pktgen.c
net/core/rtnetlink.c
net/core/timestamping.c
net/dccp/ackvec.c
net/dccp/ackvec.h
net/dccp/ccids/ccid2.c
net/dccp/ccids/ccid2.h
net/dccp/dccp.h
net/dccp/input.c
net/dccp/ipv4.c
net/dccp/options.c
net/dccp/output.c
net/decnet/af_decnet.c
net/decnet/dn_dev.c
net/decnet/dn_fib.c
net/decnet/dn_neigh.c
net/decnet/dn_route.c
net/decnet/dn_rules.c
net/ipv4/af_inet.c
net/ipv4/arp.c
net/ipv4/devinet.c
net/ipv4/fib_frontend.c
net/ipv4/fib_semantics.c
net/ipv4/icmp.c
net/ipv4/igmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/ip_gre.c
net/ipv4/ip_output.c
net/ipv4/ipip.c
net/ipv4/ipmr.c
net/ipv4/netfilter.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/syncookies.c
net/ipv4/tcp.c
net/ipv4/tcp_output.c
net/ipv4/tcp_probe.c
net/ipv4/udp.c
net/ipv4/xfrm4_policy.c
net/ipv6/addrconf.c
net/ipv6/ip6mr.c
net/ipv6/mcast.c
net/ipv6/netfilter.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/udp.c
net/l2tp/l2tp_ip.c
net/mac80211/aes_ccm.c
net/mac80211/aes_cmac.c
net/mac80211/debugfs.c
net/mac80211/debugfs.h
net/mac80211/debugfs_key.c
net/mac80211/debugfs_sta.c
net/mac80211/rc80211_minstrel_ht.c
net/netfilter/core.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/xt_TEE.c
net/packet/af_packet.c
net/rfkill/core.c
net/rxrpc/ar-peer.c
net/socket.c
net/unix/af_unix.c
net/wireless/reg.c
net/x25/af_x25.c
security/selinux/hooks.c

index fe95105992c57f1c81248dd8d3c1c38c3272e0f9..ae5522703d16e8b579e3ed3c0b2e03f1d707bea5 100644 (file)
@@ -707,10 +707,28 @@ igmp_max_memberships - INTEGER
        Change the maximum number of multicast groups we can subscribe to.
        Default: 20
 
-conf/interface/*  changes special settings per interface (where "interface" is
-                 the name of your network interface)
-conf/all/*       is special, changes the settings for all interfaces
+       Theoretical maximum value is bounded by having to send a membership
+       report in a single datagram (i.e. the report can't span multiple
+       datagrams, or risk confusing the switch and leaving groups you don't
+       intend to).
 
+       The number of supported groups 'M' is bounded by the number of group
+       report entries you can fit into a single datagram of 65535 bytes.
+
+       M = 65536-sizeof (ip header)/(sizeof(Group record))
+
+       Group records are variable length, with a minimum of 12 bytes.
+       So net.ipv4.igmp_max_memberships should not be set higher than:
+
+       (65536-24) / 12 = 5459
+
+       The value 5459 assumes no IP header options, so in practice
+       this number may be lower.
+
+       conf/interface/*  changes special settings per interface (where
+       "interface" is the name of your network interface)
+
+       conf/all/*        is special, changes the settings for all interfaces
 
 log_martians - BOOLEAN
        Log packets with impossible addresses to kernel log.
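
The 5459 figure derived in the hunk above can be checked directly. A minimal userspace sketch of the same arithmetic, assuming the 24 bytes of header overhead and the 12-byte minimum group-record size quoted in the added text:

#include <stdio.h>

int main(void)
{
	const int max_datagram = 65536;  /* upper bound used in the documentation   */
	const int header       = 24;     /* header overhead assumed (no IP options) */
	const int group_record = 12;     /* minimum size of one IGMPv3 group record */

	/* How many group records fit into a single membership report. */
	printf("igmp_max_memberships upper bound: %d\n",
	       (max_datagram - header) / group_record);
	return 0;                        /* prints 5459 */
}

With IP options present the header grows, so the practical ceiling is lower, as the documentation notes.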
index 7ee770b5ef5fc0664d75d3b62a9798af0c06a10d..80a7a34549022147a68ddb4821e16be4f9e589ea 100644 (file)
@@ -7,7 +7,7 @@ This is the driver for the MAC 10/100/1000 on-chip Ethernet controllers
 (Synopsys IP blocks); it has been fully tested on STLinux platforms.
 
 Currently this network device driver is for all STM embedded MAC/GMAC
-(7xxx SoCs).
+(7xxx SoCs). Other platforms start using it i.e. ARM SPEAr.
 
 DWC Ether MAC 10/100/1000 Universal version 3.41a and DWC Ether MAC 10/100
 Universal version 4.0 have been used for developing the first code
@@ -95,9 +95,14 @@ Several information came from the platform; please refer to the
 driver's Header file in include/linux directory.
 
 struct plat_stmmacenet_data {
-        int bus_id;
-        int pbl;
-        int has_gmac;
+       int bus_id;
+       int pbl;
+       int clk_csr;
+       int has_gmac;
+       int enh_desc;
+       int tx_coe;
+       int bugged_jumbo;
+       int pmt;
         void (*fix_mac_speed)(void *priv, unsigned int speed);
         void (*bus_setup)(unsigned long ioaddr);
 #ifdef CONFIG_STM_DRIVERS
@@ -114,6 +119,12 @@ Where:
   registers (on STM platforms);
 - has_gmac: GMAC core is on board (get it at run-time in the next step);
 - bus_id: bus identifier.
+- tx_coe: core is able to perform the tx csum in HW.
+- enh_desc: if sets the MAC will use the enhanced descriptor structure.
+- clk_csr: CSR Clock range selection.
+- bugged_jumbo: some HWs are not able to perform the csum in HW for
+  over-sized frames due to limited buffer sizes. Setting this
+  flag the csum will be done in SW on JUMBO frames.
 
 struct plat_stmmacphy_data {
         int bus_id;
@@ -131,13 +142,28 @@ Where:
 - interface: physical MII interface mode;
 - phy_reset: hook to reset HW function.
 
+SOURCES:
+- Kconfig
+- Makefile
+- stmmac_main.c: main network device driver;
+- stmmac_mdio.c: mdio functions;
+- stmmac_ethtool.c: ethtool support;
+- stmmac_timer.[ch]: timer code used for mitigating the driver dma interrupts
+  Only tested on ST40 platforms based.
+- stmmac.h: private driver structure;
+- common.h: common definitions and VFTs;
+- descs.h: descriptor structure definitions;
+- dwmac1000_core.c: GMAC core functions;
+- dwmac1000_dma.c:  dma functions for the GMAC chip;
+- dwmac1000.h: specific header file for the GMAC;
+- dwmac100_core: MAC 100 core and dma code;
+- dwmac100_dma.c: dma funtions for the MAC chip;
+- dwmac1000.h: specific header file for the MAC;
+- dwmac_lib.c: generic DMA functions shared among chips
+- enh_desc.c: functions for handling enhanced descriptors
+- norm_desc.c: functions for handling normal descriptors
+
 TODO:
-- Continue to make the driver more generic and suitable for other Synopsys
-  Ethernet controllers used on other architectures (i.e. ARM).
-- 10G controllers are not supported.
-- MAC uses Normal descriptors and GMAC uses enhanced ones.
-  This is a limit that should be reviewed. MAC could want to
-  use the enhanced structure.
-- Checksumming: Rx/Tx csum is done in HW in case of GMAC only.
+- XGMAC controller is not supported.
 - Review the timer optimisation code to use an embedded device that seems to be
   available in new chip generations.
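
The new plat_stmmacenet_data fields documented above are filled in by board code. A hypothetical initializer, only to show how the flags map onto the descriptions (the values and the fix_mac_speed callback are illustrative, not taken from any real platform):

static void board_fix_mac_speed(void *priv, unsigned int speed)
{
	/* board-specific MAC retiming after a link speed change, if needed */
}

static struct plat_stmmacenet_data board_stmmac_data = {
	.bus_id        = 0,
	.pbl           = 32,	/* DMA programmable burst length              */
	.clk_csr       = 0,	/* CSR clock range selection                  */
	.has_gmac      = 1,	/* GMAC core rather than the MAC 10/100       */
	.enh_desc      = 1,	/* use the enhanced descriptor structure      */
	.tx_coe        = 1,	/* TX checksum done in hardware               */
	.bugged_jumbo  = 0,	/* set to 1 to force SW csum on jumbo frames  */
	.pmt           = 1,	/* power-management/wake-up flag; meaning
				   assumed, not described in the text above   */
	.fix_mac_speed = board_fix_mac_speed,
};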
index 89ed1be2d62e0d7214ef968e44d73170ebe35a22..8be261506056b4de54d7a12a7c95df27e3afcec5 100644 (file)
@@ -642,31 +642,13 @@ static void __init omap3pandora_init_irq(void)
        omap_gpio_init();
 }
 
-static void pandora_wl1251_set_power(bool enable)
-{
-       /*
-        * Keep power always on until wl1251_sdio driver learns to re-init
-        * the chip after powering it down and back up.
-        */
-}
-
-static struct wl12xx_platform_data pandora_wl1251_pdata = {
-       .set_power      = pandora_wl1251_set_power,
-       .use_eeprom     = true,
-};
-
-static struct platform_device pandora_wl1251_data = {
-       .name           = "wl1251_data",
-       .id             = -1,
-       .dev            = {
-               .platform_data  = &pandora_wl1251_pdata,
-       },
-};
-
-static void pandora_wl1251_init(void)
+static void __init pandora_wl1251_init(void)
 {
+       struct wl12xx_platform_data pandora_wl1251_pdata;
        int ret;
 
+       memset(&pandora_wl1251_pdata, 0, sizeof(pandora_wl1251_pdata));
+
        ret = gpio_request(PANDORA_WIFI_IRQ_GPIO, "wl1251 irq");
        if (ret < 0)
                goto fail;
@@ -679,6 +661,11 @@ static void pandora_wl1251_init(void)
        if (pandora_wl1251_pdata.irq < 0)
                goto fail_irq;
 
+       pandora_wl1251_pdata.use_eeprom = true;
+       ret = wl12xx_set_platform_data(&pandora_wl1251_pdata);
+       if (ret < 0)
+               goto fail_irq;
+
        return;
 
 fail_irq:
@@ -691,7 +678,6 @@ static struct platform_device *omap3pandora_devices[] __initdata = {
        &pandora_leds_gpio,
        &pandora_keys_gpio,
        &pandora_dss_device,
-       &pandora_wl1251_data,
        &pandora_vwlan_device,
 };
 
index c8fc69c85a062f81029495e827ae615547f964e9..c09761959354a5ad3508c8f2ecccfa0b21da3ded 100644 (file)
@@ -92,7 +92,7 @@
 
 #define FORE200E_INDEX(virt_addr, type, index)     (&((type *)(virt_addr))[ index ])
 
-#define FORE200E_NEXT_ENTRY(index, modulo)         (index = ++(index) % (modulo))
+#define FORE200E_NEXT_ENTRY(index, modulo)         (index = ((index) + 1) % (modulo))
 
 #if 1
 #define ASSERT(expr)     if (!(expr)) { \
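
The macro change above removes an expression that modified "index" twice without an intervening sequence point (once via ++ and once via the assignment), which C leaves undefined; the replacement computes the new value once and assigns once. A small standalone illustration of the intended ring-index advance, assuming a modulo of 4:

#include <stdio.h>

#define NEXT_ENTRY(index, modulo)  ((index) = ((index) + 1) % (modulo))

int main(void)
{
	int idx = 0;
	int i;

	/* Walk a 4-entry ring: prints 1, 2, 3, 0, 1 */
	for (i = 0; i < 5; i++)
		printf("%d\n", NEXT_ENTRY(idx, 4));
	return 0;
}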
index 5674bd01d96dffc86a8818f6a62a72207092c71e..de0435e63b02cbd349c5dcc282682359f6f85934 100644 (file)
@@ -297,8 +297,8 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu
        struct sk_buff *skb;
        struct net_device *ifp;
 
-       read_lock(&dev_base_lock);
-       for_each_netdev(&init_net, ifp) {
+       rcu_read_lock();
+       for_each_netdev_rcu(&init_net, ifp) {
                dev_hold(ifp);
                if (!is_aoe_netif(ifp))
                        goto cont;
@@ -325,7 +325,7 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu
 cont:
                dev_put(ifp);
        }
-       read_unlock(&dev_base_lock);
+       rcu_read_unlock();
 }
 
 static void
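
The aoecmd hunks replace the dev_base_lock reader lock with an RCU read-side section when walking the interface list. Consolidated, the resulting pattern is the kernel-context sketch below; the body is elided down to the reference counting the original function keeps around its use of each device:

	struct net_device *ifp;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ifp) {
		dev_hold(ifp);        /* pin the device while we use it */
		/* ... build and queue the config query frame ... */
		dev_put(ifp);
	}
	rcu_read_unlock();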
index a5ea1bce9689601edb299eab667ea0be35290cd2..c15fd2ea56c1ae169946bd6a7323f6e625add339 100644 (file)
@@ -200,7 +200,7 @@ static int addr4_resolve(struct sockaddr_in *src_in,
        src_in->sin_family = AF_INET;
        src_in->sin_addr.s_addr = rt->rt_src;
 
-       if (rt->idev->dev->flags & IFF_LOOPBACK) {
+       if (rt->dst.dev->flags & IFF_LOOPBACK) {
                ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
                if (!ret)
                        memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
@@ -208,12 +208,12 @@ static int addr4_resolve(struct sockaddr_in *src_in,
        }
 
        /* If the device does ARP internally, return 'done' */
-       if (rt->idev->dev->flags & IFF_NOARP) {
-               rdma_copy_addr(addr, rt->idev->dev, NULL);
+       if (rt->dst.dev->flags & IFF_NOARP) {
+               rdma_copy_addr(addr, rt->dst.dev, NULL);
                goto put;
        }
 
-       neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->idev->dev);
+       neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev);
        if (!neigh || !(neigh->nud_state & NUD_VALID)) {
                neigh_event_send(rt->dst.neighbour, NULL);
                ret = -ENODATA;
index e90db8870b6c7a0212a012d2935d91cffdcd52c0..bc0529ac88a10ecaa011937a9828b8ba2e87b757 100644 (file)
@@ -420,7 +420,7 @@ enable_hwirq(struct inf_hw *hw)
                break;
        case INF_NICCY:
                val = inl((u32)hw->cfg.start + NICCY_IRQ_CTRL_REG);
-               val |= NICCY_IRQ_ENABLE;;
+               val |= NICCY_IRQ_ENABLE;
                outl(val, (u32)hw->cfg.start + NICCY_IRQ_CTRL_REG);
                break;
        case INF_SCT_1:
@@ -924,7 +924,7 @@ setup_instance(struct inf_hw *card)
                mISDNipac_init(&card->ipac, card);
 
        if (card->ipac.isac.dch.dev.Bprotocols == 0)
-               goto error_setup;;
+               goto error_setup;
 
        err = mISDN_register_device(&card->ipac.isac.dch.dev,
                &card->pdev->dev, card->name);
index 38eb31439a7316102ad40110312a43230dd8cb3a..d13fa5b119f5f1b15ccb9fb02364223fbef94fe4 100644 (file)
@@ -264,7 +264,7 @@ load_firmware(struct isar_hw *isar, const u8 *buf, int size)
                        while (noc) {
                                val = le16_to_cpu(*sp++);
                                *mp++ = val >> 8;
-                               *mp++ = val & 0xFF;;
+                               *mp++ = val & 0xFF;
                                noc--;
                        }
                        spin_lock_irqsave(isar->hwlock, flags);
index fcf4ed1cb4b9f2c44eef2bc1591faaf8b4343d9b..0e66af1decd43266129c4fc350ea0dbddeee111f 100644 (file)
@@ -314,7 +314,7 @@ hdlc_fill_fifo(struct BCState *bcs)
                        bcs->hw.hdlc.ctrl.sr.cmd |= HDLC_CMD_XME;
        }
        if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
-               debugl1(cs, "hdlc_fill_fifo %d/%ld", count, bcs->tx_skb->len);
+               debugl1(cs, "hdlc_fill_fifo %d/%u", count, bcs->tx_skb->len);
        p = bcs->tx_skb->data;
        ptr = (u_int *)p;
        skb_pull(bcs->tx_skb, count);
index f150330b5a23e24adb74af94535c83bbbeea7c36..37e685eafd24a015ba5a61c62193bbceb82eb054 100644 (file)
@@ -65,7 +65,7 @@ hisax_findcard(int driverid)
        return (struct IsdnCardState *) 0;
 }
 
-static void
+static __attribute__((format(printf, 3, 4))) void
 link_debug(struct Channel *chanp, int direction, char *fmt, ...)
 {
        va_list args;
@@ -1068,7 +1068,7 @@ init_d_st(struct Channel *chanp)
        return 0;
 }
 
-static void
+static __attribute__((format(printf, 2, 3))) void
 callc_debug(struct FsmInst *fi, char *fmt, ...)
 {
        va_list args;
index 7250f56a5246f4daad7d8573f3d1f59b5ee95d21..a16459a1332ceab1be36f77b3eade4ee2c36b885 100644 (file)
@@ -292,7 +292,7 @@ hfc_fill_fifo(struct BCState *bcs)
        }
        count = GetFreeFifoBytes_B(bcs);
        if (cs->debug & L1_DEB_HSCX)
-               debugl1(cs, "hfc_fill_fifo %d count(%ld/%d),%lx",
+               debugl1(cs, "hfc_fill_fifo %d count(%u/%d),%lx",
                        bcs->channel, bcs->tx_skb->len,
                        count, current->state);
        if (count < bcs->tx_skb->len) {
@@ -719,7 +719,7 @@ hfc_fill_dfifo(struct IsdnCardState *cs)
        }
        count = GetFreeFifoBytes_D(cs);
        if (cs->debug & L1_DEB_ISAC)
-               debugl1(cs, "hfc_fill_Dfifo count(%ld/%d)",
+               debugl1(cs, "hfc_fill_Dfifo count(%u/%d)",
                        cs->tx_skb->len, count);
        if (count < cs->tx_skb->len) {
                if (cs->debug & L1_DEB_ISAC)
index b1f6481e119371b51ce43516fb25cefd7b852502..626f85df302b395964393e4b904c6d22bd5c2c10 100644 (file)
@@ -282,7 +282,7 @@ hfc_fill_fifo(struct BCState *bcs)
            count += cs->hw.hfc.fifosize; 
        } /* L1_MODE_TRANS */
        if (cs->debug & L1_DEB_HSCX)
-               debugl1(cs, "hfc_fill_fifo %d count(%ld/%d)",
+               debugl1(cs, "hfc_fill_fifo %d count(%u/%d)",
                        bcs->channel, bcs->tx_skb->len,
                        count);
        if (count < bcs->tx_skb->len) {
index 917cc84065bd0a318875240cc2e3b8290a6c6758..3147020d188be82f8e03d4dccc7d4c762e2a372c 100644 (file)
@@ -550,7 +550,7 @@ hfcpci_fill_dfifo(struct IsdnCardState *cs)
                count += D_FIFO_SIZE;   /* count now contains available bytes */
 
        if (cs->debug & L1_DEB_ISAC)
-               debugl1(cs, "hfcpci_fill_Dfifo count(%ld/%d)",
+               debugl1(cs, "hfcpci_fill_Dfifo count(%u/%d)",
                        cs->tx_skb->len, count);
        if (count < cs->tx_skb->len) {
                if (cs->debug & L1_DEB_ISAC)
@@ -681,7 +681,7 @@ hfcpci_fill_fifo(struct BCState *bcs)
                count += B_FIFO_SIZE;   /* count now contains available bytes */
 
        if (cs->debug & L1_DEB_HSCX)
-               debugl1(cs, "hfcpci_fill_fifo %d count(%ld/%d),%lx",
+               debugl1(cs, "hfcpci_fill_fifo %d count(%u/%d),%lx",
                        bcs->channel, bcs->tx_skb->len,
                        count, current->state);
 
index 5aa138eb0b3c6a3cd45abfc2b3fb024f4b56d27a..1235b7131ae16e8f886eb5ddddd5aef8417bf2b9 100644 (file)
@@ -179,7 +179,7 @@ write_fifo(struct IsdnCardState *cs, struct sk_buff *skb, u_char fifo, int trans
          count += fifo_size;   /* count now contains available bytes */
 
        if (cs->debug & L1_DEB_ISAC_FIFO)
-         debugl1(cs, "hfcsx_write_fifo %d count(%ld/%d)",
+         debugl1(cs, "hfcsx_write_fifo %d count(%u/%d)",
                  fifo, skb->len, count);
        if (count < skb->len) {
          if (cs->debug & L1_DEB_ISAC_FIFO)
@@ -265,7 +265,7 @@ read_fifo(struct IsdnCardState *cs, u_char fifo, int trans_max)
          count++;
 
          if (cs->debug & L1_DEB_ISAC_FIFO)
-           debugl1(cs, "hfcsx_read_fifo %d count %ld)",
+           debugl1(cs, "hfcsx_read_fifo %d count %u)",
                    fifo, count);
 
          if ((count > fifo_size) || (count < 4)) {
@@ -986,7 +986,7 @@ HFCSX_l1hw(struct PStack *st, int pr, void *arg)
                                default:
                                        spin_unlock_irqrestore(&cs->lock, flags);
                                        if (cs->debug & L1_DEB_WARN)
-                                               debugl1(cs, "hfcsx_l1hw loop invalid %4lx", arg);
+                                               debugl1(cs, "hfcsx_l1hw loop invalid %4lx", (unsigned long)arg);
                                        return;
                        }
                        cs->hw.hfcsx.trm |= 0x80;       /* enable IOM-loop */
index 32ab3924aa7341f5f390623faee36d9a308abc67..de1c669c7b1302a31c72a23c05d037f96c650764 100644 (file)
@@ -1286,7 +1286,9 @@ int jiftime(char *s, long mark);
 
 int HiSax_command(isdn_ctrl * ic);
 int HiSax_writebuf_skb(int id, int chan, int ack, struct sk_buff *skb);
+__attribute__((format(printf, 3, 4)))
 void HiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, ...);
+__attribute__((format(printf, 3, 0)))
 void VHiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, va_list args);
 void HiSax_reportcard(int cardnr, int sel);
 int QuickHex(char *txt, u_char * p, int cnt);
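
The format attributes added to hisax.h (and to the static debug helpers elsewhere in this merge) let gcc check each format string against its arguments, which is what turns up the %ld-versus-%u and missing-argument fixes in the surrounding hunks. A small userspace analogue of what the annotation buys (the function name is hypothetical):

#include <stdarg.h>
#include <stdio.h>

__attribute__((format(printf, 2, 3)))
static void debug_log(int level, const char *fmt, ...)
{
	va_list args;

	(void)level;
	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
}

int main(void)
{
	unsigned int len = 42;

	debug_log(1, "len %u\n", len);    /* matches, compiles quietly */
	/* debug_log(1, "len %ld\n", len);   would now draw a -Wformat
	 * warning -- the class of bug fixed throughout this merge.    */
	return 0;
}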
index 751b25f2ff5811f7be494ae6457f1d73ac3b47c1..332104103e18c5ff653a0ffee72e5a0d6063025d 100644 (file)
@@ -717,7 +717,7 @@ bch_mode(struct BCState *bcs, int mode, int bc)
 
         bc = bc ? 1 : 0;  // in case bc is greater than 1
        if (cs->debug & L1_DEB_HSCX)
-               debugl1(cs, "mode_bch() switch B-% mode %d chan %d", hscx, mode, bc);
+               debugl1(cs, "mode_bch() switch B-%d mode %d chan %d", hscx, mode, bc);
        bcs->mode = mode;
        bcs->channel = bc;
   
index 2e72227bd071b9d18329e60a0e4b06a444700ec9..1be4552d94b4b81188ae86dff4163ef1c6d6e83b 100644 (file)
@@ -953,7 +953,7 @@ isar_pump_statev_modem(struct BCState *bcs, u_char devt) {
                        break;
                case PSEV_GSTN_CLR:
                        if (cs->debug & L1_DEB_HSCX)
-                               debugl1(cs, "pump stev GSTN CLEAR", devt);
+                               debugl1(cs, "pump stev GSTN CLEAR");
                        break;
                default:
                        if (cs->debug & L1_DEB_HSCX)
@@ -1268,7 +1268,7 @@ isar_int_main(struct IsdnCardState *cs)
 static void
 ftimer_handler(struct BCState *bcs) {
        if (bcs->cs->debug)
-               debugl1(bcs->cs, "ftimer flags %04x",
+               debugl1(bcs->cs, "ftimer flags %04lx",
                        bcs->Flag);
        test_and_clear_bit(BC_FLG_FTI_RUN, &bcs->Flag);
        if (test_and_clear_bit(BC_FLG_LL_CONN, &bcs->Flag)) {
@@ -1748,7 +1748,7 @@ isar_auxcmd(struct IsdnCardState *cs, isdn_ctrl *ic) {
        struct BCState *bcs;
 
        if (cs->debug & L1_DEB_HSCX)
-               debugl1(cs, "isar_auxcmd cmd/ch %x/%d", ic->command, ic->arg);
+               debugl1(cs, "isar_auxcmd cmd/ch %x/%ld", ic->command, ic->arg);
        switch (ic->command) {
                case (ISDN_CMD_FAXCMD):
                        bcs = cs->channel[ic->arg].bcs;
index 172ad4c8c9619dccbbc071e2b3dd65253ce21ce5..425d86116f2bbb40c62200e2c23218f6c0ea015c 100644 (file)
@@ -21,6 +21,7 @@
 #define B_XMTBUFREADY  1
 #define B_ACKPENDING   2
 
+__attribute__((format(printf, 2, 3)))
 void debugl1(struct IsdnCardState *cs, char *fmt, ...);
 void DChannel_proc_xmt(struct IsdnCardState *cs);
 void DChannel_proc_rcv(struct IsdnCardState *cs);
index fd0b643ab7408cfee4b288447ab30587b623796a..ad291f21b201394d5e0340d29e0e51d1feb8d2b1 100644 (file)
@@ -66,7 +66,7 @@ static char *strL3Event[] =
        "EV_TIMEOUT",
 };
 
-static void
+static __attribute__((format(printf, 2, 3))) void
 l3m_debug(struct FsmInst *fi, char *fmt, ...)
 {
        va_list args;
index 5d7f0f2ff9b9c99133272ce5e7e91bc61c9ca785..644891efc26fb52ba994616878c923b787a5f5a1 100644 (file)
@@ -254,7 +254,7 @@ static int make_raw_data(struct BCState *bcs) {
                val >>= 1;
        }
        if (bcs->cs->debug & L1_DEB_HSCX)
-               debugl1(bcs->cs,"tiger make_raw: in %ld out %d.%d",
+               debugl1(bcs->cs,"tiger make_raw: in %u out %d.%d",
                        bcs->tx_skb->len, s_cnt, bitcnt);
        if (bitcnt) {
                while (8>bitcnt++) {
@@ -361,7 +361,7 @@ static int make_raw_data_56k(struct BCState *bcs) {
                val >>= 1;
        }
        if (bcs->cs->debug & L1_DEB_HSCX)
-               debugl1(bcs->cs,"tiger make_raw_56k: in %ld out %d.%d",
+               debugl1(bcs->cs,"tiger make_raw_56k: in %u out %d.%d",
                        bcs->tx_skb->len, s_cnt, bitcnt);
        if (bitcnt) {
                while (8>bitcnt++) {
@@ -612,7 +612,7 @@ void netjet_fill_dma(struct BCState *bcs)
        if (!bcs->tx_skb)
                return;
        if (bcs->cs->debug & L1_DEB_HSCX)
-               debugl1(bcs->cs,"tiger fill_dma1: c%d %4x", bcs->channel,
+               debugl1(bcs->cs,"tiger fill_dma1: c%d %4lx", bcs->channel,
                        bcs->Flag);
        if (test_and_set_bit(BC_FLG_BUSY, &bcs->Flag))
                return;
@@ -625,7 +625,7 @@ void netjet_fill_dma(struct BCState *bcs)
                        return;         
        };
        if (bcs->cs->debug & L1_DEB_HSCX)
-               debugl1(bcs->cs,"tiger fill_dma2: c%d %4x", bcs->channel,
+               debugl1(bcs->cs,"tiger fill_dma2: c%d %4lx", bcs->channel,
                        bcs->Flag);
        if (test_and_clear_bit(BC_FLG_NOFRAME, &bcs->Flag)) {
                write_raw(bcs, bcs->hw.tiger.sendp, bcs->hw.tiger.free);
@@ -667,7 +667,7 @@ void netjet_fill_dma(struct BCState *bcs)
                write_raw(bcs, p, cnt);
        }
        if (bcs->cs->debug & L1_DEB_HSCX)
-               debugl1(bcs->cs,"tiger fill_dma3: c%d %4x", bcs->channel,
+               debugl1(bcs->cs,"tiger fill_dma3: c%d %4lx", bcs->channel,
                        bcs->Flag);
 }
 
index b7876b19fe7348ad03ab04d3740cf610845705c5..44082637a09fbcfe71701dc69c07fdbcca9c54c4 100644 (file)
@@ -167,7 +167,8 @@ static struct FsmNode L1FnList[] __initdata =
        {ST_L1_F8, EV_IND_RSY,           l1_ignore},
 };
 
-static void l1m_debug(struct FsmInst *fi, char *fmt, ...)
+static __attribute__((format(printf, 2, 3)))
+void l1m_debug(struct FsmInst *fi, char *fmt, ...)
 {
        va_list args;
        char buf[256];
@@ -269,7 +270,8 @@ static char *strDoutEvent[] =
        "EV_DOUT_UNDERRUN",
 };
 
-static void dout_debug(struct FsmInst *fi, char *fmt, ...)
+static __attribute__((format(printf, 2, 3)))
+void dout_debug(struct FsmInst *fi, char *fmt, ...)
 {
        va_list args;
        char buf[256];
index 46048e55f241157663a41c5c2fcbe32ecbf50f63..d568689669f87a010d2c2a4cf48078d3458aa736 100644 (file)
@@ -61,7 +61,7 @@ static int isdn_concap_dl_data_req(struct concap_proto *concap, struct sk_buff *
 static int isdn_concap_dl_connect_req(struct concap_proto *concap)
 {
        struct net_device *ndev = concap -> net_dev;
-       isdn_net_local *lp = (isdn_net_local *) netdev_priv(ndev);
+       isdn_net_local *lp = netdev_priv(ndev);
        int ret;
        IX25DEBUG( "isdn_concap_dl_connect_req: %s \n", ndev -> name);
 
index 26d44c3ca1d8d8a1c1da84fb2df404e2d4995e75..afeede7ee2954237251865dd7e879055fa320f9e 100644 (file)
@@ -827,7 +827,7 @@ isdn_net_dial(void)
 void
 isdn_net_hangup(struct net_device *d)
 {
-       isdn_net_local *lp = (isdn_net_local *) netdev_priv(d);
+       isdn_net_local *lp = netdev_priv(d);
        isdn_ctrl cmd;
 #ifdef CONFIG_ISDN_X25
        struct concap_proto *cprot = lp->netdev->cprot;
@@ -1052,7 +1052,7 @@ isdn_net_xmit(struct net_device *ndev, struct sk_buff *skb)
 {
        isdn_net_dev *nd;
        isdn_net_local *slp;
-       isdn_net_local *lp = (isdn_net_local *) netdev_priv(ndev);
+       isdn_net_local *lp = netdev_priv(ndev);
        int retv = NETDEV_TX_OK;
 
        if (((isdn_net_local *) netdev_priv(ndev))->master) {
@@ -1116,7 +1116,7 @@ isdn_net_xmit(struct net_device *ndev, struct sk_buff *skb)
 static void
 isdn_net_adjust_hdr(struct sk_buff *skb, struct net_device *dev)
 {
-       isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev);
+       isdn_net_local *lp = netdev_priv(dev);
        if (!skb)
                return;
        if (lp->p_encap == ISDN_NET_ENCAP_ETHER) {
@@ -1131,7 +1131,7 @@ isdn_net_adjust_hdr(struct sk_buff *skb, struct net_device *dev)
 
 static void isdn_net_tx_timeout(struct net_device * ndev)
 {
-       isdn_net_local *lp = (isdn_net_local *) netdev_priv(ndev);
+       isdn_net_local *lp = netdev_priv(ndev);
 
        printk(KERN_WARNING "isdn_tx_timeout dev %s dialstate %d\n", ndev->name, lp->dialstate);
        if (!lp->dialstate){
@@ -1165,7 +1165,7 @@ static void isdn_net_tx_timeout(struct net_device * ndev)
 static netdev_tx_t
 isdn_net_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
-       isdn_net_local *lp = (isdn_net_local *) netdev_priv(ndev);
+       isdn_net_local *lp = netdev_priv(ndev);
 #ifdef CONFIG_ISDN_X25
        struct concap_proto * cprot = lp -> netdev -> cprot;
 /* At this point hard_start_xmit() passes control to the encapsulation
@@ -1347,7 +1347,7 @@ isdn_net_close(struct net_device *dev)
 static struct net_device_stats *
 isdn_net_get_stats(struct net_device *dev)
 {
-       isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev);
+       isdn_net_local *lp = netdev_priv(dev);
        return &lp->stats;
 }
 
@@ -1426,7 +1426,7 @@ isdn_net_ciscohdlck_alloc_skb(isdn_net_local *lp, int len)
 static int
 isdn_ciscohdlck_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
-       isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev);
+       isdn_net_local *lp = netdev_priv(dev);
        unsigned long len = 0;
        unsigned long expires = 0;
        int tmp = 0;
@@ -1493,7 +1493,7 @@ isdn_ciscohdlck_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 static int isdn_net_ioctl(struct net_device *dev,
                          struct ifreq *ifr, int cmd)
 {
-       isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev);
+       isdn_net_local *lp = netdev_priv(dev);
 
        switch (lp->p_encap) {
 #ifdef CONFIG_ISDN_PPP
@@ -1786,7 +1786,7 @@ isdn_net_ciscohdlck_receive(isdn_net_local *lp, struct sk_buff *skb)
 static void
 isdn_net_receive(struct net_device *ndev, struct sk_buff *skb)
 {
-       isdn_net_local *lp = (isdn_net_local *) netdev_priv(ndev);
+       isdn_net_local *lp = netdev_priv(ndev);
        isdn_net_local *olp = lp;       /* original 'lp' */
 #ifdef CONFIG_ISDN_X25
        struct concap_proto *cprot = lp -> netdev -> cprot;
@@ -1800,7 +1800,7 @@ isdn_net_receive(struct net_device *ndev, struct sk_buff *skb)
                 * handle master's statistics and hangup-timeout
                 */
                ndev = lp->master;
-               lp = (isdn_net_local *) netdev_priv(ndev);
+               lp = netdev_priv(ndev);
                lp->stats.rx_packets++;
                lp->stats.rx_bytes += skb->len;
        }
index fe824e0cbb252cedda78527fac18f2a40876a9df..9e8162c80bb06580d3945c0990e79bfa4876b396 100644 (file)
@@ -1147,15 +1147,14 @@ isdn_ppp_push_higher(isdn_net_dev * net_dev, isdn_net_local * lp, struct sk_buff
        }
 
        if (is->pass_filter
-           && sk_run_filter(skb, is->pass_filter, is->pass_len) == 0) {
+           && sk_run_filter(skb, is->pass_filter) == 0) {
                if (is->debug & 0x2)
                        printk(KERN_DEBUG "IPPP: inbound frame filtered.\n");
                kfree_skb(skb);
                return;
        }
        if (!(is->active_filter
-             && sk_run_filter(skb, is->active_filter,
-                              is->active_len) == 0)) {
+             && sk_run_filter(skb, is->active_filter) == 0)) {
                if (is->debug & 0x2)
                        printk(KERN_DEBUG "IPPP: link-active filter: reseting huptimer.\n");
                lp->huptimer = 0;
@@ -1221,7 +1220,7 @@ isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
        struct ippp_struct *ipt,*ipts;
        int slot, retval = NETDEV_TX_OK;
 
-       mlp = (isdn_net_local *) netdev_priv(netdev);
+       mlp = netdev_priv(netdev);
        nd = mlp->netdev;       /* get master lp */
 
        slot = mlp->ppp_slot;
@@ -1294,15 +1293,14 @@ isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
        }
 
        if (ipt->pass_filter
-           && sk_run_filter(skb, ipt->pass_filter, ipt->pass_len) == 0) {
+           && sk_run_filter(skb, ipt->pass_filter) == 0) {
                if (ipt->debug & 0x4)
                        printk(KERN_DEBUG "IPPP: outbound frame filtered.\n");
                kfree_skb(skb);
                goto unlock;
        }
        if (!(ipt->active_filter
-             && sk_run_filter(skb, ipt->active_filter,
-                              ipt->active_len) == 0)) {
+             && sk_run_filter(skb, ipt->active_filter) == 0)) {
                if (ipt->debug & 0x4)
                        printk(KERN_DEBUG "IPPP: link-active filter: reseting huptimer.\n");
                lp->huptimer = 0;
@@ -1492,9 +1490,9 @@ int isdn_ppp_autodial_filter(struct sk_buff *skb, isdn_net_local *lp)
        }
        
        drop |= is->pass_filter
-               && sk_run_filter(skb, is->pass_filter, is->pass_len) == 0;
+               && sk_run_filter(skb, is->pass_filter) == 0;
        drop |= is->active_filter
-               && sk_run_filter(skb, is->active_filter, is->active_len) == 0;
+               && sk_run_filter(skb, is->active_filter) == 0;
        
        skb_push(skb, IPPP_MAX_HEADER - 4);
        return drop;
@@ -1985,7 +1983,7 @@ isdn_ppp_dev_ioctl_stats(int slot, struct ifreq *ifr, struct net_device *dev)
 {
        struct ppp_stats __user *res = ifr->ifr_data;
        struct ppp_stats t;
-       isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev);
+       isdn_net_local *lp = netdev_priv(dev);
 
        if (!access_ok(VERIFY_WRITE, res, sizeof(struct ppp_stats)))
                return -EFAULT;
@@ -2024,7 +2022,7 @@ isdn_ppp_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
        int error=0;
        int len;
-       isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev);
+       isdn_net_local *lp = netdev_priv(dev);
 
 
        if (lp->p_encap != ISDN_NET_ENCAP_SYNCPPP)
@@ -2091,7 +2089,7 @@ isdn_ppp_dial_slave(char *name)
 
        sdev = lp->slave;
        while (sdev) {
-               isdn_net_local *mlp = (isdn_net_local *) netdev_priv(sdev);
+               isdn_net_local *mlp = netdev_priv(sdev);
                if (!(mlp->flags & ISDN_NET_CONNECTED))
                        break;
                sdev = mlp->slave;
@@ -2099,7 +2097,7 @@ isdn_ppp_dial_slave(char *name)
        if (!sdev)
                return 2;
 
-       isdn_net_dial_req((isdn_net_local *) netdev_priv(sdev));
+       isdn_net_dial_req(netdev_priv(sdev));
        return 0;
 #else
        return -1;
@@ -2122,7 +2120,7 @@ isdn_ppp_hangup_slave(char *name)
 
        sdev = lp->slave;
        while (sdev) {
-               isdn_net_local *mlp = (isdn_net_local *) netdev_priv(sdev);
+               isdn_net_local *mlp = netdev_priv(sdev);
 
                if (mlp->slave) { /* find last connected link in chain */
                        isdn_net_local *nlp = ISDN_SLAVE_PRIV(mlp);
index ac4aa18c632b0f2040cef841a4e5fd98a0cd7f5a..5cc7c001c523e3e7c73730c4822fe317d7ca1ef4 100644 (file)
@@ -99,12 +99,16 @@ static void
 l1m_debug(struct FsmInst *fi, char *fmt, ...)
 {
        struct layer1 *l1 = fi->userdata;
+       struct va_format vaf;
        va_list va;
 
        va_start(va, fmt);
-       printk(KERN_DEBUG "%s: ", dev_name(&l1->dch->dev.dev));
-       vprintk(fmt, va);
-       printk("\n");
+
+       vaf.fmt = fmt;
+       vaf.va = &va;
+
+       printk(KERN_DEBUG "%s: %pV\n", dev_name(&l1->dch->dev.dev), &vaf);
+
        va_end(va);
 }
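
layer2.c and tei.c below get the same treatment; the common shape is to route the prefix and the caller's format string through one printk with the %pV specifier, so the whole debug line is emitted atomically instead of as three separate printk calls. A kernel-context sketch of the pattern ("prefix" stands in for whatever identifies the FSM instance):

	struct va_format vaf;
	va_list va;

	va_start(va, fmt);
	vaf.fmt = fmt;
	vaf.va = &va;
	printk(KERN_DEBUG "%s: %pV\n", prefix, &vaf);
	va_end(va);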
 
index c97371788764df3af6a45ad9f402ece12f59a1b3..4ae75053c9d2a9e572f0fb760caabac9ce4b3e28 100644 (file)
@@ -95,14 +95,20 @@ static void
 l2m_debug(struct FsmInst *fi, char *fmt, ...)
 {
        struct layer2 *l2 = fi->userdata;
+       struct va_format vaf;
        va_list va;
 
        if (!(*debug & DEBUG_L2_FSM))
                return;
+
        va_start(va, fmt);
-       printk(KERN_DEBUG "l2 (sapi %d tei %d): ", l2->sapi, l2->tei);
-       vprintk(fmt, va);
-       printk("\n");
+
+       vaf.fmt = fmt;
+       vaf.va = &va;
+
+       printk(KERN_DEBUG "l2 (sapi %d tei %d): %pV\n",
+              l2->sapi, l2->tei, &vaf);
+
        va_end(va);
 }
 
index 1b85d9d274965e583a5c972eb7c1a597af879dab..687c9b6264ab027fee9d1ed355239711236f34be 100644 (file)
@@ -79,14 +79,19 @@ static void
 da_debug(struct FsmInst *fi, char *fmt, ...)
 {
        struct manager  *mgr = fi->userdata;
+       struct va_format vaf;
        va_list va;
 
        if (!(*debug & DEBUG_L2_TEIFSM))
                return;
+
        va_start(va, fmt);
-       printk(KERN_DEBUG "mgr(%d): ", mgr->ch.st->dev->id);
-       vprintk(fmt, va);
-       printk("\n");
+
+       vaf.fmt = fmt;
+       vaf.va = &va;
+
+       printk(KERN_DEBUG "mgr(%d): %pV\n", mgr->ch.st->dev->id, &vaf);
+
        va_end(va);
 }
 
@@ -223,14 +228,20 @@ static void
 tei_debug(struct FsmInst *fi, char *fmt, ...)
 {
        struct teimgr   *tm = fi->userdata;
+       struct va_format vaf;
        va_list va;
 
        if (!(*debug & DEBUG_L2_TEIFSM))
                return;
+
        va_start(va, fmt);
-       printk(KERN_DEBUG "sapi(%d) tei(%d): ", tm->l2->sapi, tm->l2->tei);
-       vprintk(fmt, va);
-       printk("\n");
+
+       vaf.fmt = fmt;
+       vaf.va = &va;
+
+       printk(KERN_DEBUG "sapi(%d) tei(%d): %pV\n",
+              tm->l2->sapi, tm->l2->tei, &vaf);
+
        va_end(va);
 }
 
index ea9b7a098c9bd75e2023fe7e5164f4baf7a86fee..475a66d95b3402a77addb60ceb2be1f0143ad187 100644 (file)
@@ -201,7 +201,7 @@ struct net_local {
 #define RX_BUF_SIZE    (1518+14+18)    /* packet+header+RBD */
 #define RX_BUF_END             (dev->mem_end - dev->mem_start)
 
-#define TX_TIMEOUT 5
+#define TX_TIMEOUT (HZ/20)
 
 /*
   That's it: only 86 bytes to set up the beast, including every extra
index cdf7226a7c43a775b0505b09ed1a0d3ba21a623e..d2bb4b254c57ddf0c4e51bd0eebd33960732892b 100644 (file)
@@ -98,7 +98,7 @@ static int rx_nocopy, rx_copy, queued_packet;
 #define WAIT_TX_AVAIL 200
 
 /* Operational parameter that usually are not changed. */
-#define TX_TIMEOUT  40         /* Time in jiffies before concluding Tx hung */
+#define TX_TIMEOUT  ((4*HZ)/10)        /* Time in jiffies before concluding Tx hung */
 
 /* The size here is somewhat misleading: the Corkscrew also uses the ISA
    aliased registers at <base>+0x400.
index e2c9c5b949f97f5f67ef481fd32021969889e6f9..be1f1970c8422610d7ef37ebc85692a4372a9fbe 100644 (file)
@@ -191,7 +191,7 @@ enum commands {
 #define         RX_SUSPEND     0x0030
 #define         RX_ABORT       0x0040
 
-#define TX_TIMEOUT     5
+#define TX_TIMEOUT     (HZ/20)
 
 
 struct i596_reg {
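
The three TX_TIMEOUT changes above (3c507, 3c515, 82596) express the watchdog timeout in jiffies via HZ instead of as a bare tick count, so the real-time interval no longer depends on the configured timer frequency. A quick userspace check of both expressions across common HZ values:

#include <stdio.h>

int main(void)
{
	const int hz_values[] = { 100, 250, 1000 };
	int i;

	for (i = 0; i < 3; i++) {
		int hz = hz_values[i];

		/* HZ/20 is always 50 ms, (4*HZ)/10 is always 400 ms. */
		printf("HZ=%4d: HZ/20 = %3d jiffies (50 ms), (4*HZ)/10 = %3d jiffies (400 ms)\n",
		       hz, hz / 20, (4 * hz) / 10);
	}
	return 0;
}

With the old literal values, 5 jiffies was 50 ms only at HZ=100 and shrank to 5 ms at HZ=1000.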
index f6668cdaac85487b3c621b881af0c9afd975595b..a11dc735752c2278478d2a040fe651223d3c70e6 100644 (file)
@@ -1533,7 +1533,7 @@ config E100
 
          <http://support.intel.com/support/network/adapter/pro100/21397.htm>
 
-          to identify the adapter.
+         to identify the adapter.
 
          For the latest Intel PRO/100 network driver for Linux, see:
 
@@ -1786,17 +1786,17 @@ config KS8842
        tristate "Micrel KSZ8841/42 with generic bus interface"
        depends on HAS_IOMEM && DMA_ENGINE
        help
-        This platform driver is for KSZ8841(1-port) / KS8842(2-port)
-        ethernet switch chip (managed, VLAN, QoS) from Micrel or
-        Timberdale(FPGA).
+         This platform driver is for KSZ8841(1-port) / KS8842(2-port)
+         ethernet switch chip (managed, VLAN, QoS) from Micrel or
+         Timberdale(FPGA).
 
 config KS8851
-       tristate "Micrel KS8851 SPI"
-       depends on SPI
-       select MII
+       tristate "Micrel KS8851 SPI"
+       depends on SPI
+       select MII
        select CRC32
-       help
-         SPI driver for Micrel KS8851 SPI attached network chip.
+       help
+         SPI driver for Micrel KS8851 SPI attached network chip.
 
 config KS8851_MLL
        tristate "Micrel KS8851 MLL"
@@ -2133,25 +2133,25 @@ config IP1000
          will be called ipg.  This is recommended.
 
 config IGB
-       tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
-       depends on PCI
-       ---help---
-         This driver supports Intel(R) 82575/82576 gigabit ethernet family of
-         adapters.  For more information on how to identify your adapter, go
-         to the Adapter & Driver ID Guide at:
+       tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
+       depends on PCI
+       ---help---
+         This driver supports Intel(R) 82575/82576 gigabit ethernet family of
+         adapters.  For more information on how to identify your adapter, go
+         to the Adapter & Driver ID Guide at:
 
-         <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+         <http://support.intel.com/support/network/adapter/pro100/21397.htm>
 
-         For general information and support, go to the Intel support
-         website at:
+         For general information and support, go to the Intel support
+         website at:
 
-         <http://support.intel.com>
+         <http://support.intel.com>
 
-         More specific information on configuring the driver is in
-         <file:Documentation/networking/e1000.txt>.
+         More specific information on configuring the driver is in
+         <file:Documentation/networking/e1000.txt>.
 
-         To compile this driver as a module, choose M here. The module
-         will be called igb.
+         To compile this driver as a module, choose M here. The module
+         will be called igb.
 
 config IGB_DCA
        bool "Direct Cache Access (DCA) Support"
@@ -2163,25 +2163,25 @@ config IGB_DCA
          is used, with the intent of lessening the impact of cache misses.
 
 config IGBVF
-       tristate "Intel(R) 82576 Virtual Function Ethernet support"
-       depends on PCI
-       ---help---
-         This driver supports Intel(R) 82576 virtual functions.  For more
-         information on how to identify your adapter, go to the Adapter &
-         Driver ID Guide at:
+       tristate "Intel(R) 82576 Virtual Function Ethernet support"
+       depends on PCI
+       ---help---
+         This driver supports Intel(R) 82576 virtual functions.  For more
+         information on how to identify your adapter, go to the Adapter &
+         Driver ID Guide at:
 
-         <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+         <http://support.intel.com/support/network/adapter/pro100/21397.htm>
 
-         For general information and support, go to the Intel support
-         website at:
+         For general information and support, go to the Intel support
+         website at:
 
-         <http://support.intel.com>
+         <http://support.intel.com>
 
-         More specific information on configuring the driver is in
-         <file:Documentation/networking/e1000.txt>.
+         More specific information on configuring the driver is in
+         <file:Documentation/networking/e1000.txt>.
 
-         To compile this driver as a module, choose M here. The module
-         will be called igbvf.
+         To compile this driver as a module, choose M here. The module
+         will be called igbvf.
 
 source "drivers/net/ixp2000/Kconfig"
 
@@ -2300,14 +2300,14 @@ config SKGE
          will be called skge.  This is recommended.
 
 config SKGE_DEBUG
-       bool "Debugging interface"
-       depends on SKGE && DEBUG_FS
-       help
-        This option adds the ability to dump driver state for debugging.
-        The file /sys/kernel/debug/skge/ethX displays the state of the internal
-        transmit and receive rings.
+       bool "Debugging interface"
+       depends on SKGE && DEBUG_FS
+       help
+         This option adds the ability to dump driver state for debugging.
+         The file /sys/kernel/debug/skge/ethX displays the state of the internal
+         transmit and receive rings.
 
-        If unsure, say N.
+         If unsure, say N.
 
 config SKY2
        tristate "SysKonnect Yukon2 support"
@@ -2326,14 +2326,14 @@ config SKY2
          will be called sky2.  This is recommended.
 
 config SKY2_DEBUG
-       bool "Debugging interface"
-       depends on SKY2 && DEBUG_FS
-       help
-        This option adds the ability to dump driver state for debugging.
-        The file /sys/kernel/debug/sky2/ethX displays the state of the internal
-        transmit and receive rings.
+       bool "Debugging interface"
+       depends on SKY2 && DEBUG_FS
+       help
+         This option adds the ability to dump driver state for debugging.
+         The file /sys/kernel/debug/sky2/ethX displays the state of the internal
+         transmit and receive rings.
 
-        If unsure, say N.
+         If unsure, say N.
 
 config VIA_VELOCITY
        tristate "VIA Velocity support"
@@ -2389,12 +2389,12 @@ config SPIDER_NET
          Cell Processor-Based Blades from IBM.
 
 config TSI108_ETH
-          tristate "Tundra TSI108 gigabit Ethernet support"
-          depends on TSI108_BRIDGE
-          help
-            This driver supports Tundra TSI108 gigabit Ethernet ports.
-            To compile this driver as a module, choose M here: the module
-            will be called tsi108_eth.
+       tristate "Tundra TSI108 gigabit Ethernet support"
+       depends on TSI108_BRIDGE
+       help
+         This driver supports Tundra TSI108 gigabit Ethernet ports.
+         To compile this driver as a module, choose M here: the module
+         will be called tsi108_eth.
 
 config GELIC_NET
        tristate "PS3 Gigabit Ethernet driver"
@@ -2573,32 +2573,32 @@ config MDIO
        tristate
 
 config CHELSIO_T1
-        tristate "Chelsio 10Gb Ethernet support"
-        depends on PCI
+       tristate "Chelsio 10Gb Ethernet support"
+       depends on PCI
        select CRC32
        select MDIO
-        help
-          This driver supports Chelsio gigabit and 10-gigabit
-          Ethernet cards. More information about adapter features and
+       help
+         This driver supports Chelsio gigabit and 10-gigabit
+         Ethernet cards. More information about adapter features and
          performance tuning is in <file:Documentation/networking/cxgb.txt>.
 
-          For general information about Chelsio and our products, visit
-          our website at <http://www.chelsio.com>.
+         For general information about Chelsio and our products, visit
+         our website at <http://www.chelsio.com>.
 
-          For customer support, please visit our customer support page at
-          <http://www.chelsio.com/support.html>.
+         For customer support, please visit our customer support page at
+         <http://www.chelsio.com/support.html>.
 
-          Please send feedback to <linux-bugs@chelsio.com>.
+         Please send feedback to <linux-bugs@chelsio.com>.
 
-          To compile this driver as a module, choose M here: the module
-          will be called cxgb.
+         To compile this driver as a module, choose M here: the module
+         will be called cxgb.
 
 config CHELSIO_T1_1G
-        bool "Chelsio gigabit Ethernet support"
-        depends on CHELSIO_T1
-        help
-          Enables support for Chelsio's gigabit Ethernet PCI cards.  If you
-          are using only 10G cards say 'N' here.
+       bool "Chelsio gigabit Ethernet support"
+       depends on CHELSIO_T1
+       help
+         Enables support for Chelsio's gigabit Ethernet PCI cards.  If you
+         are using only 10G cards say 'N' here.
 
 config CHELSIO_T3_DEPENDS
        tristate
@@ -2728,26 +2728,26 @@ config IXGBE_DCB
          If unsure, say N.
 
 config IXGBEVF
-       tristate "Intel(R) 82599 Virtual Function Ethernet support"
-       depends on PCI_MSI
-       ---help---
-         This driver supports Intel(R) 82599 virtual functions.  For more
-         information on how to identify your adapter, go to the Adapter &
-         Driver ID Guide at:
+       tristate "Intel(R) 82599 Virtual Function Ethernet support"
+       depends on PCI_MSI
+       ---help---
+         This driver supports Intel(R) 82599 virtual functions.  For more
+         information on how to identify your adapter, go to the Adapter &
+         Driver ID Guide at:
 
-         <http://support.intel.com/support/network/sb/CS-008441.htm>
+         <http://support.intel.com/support/network/sb/CS-008441.htm>
 
-         For general information and support, go to the Intel support
-         website at:
+         For general information and support, go to the Intel support
+         website at:
 
-         <http://support.intel.com>
+         <http://support.intel.com>
 
-         More specific information on configuring the driver is in
-         <file:Documentation/networking/ixgbevf.txt>.
+         More specific information on configuring the driver is in
+         <file:Documentation/networking/ixgbevf.txt>.
 
-         To compile this driver as a module, choose M here. The module
-         will be called ixgbevf.  MSI-X interrupt support is required
-         for this driver to work correctly.
+         To compile this driver as a module, choose M here. The module
+         will be called ixgbevf.  MSI-X interrupt support is required
+         for this driver to work correctly.
 
 config IXGB
        tristate "Intel(R) PRO/10GbE support"
@@ -2772,29 +2772,38 @@ config IXGB
          will be called ixgb.
 
 config S2IO
-       tristate "S2IO 10Gbe XFrame NIC"
+       tristate "Exar Xframe 10Gb Ethernet Adapter"
        depends on PCI
        ---help---
-         This driver supports the 10Gbe XFrame NIC of S2IO. 
+         This driver supports Exar Corp's Xframe Series 10Gb Ethernet Adapters.
+
          More specific information on configuring the driver is in 
          <file:Documentation/networking/s2io.txt>.
 
+         To compile this driver as a module, choose M here. The module
+         will be called s2io.
+
 config VXGE
-       tristate "Neterion X3100 Series 10GbE PCIe Server Adapter"
+       tristate "Exar X3100 Series 10GbE PCIe Server Adapter"
        depends on PCI && INET
        ---help---
-         This driver supports Neterion Inc's X3100 Series 10 GbE PCIe
+         This driver supports Exar Corp's X3100 Series 10 GbE PCIe
          I/O Virtualized Server Adapter.
+
          More specific information on configuring the driver is in
          <file:Documentation/networking/vxge.txt>.
 
+         To compile this driver as a module, choose M here. The module
+         will be called vxge.
+
 config VXGE_DEBUG_TRACE_ALL
        bool "Enabling All Debug trace statments in driver"
        default n
        depends on VXGE
        ---help---
          Say Y here if you want to enable all the debug trace statements in
-         driver. By  default only few debug trace statements are enabled.
+         the vxge driver. By default only a few debug trace statements are
+         enabled.
 
 config MYRI10GE
        tristate "Myricom Myri-10G Ethernet support"
@@ -2906,18 +2915,18 @@ config QLGE
          will be called qlge.
 
 config BNA
-        tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
-        depends on PCI
-        ---help---
-          This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet
-          cards.
-          To compile this driver as a module, choose M here: the module
-          will be called bna.
+       tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
+       depends on PCI
+       ---help---
+         This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet
+         cards.
+         To compile this driver as a module, choose M here: the module
+         will be called bna.
 
-          For general information and support, go to the Brocade support
-          website at:
+         For general information and support, go to the Brocade support
+         website at:
 
-          <http://support.brocade.com>
+         <http://support.brocade.com>
 
 source "drivers/net/sfc/Kconfig"
 
@@ -3227,18 +3236,18 @@ config PPP_BSDCOMP
          modules once you have said "make modules". If unsure, say N.
 
 config PPP_MPPE
-       tristate "PPP MPPE compression (encryption) (EXPERIMENTAL)"
-       depends on PPP && EXPERIMENTAL
-       select CRYPTO
-       select CRYPTO_SHA1
-       select CRYPTO_ARC4
-       select CRYPTO_ECB
-       ---help---
-         Support for the MPPE Encryption protocol, as employed by the
-        Microsoft Point-to-Point Tunneling Protocol.
-
-        See http://pptpclient.sourceforge.net/ for information on
-        configuring PPTP clients and servers to utilize this method.
+       tristate "PPP MPPE compression (encryption) (EXPERIMENTAL)"
+       depends on PPP && EXPERIMENTAL
+       select CRYPTO
+       select CRYPTO_SHA1
+       select CRYPTO_ARC4
+       select CRYPTO_ECB
+       ---help---
+         Support for the MPPE Encryption protocol, as employed by the
+         Microsoft Point-to-Point Tunneling Protocol.
+
+         See http://pptpclient.sourceforge.net/ for information on
+         configuring PPTP clients and servers to utilize this method.
 
 config PPPOE
        tristate "PPP over Ethernet (EXPERIMENTAL)"
@@ -3397,14 +3406,14 @@ config VIRTIO_NET
        depends on EXPERIMENTAL && VIRTIO
        ---help---
          This is the virtual network driver for virtio.  It can be used with
-          lguest or QEMU based VMMs (like KVM or Xen).  Say Y or M.
+         lguest or QEMU based VMMs (like KVM or Xen).  Say Y or M.
 
 config VMXNET3
-       tristate "VMware VMXNET3 ethernet driver"
-       depends on PCI && INET
-       help
-         This driver supports VMware's vmxnet3 virtual ethernet NIC.
-         To compile this driver as a module, choose M here: the
-         module will be called vmxnet3.
+       tristate "VMware VMXNET3 ethernet driver"
+       depends on PCI && INET
+       help
+         This driver supports VMware's vmxnet3 virtual ethernet NIC.
+         To compile this driver as a module, choose M here: the
+         module will be called vmxnet3.
 
 endif # NETDEVICES
index 4545d5a06c24f9e9cea64a70db01327a1d52ad21..bfea499a351309f2292998208e9b302ff3a58c07 100644 (file)
 #define TX_DESC_SIZE           10
 #define MAX_RBUFF_SZ           0x600
 #define MAX_TBUFF_SZ           0x600
-#define TX_TIMEOUT             50
+#define TX_TIMEOUT             (HZ/2)
 #define DELAY                  1000
 #define CAM0                   0x0
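
(Editor's note: this and the two TX_TIMEOUT hunks below follow one pattern — the watchdog timeout handed to the net core is measured in jiffies, so expressing it relative to HZ keeps the real-time length the same across CONFIG_HZ settings. A minimal sketch of the idea; the function name is hypothetical:)

#include <linux/netdevice.h>
#include <linux/jiffies.h>

static void example_setup_watchdog(struct net_device *dev)
{
	/* watchdog_timeo is in jiffies: HZ-relative values mean the same
	 * wall-clock time whether the kernel runs at HZ=100, 250 or 1000.
	 */
	dev->watchdog_timeo = HZ / 2;			/* ~500 ms */
	/* equivalent, and often clearer: */
	dev->watchdog_timeo = msecs_to_jiffies(500);
}
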
 
index 89876897a6fed5244358740795e98207507799a6..871b1633f543c2d515a88261317cf526a87484bb 100644 (file)
@@ -150,7 +150,7 @@ struct net_local {
 #define PORT_OFFSET(o) (o)
 
 
-#define TX_TIMEOUT             10
+#define TX_TIMEOUT             (HZ/10)
 
 
 /* Index to functions, as function prototypes. */
index 8cb27cb7bca1b347c0b072d25e98ef5b58acdb5a..ce0091eb06f580a2b63bcc35f9f019a078d3acbb 100644 (file)
@@ -116,7 +116,7 @@ MODULE_LICENSE("GPL");
 #define RX_RING_LEN_BITS               (RX_LOG_RING_SIZE << 5)
 #define        RX_RING_MOD_MASK                (RX_RING_SIZE - 1)
 
-#define TX_TIMEOUT     20
+#define TX_TIMEOUT     (HZ/5)
 
 /* The LANCE Rx and Tx ring descriptors. */
 struct lance_rx_head {
index b6da4cf3694baf89d988089642a9842c551d1d2f..4bebff3faeab4c35e54680ed958ccd558ffda833 100644 (file)
@@ -325,7 +325,7 @@ static void ax_block_output(struct net_device *dev, int count,
 static void
 ax_mii_ei_outbits(struct net_device *dev, unsigned int bits, int len)
 {
-       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+       struct ei_device *ei_local = netdev_priv(dev);
        void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
        unsigned int memr;
 
@@ -364,7 +364,7 @@ ax_mii_ei_outbits(struct net_device *dev, unsigned int bits, int len)
 static unsigned int
 ax_phy_ei_inbits(struct net_device *dev, int no)
 {
-       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+       struct ei_device *ei_local = netdev_priv(dev);
        void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
        unsigned int memr;
        unsigned int result = 0;
@@ -412,7 +412,7 @@ ax_phy_issueaddr(struct net_device *dev, int phy_addr, int reg, int opc)
 static int
 ax_phy_read(struct net_device *dev, int phy_addr, int reg)
 {
-       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+       struct ei_device *ei_local = netdev_priv(dev);
        unsigned long flags;
        unsigned int result;
 
@@ -435,7 +435,7 @@ ax_phy_read(struct net_device *dev, int phy_addr, int reg)
 static void
 ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value)
 {
-       struct ei_device *ei = (struct ei_device *) netdev_priv(dev);
+       struct ei_device *ei = netdev_priv(dev);
        struct ax_device  *ax = to_ax_dev(dev);
        unsigned long flags;
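
(Editor's note: the cast removals around netdev_priv() here and in the bonding hunks rely on the helper returning void *, which converts implicitly to any object pointer type in C; the cast added noise, not safety. A sketch with a made-up private struct:)

#include <linux/netdevice.h>

struct example_priv {				/* hypothetical driver-private state */
	int link_up;
};

static void example_use_priv(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);	/* no cast needed */

	priv->link_up = 1;
}
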
 
index 580919619252e3d0594f5365226bc295a8efcba9..38aeffef2a832be22a48ceaa14b3d612bbb4ee64 100644 (file)
@@ -3904,7 +3904,7 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
                              MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
                if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
                    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
-                       return 0;;
+                       return 0;
                msleep(1);
        }
        return -EINVAL;
@@ -3988,7 +3988,7 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
                              MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
                if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
                    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
-                       return 0;;
+                       return 0;
                msleep(1);
        }
 
index 9709b8569666d56ff84fb263429eb6f27c244033..92057d7058daf8db70956f3999d0d1075ecb9740 100644 (file)
@@ -8078,7 +8078,7 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
        int port = BP_PORT(bp);
        u32 val, val2;
        u32 config;
-       u32 ext_phy_type, ext_phy_config;;
+       u32 ext_phy_type, ext_phy_config;
 
        bp->link_params.bp = bp;
        bp->link_params.port = port;
index 881914bc4e9c0177a371c76c9781568aa0b4ffdd..48cf24ff4e6fe5e3d4762a2ad40f76ae91f14e29 100644 (file)
@@ -2474,8 +2474,7 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
                goto out;
 
        read_lock(&bond->lock);
-       slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev),
-                                       orig_dev);
+       slave = bond_get_slave_by_dev(netdev_priv(dev), orig_dev);
        if (!slave)
                goto out_unlock;
 
index 71a169740d05daf463b04f2ff591a943f8c1847e..0273ad0b57bb00167591414ca2937ce84e6794b1 100644 (file)
@@ -873,17 +873,11 @@ static void bond_mc_del(struct bonding *bond, void *addr)
 static void __bond_resend_igmp_join_requests(struct net_device *dev)
 {
        struct in_device *in_dev;
-       struct ip_mc_list *im;
 
        rcu_read_lock();
        in_dev = __in_dev_get_rcu(dev);
-       if (in_dev) {
-               read_lock(&in_dev->mc_list_lock);
-               for (im = in_dev->mc_list; im; im = im->next)
-                       ip_mc_rejoin_group(im);
-               read_unlock(&in_dev->mc_list_lock);
-       }
-
+       if (in_dev)
+               ip_mc_rejoin_groups(in_dev);
        rcu_read_unlock();
 }
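
(Editor's note: the rewrite above relies on the usual rule for in_device lookups — __in_dev_get_rcu() may only be called between rcu_read_lock() and rcu_read_unlock(), and the pointer must not be kept past that window; the ip_mc_rejoin_groups() helper is taken as given from the companion change. A minimal sketch of the pattern, with placeholder work:)

#include <linux/inetdevice.h>
#include <linux/rcupdate.h>

static void example_with_in_dev(struct net_device *dev)
{
	struct in_device *in_dev;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);	/* valid only under rcu_read_lock() */
	if (in_dev) {
		/* ... use in_dev here; do not sleep, do not cache the pointer ... */
	}
	rcu_read_unlock();
}
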
 
@@ -3211,7 +3205,7 @@ out:
 #ifdef CONFIG_PROC_FS
 
 static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
-       __acquires(&dev_base_lock)
+       __acquires(RCU)
        __acquires(&bond->lock)
 {
        struct bonding *bond = seq->private;
@@ -3220,7 +3214,7 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
        int i;
 
        /* make sure the bond won't be taken away */
-       read_lock(&dev_base_lock);
+       rcu_read_lock();
        read_lock(&bond->lock);
 
        if (*pos == 0)
@@ -3250,12 +3244,12 @@ static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 
 static void bond_info_seq_stop(struct seq_file *seq, void *v)
        __releases(&bond->lock)
-       __releases(&dev_base_lock)
+       __releases(RCU)
 {
        struct bonding *bond = seq->private;
 
        read_unlock(&bond->lock);
-       read_unlock(&dev_base_lock);
+       rcu_read_unlock();
 }
 
 static void bond_info_show_master(struct seq_file *seq)
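
(Editor's note: switching the /proc seq_file hooks from dev_base_lock to RCU also updates the sparse annotations — __acquires()/__releases() document the lock context a function enters and leaves so static checking can verify that start() and stop() stay paired. A hedged sketch of that start/stop shape, not the bonding code itself:)

#include <linux/seq_file.h>
#include <linux/rcupdate.h>

/* Illustrative seq_file pair: start() returns with the RCU read lock
 * held, stop() drops it, and the annotations record that contract.
 */
static void *example_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return NULL;		/* a real provider would return its first record */
}

static void example_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}
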
index 4eedb12df6caf2e4676cbe11e2134e97ec01def1..ad3ae46a4c0110d05f26f3644c6caa403f7ee36e 100644 (file)
@@ -286,7 +286,7 @@ static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
                return NULL;
        }
 
-       return (struct bonding *)netdev_priv(slave->dev->master);
+       return netdev_priv(slave->dev->master);
 }
 
 static inline bool bond_is_lb(const struct bonding *bond)
index 64c378cd0c34125471f6cb3fcae10482ae1e0656..74cd880c7e0676c2387f300a9a79b0b3f3e63ae0 100644 (file)
@@ -182,7 +182,7 @@ static int mscan_restart(struct net_device *dev)
 
                priv->can.state = CAN_STATE_ERROR_ACTIVE;
                WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD),
-                    "bus-off state expected");
+                    "bus-off state expected\n");
                out_8(&regs->canmisc, MSCAN_BOHOLD);
                /* Re-enable receive interrupts. */
                out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
index 672718261c6872d7fb62578788bd90d54a359789..238622a04bc17ed80626ba30306179be41a34771 100644 (file)
 #include <linux/can/dev.h>
 #include <linux/can/error.h>
 
-#define MAX_MSG_OBJ            32
-#define MSG_OBJ_RX             0 /* The receive message object flag. */
-#define MSG_OBJ_TX             1 /* The transmit message object flag. */
-
-#define ENABLE                 1 /* The enable flag */
-#define DISABLE                        0 /* The disable flag */
-#define CAN_CTRL_INIT          0x0001 /* The INIT bit of CANCONT register. */
-#define CAN_CTRL_IE            0x0002 /* The IE bit of CAN control register */
-#define CAN_CTRL_IE_SIE_EIE    0x000e
-#define CAN_CTRL_CCE           0x0040
-#define CAN_CTRL_OPT           0x0080 /* The OPT bit of CANCONT register. */
-#define CAN_OPT_SILENT         0x0008 /* The Silent bit of CANOPT reg. */
-#define CAN_OPT_LBACK          0x0010 /* The LoopBack bit of CANOPT reg. */
-#define CAN_CMASK_RX_TX_SET    0x00f3
-#define CAN_CMASK_RX_TX_GET    0x0073
-#define CAN_CMASK_ALL          0xff
-#define CAN_CMASK_RDWR         0x80
-#define CAN_CMASK_ARB          0x20
-#define CAN_CMASK_CTRL         0x10
-#define CAN_CMASK_MASK         0x40
-#define CAN_CMASK_NEWDAT       0x04
-#define CAN_CMASK_CLRINTPND    0x08
-
-#define CAN_IF_MCONT_NEWDAT    0x8000
-#define CAN_IF_MCONT_INTPND    0x2000
-#define CAN_IF_MCONT_UMASK     0x1000
-#define CAN_IF_MCONT_TXIE      0x0800
-#define CAN_IF_MCONT_RXIE      0x0400
-#define CAN_IF_MCONT_RMTEN     0x0200
-#define CAN_IF_MCONT_TXRQXT    0x0100
-#define CAN_IF_MCONT_EOB       0x0080
-#define CAN_IF_MCONT_DLC       0x000f
-#define CAN_IF_MCONT_MSGLOST   0x4000
-#define CAN_MASK2_MDIR_MXTD    0xc000
-#define CAN_ID2_DIR            0x2000
-#define CAN_ID_MSGVAL          0x8000
-
-#define CAN_STATUS_INT         0x8000
-#define CAN_IF_CREQ_BUSY       0x8000
-#define CAN_ID2_XTD            0x4000
-
-#define CAN_REC                        0x00007f00
-#define CAN_TEC                        0x000000ff
-
-#define PCH_RX_OK              0x00000010
-#define PCH_TX_OK              0x00000008
-#define PCH_BUS_OFF            0x00000080
-#define PCH_EWARN              0x00000040
-#define PCH_EPASSIV            0x00000020
-#define PCH_LEC0               0x00000001
-#define PCH_LEC1               0x00000002
-#define PCH_LEC2               0x00000004
+#define PCH_MAX_MSG_OBJ                32
+#define PCH_MSG_OBJ_RX         0 /* The receive message object flag. */
+#define PCH_MSG_OBJ_TX         1 /* The transmit message object flag. */
+
+#define PCH_ENABLE             1 /* The enable flag */
+#define PCH_DISABLE            0 /* The disable flag */
+#define PCH_CTRL_INIT          BIT(0) /* The INIT bit of CANCONT register. */
+#define PCH_CTRL_IE            BIT(1) /* The IE bit of CAN control register */
+#define PCH_CTRL_IE_SIE_EIE    (BIT(3) | BIT(2) | BIT(1))
+#define PCH_CTRL_CCE           BIT(6)
+#define PCH_CTRL_OPT           BIT(7) /* The OPT bit of CANCONT register. */
+#define PCH_OPT_SILENT         BIT(3) /* The Silent bit of CANOPT reg. */
+#define PCH_OPT_LBACK          BIT(4) /* The LoopBack bit of CANOPT reg. */
+
+#define PCH_CMASK_RX_TX_SET    0x00f3
+#define PCH_CMASK_RX_TX_GET    0x0073
+#define PCH_CMASK_ALL          0xff
+#define PCH_CMASK_NEWDAT       BIT(2)
+#define PCH_CMASK_CLRINTPND    BIT(3)
+#define PCH_CMASK_CTRL         BIT(4)
+#define PCH_CMASK_ARB          BIT(5)
+#define PCH_CMASK_MASK         BIT(6)
+#define PCH_CMASK_RDWR         BIT(7)
+#define PCH_IF_MCONT_NEWDAT    BIT(15)
+#define PCH_IF_MCONT_MSGLOST   BIT(14)
+#define PCH_IF_MCONT_INTPND    BIT(13)
+#define PCH_IF_MCONT_UMASK     BIT(12)
+#define PCH_IF_MCONT_TXIE      BIT(11)
+#define PCH_IF_MCONT_RXIE      BIT(10)
+#define PCH_IF_MCONT_RMTEN     BIT(9)
+#define PCH_IF_MCONT_TXRQXT    BIT(8)
+#define PCH_IF_MCONT_EOB       BIT(7)
+#define PCH_IF_MCONT_DLC       (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+#define PCH_MASK2_MDIR_MXTD    (BIT(14) | BIT(15))
+#define PCH_ID2_DIR            BIT(13)
+#define PCH_ID2_XTD            BIT(14)
+#define PCH_ID_MSGVAL          BIT(15)
+#define PCH_IF_CREQ_BUSY       BIT(15)
+
+#define PCH_STATUS_INT         0x8000
+#define PCH_REC                        0x00007f00
+#define PCH_TEC                        0x000000ff
+
+#define PCH_TX_OK              BIT(3)
+#define PCH_RX_OK              BIT(4)
+#define PCH_EPASSIV            BIT(5)
+#define PCH_EWARN              BIT(6)
+#define PCH_BUS_OFF            BIT(7)
+#define PCH_LEC0               BIT(0)
+#define PCH_LEC1               BIT(1)
+#define PCH_LEC2               BIT(2)
 #define PCH_LEC_ALL            (PCH_LEC0 | PCH_LEC1 | PCH_LEC2)
 #define PCH_STUF_ERR           PCH_LEC0
 #define PCH_FORM_ERR           PCH_LEC1
 #define PCH_CRC_ERR            (PCH_LEC1 | PCH_LEC2)
 
 /* bit position of certain controller bits. */
-#define BIT_BITT_BRP           0
-#define BIT_BITT_SJW           6
-#define BIT_BITT_TSEG1         8
-#define BIT_BITT_TSEG2         12
-#define BIT_IF1_MCONT_RXIE     10
-#define BIT_IF2_MCONT_TXIE     11
-#define BIT_BRPE_BRPE          6
-#define BIT_ES_TXERRCNT                0
-#define BIT_ES_RXERRCNT                8
-#define MSK_BITT_BRP           0x3f
-#define MSK_BITT_SJW           0xc0
-#define MSK_BITT_TSEG1         0xf00
-#define MSK_BITT_TSEG2         0x7000
-#define MSK_BRPE_BRPE          0x3c0
-#define MSK_BRPE_GET           0x0f
-#define MSK_CTRL_IE_SIE_EIE    0x07
-#define MSK_MCONT_TXIE         0x08
-#define MSK_MCONT_RXIE         0x10
-#define PCH_CAN_NO_TX_BUFF     1
-#define COUNTER_LIMIT          10
+#define PCH_BIT_BRP            0
+#define PCH_BIT_SJW            6
+#define PCH_BIT_TSEG1          8
+#define PCH_BIT_TSEG2          12
+#define PCH_BIT_BRPE_BRPE      6
+#define PCH_MSK_BITT_BRP       0x3f
+#define PCH_MSK_BRPE_BRPE      0x3c0
+#define PCH_MSK_CTRL_IE_SIE_EIE        0x07
+#define PCH_COUNTER_LIMIT      10
 
 #define PCH_CAN_CLK            50000000        /* 50MHz */
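
(Editor's note: besides the PCH_ prefix, the register masks above are rewritten with the BIT() helper from <linux/bitops.h>; BIT(n) expands to (1UL << (n)), so the renamed constants keep their old numeric values while making the bit positions explicit. A couple of illustrative equivalences — the EXAMPLE_ names are not part of the driver:)

#include <linux/bitops.h>

#define EXAMPLE_NEWDAT		BIT(15)				/* 0x8000, was CAN_IF_MCONT_NEWDAT */
#define EXAMPLE_IE_SIE_EIE	(BIT(3) | BIT(2) | BIT(1))	/* 0x000e, was CAN_CTRL_IE_SIE_EIE */
#define EXAMPLE_DLC_MASK	(BIT(3) | BIT(2) | BIT(1) | BIT(0))	/* 0x000f */
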
 
@@ -181,14 +169,14 @@ struct pch_can_priv {
        struct can_priv can;
        unsigned int can_num;
        struct pci_dev *dev;
-       unsigned int tx_enable[MAX_MSG_OBJ];
-       unsigned int rx_enable[MAX_MSG_OBJ];
-       unsigned int rx_link[MAX_MSG_OBJ];
+       unsigned int tx_enable[PCH_MAX_MSG_OBJ];
+       unsigned int rx_enable[PCH_MAX_MSG_OBJ];
+       unsigned int rx_link[PCH_MAX_MSG_OBJ];
        unsigned int int_enables;
        unsigned int int_stat;
        struct net_device *ndev;
        spinlock_t msgif_reg_lock; /* Message Interface Registers Access Lock*/
-       unsigned int msg_obj[MAX_MSG_OBJ];
+       unsigned int msg_obj[PCH_MAX_MSG_OBJ];
        struct pch_can_regs __iomem *regs;
        struct napi_struct napi;
        unsigned int tx_obj;    /* Point next Tx Obj index */
@@ -228,11 +216,11 @@ static void pch_can_set_run_mode(struct pch_can_priv *priv,
 {
        switch (mode) {
        case PCH_CAN_RUN:
-               pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_INIT);
+               pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_INIT);
                break;
 
        case PCH_CAN_STOP:
-               pch_can_bit_set(&priv->regs->cont, CAN_CTRL_INIT);
+               pch_can_bit_set(&priv->regs->cont, PCH_CTRL_INIT);
                break;
 
        default:
@@ -246,30 +234,30 @@ static void pch_can_set_optmode(struct pch_can_priv *priv)
        u32 reg_val = ioread32(&priv->regs->opt);
 
        if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
-               reg_val |= CAN_OPT_SILENT;
+               reg_val |= PCH_OPT_SILENT;
 
        if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
-               reg_val |= CAN_OPT_LBACK;
+               reg_val |= PCH_OPT_LBACK;
 
-       pch_can_bit_set(&priv->regs->cont, CAN_CTRL_OPT);
+       pch_can_bit_set(&priv->regs->cont, PCH_CTRL_OPT);
        iowrite32(reg_val, &priv->regs->opt);
 }
 
 static void pch_can_set_int_custom(struct pch_can_priv *priv)
 {
        /* Clearing the IE, SIE and EIE bits of Can control register. */
-       pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+       pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
 
        /* Appropriately setting them. */
        pch_can_bit_set(&priv->regs->cont,
-                       ((priv->int_enables & MSK_CTRL_IE_SIE_EIE) << 1));
+                       ((priv->int_enables & PCH_MSK_CTRL_IE_SIE_EIE) << 1));
 }
 
 /* This function retrieves interrupt enabled for the CAN device. */
 static void pch_can_get_int_enables(struct pch_can_priv *priv, u32 *enables)
 {
        /* Obtaining the status of IE, SIE and EIE interrupt bits. */
-       *enables = ((ioread32(&priv->regs->cont) & CAN_CTRL_IE_SIE_EIE) >> 1);
+       *enables = ((ioread32(&priv->regs->cont) & PCH_CTRL_IE_SIE_EIE) >> 1);
 }
 
 static void pch_can_set_int_enables(struct pch_can_priv *priv,
@@ -277,19 +265,19 @@ static void pch_can_set_int_enables(struct pch_can_priv *priv,
 {
        switch (interrupt_no) {
        case PCH_CAN_ENABLE:
-               pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE);
+               pch_can_bit_set(&priv->regs->cont, PCH_CTRL_IE);
                break;
 
        case PCH_CAN_DISABLE:
-               pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE);
+               pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE);
                break;
 
        case PCH_CAN_ALL:
-               pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+               pch_can_bit_set(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
                break;
 
        case PCH_CAN_NONE:
-               pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+               pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
                break;
 
        default:
@@ -300,12 +288,12 @@ static void pch_can_set_int_enables(struct pch_can_priv *priv,
 
 static void pch_can_check_if_busy(u32 __iomem *creq_addr, u32 num)
 {
-       u32 counter = COUNTER_LIMIT;
+       u32 counter = PCH_COUNTER_LIMIT;
        u32 ifx_creq;
 
        iowrite32(num, creq_addr);
        while (counter) {
-               ifx_creq = ioread32(creq_addr) & CAN_IF_CREQ_BUSY;
+               ifx_creq = ioread32(creq_addr) & PCH_IF_CREQ_BUSY;
                if (!ifx_creq)
                        break;
                counter--;
@@ -322,22 +310,22 @@ static void pch_can_set_rx_enable(struct pch_can_priv *priv, u32 buff_num,
 
        spin_lock_irqsave(&priv->msgif_reg_lock, flags);
        /* Reading the receive buffer data from RAM to Interface1 registers */
-       iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+       iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
        pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
 
        /* Setting the IF1MASK1 register to access MsgVal and RxIE bits */
-       iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL,
+       iowrite32(PCH_CMASK_RDWR | PCH_CMASK_ARB | PCH_CMASK_CTRL,
                  &priv->regs->if1_cmask);
 
-       if (set == ENABLE) {
+       if (set == PCH_ENABLE) {
                /* Setting the MsgVal and RxIE bits */
-               pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE);
-               pch_can_bit_set(&priv->regs->if1_id2, CAN_ID_MSGVAL);
+               pch_can_bit_set(&priv->regs->if1_mcont, PCH_IF_MCONT_RXIE);
+               pch_can_bit_set(&priv->regs->if1_id2, PCH_ID_MSGVAL);
 
-       } else if (set == DISABLE) {
+       } else if (set == PCH_DISABLE) {
                /* Resetting the MsgVal and RxIE bits */
-               pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE);
-               pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID_MSGVAL);
+               pch_can_bit_clear(&priv->regs->if1_mcont, PCH_IF_MCONT_RXIE);
+               pch_can_bit_clear(&priv->regs->if1_id2, PCH_ID_MSGVAL);
        }
 
        pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
@@ -350,8 +338,8 @@ static void pch_can_rx_enable_all(struct pch_can_priv *priv)
 
        /* Traversing to obtain the object configured as receivers. */
        for (i = 0; i < PCH_OBJ_NUM; i++) {
-               if (priv->msg_obj[i] == MSG_OBJ_RX)
-                       pch_can_set_rx_enable(priv, i + 1, ENABLE);
+               if (priv->msg_obj[i] == PCH_MSG_OBJ_RX)
+                       pch_can_set_rx_enable(priv, i + 1, PCH_ENABLE);
        }
 }
 
@@ -361,8 +349,8 @@ static void pch_can_rx_disable_all(struct pch_can_priv *priv)
 
        /* Traversing to obtain the object configured as receivers. */
        for (i = 0; i < PCH_OBJ_NUM; i++) {
-               if (priv->msg_obj[i] == MSG_OBJ_RX)
-                       pch_can_set_rx_enable(priv, i + 1, DISABLE);
+               if (priv->msg_obj[i] == PCH_MSG_OBJ_RX)
+                       pch_can_set_rx_enable(priv, i + 1, PCH_DISABLE);
        }
 }
 
@@ -373,22 +361,22 @@ static void pch_can_set_tx_enable(struct pch_can_priv *priv, u32 buff_num,
 
        spin_lock_irqsave(&priv->msgif_reg_lock, flags);
        /* Reading the Msg buffer from Message RAM to Interface2 registers. */
-       iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+       iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
        pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
 
        /* Setting the IF2CMASK register for accessing the
                MsgVal and TxIE bits */
-       iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL,
+       iowrite32(PCH_CMASK_RDWR | PCH_CMASK_ARB | PCH_CMASK_CTRL,
                 &priv->regs->if2_cmask);
 
-       if (set == ENABLE) {
+       if (set == PCH_ENABLE) {
                /* Setting the MsgVal and TxIE bits */
-               pch_can_bit_set(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
-               pch_can_bit_set(&priv->regs->if2_id2, CAN_ID_MSGVAL);
-       } else if (set == DISABLE) {
+               pch_can_bit_set(&priv->regs->if2_mcont, PCH_IF_MCONT_TXIE);
+               pch_can_bit_set(&priv->regs->if2_id2, PCH_ID_MSGVAL);
+       } else if (set == PCH_DISABLE) {
                /* Resetting the MsgVal and TxIE bits. */
-               pch_can_bit_clear(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
-               pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID_MSGVAL);
+               pch_can_bit_clear(&priv->regs->if2_mcont, PCH_IF_MCONT_TXIE);
+               pch_can_bit_clear(&priv->regs->if2_id2, PCH_ID_MSGVAL);
        }
 
        pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
@@ -401,8 +389,8 @@ static void pch_can_tx_enable_all(struct pch_can_priv *priv)
 
        /* Traversing to obtain the object configured as transmit object. */
        for (i = 0; i < PCH_OBJ_NUM; i++) {
-               if (priv->msg_obj[i] == MSG_OBJ_TX)
-                       pch_can_set_tx_enable(priv, i + 1, ENABLE);
+               if (priv->msg_obj[i] == PCH_MSG_OBJ_TX)
+                       pch_can_set_tx_enable(priv, i + 1, PCH_ENABLE);
        }
 }
 
@@ -412,8 +400,8 @@ static void pch_can_tx_disable_all(struct pch_can_priv *priv)
 
        /* Traversing to obtain the object configured as transmit object. */
        for (i = 0; i < PCH_OBJ_NUM; i++) {
-               if (priv->msg_obj[i] == MSG_OBJ_TX)
-                       pch_can_set_tx_enable(priv, i + 1, DISABLE);
+               if (priv->msg_obj[i] == PCH_MSG_OBJ_TX)
+                       pch_can_set_tx_enable(priv, i + 1, PCH_DISABLE);
        }
 }
 
@@ -423,15 +411,15 @@ static void pch_can_get_rx_enable(struct pch_can_priv *priv, u32 buff_num,
        unsigned long flags;
 
        spin_lock_irqsave(&priv->msgif_reg_lock, flags);
-       iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+       iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
        pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
 
-       if (((ioread32(&priv->regs->if1_id2)) & CAN_ID_MSGVAL) &&
+       if (((ioread32(&priv->regs->if1_id2)) & PCH_ID_MSGVAL) &&
                        ((ioread32(&priv->regs->if1_mcont)) &
-                       CAN_IF_MCONT_RXIE))
-               *enable = ENABLE;
+                       PCH_IF_MCONT_RXIE))
+               *enable = PCH_ENABLE;
        else
-               *enable = DISABLE;
+               *enable = PCH_DISABLE;
        spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
 }
 
@@ -441,15 +429,15 @@ static void pch_can_get_tx_enable(struct pch_can_priv *priv, u32 buff_num,
        unsigned long flags;
 
        spin_lock_irqsave(&priv->msgif_reg_lock, flags);
-       iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+       iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
        pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
 
-       if (((ioread32(&priv->regs->if2_id2)) & CAN_ID_MSGVAL) &&
+       if (((ioread32(&priv->regs->if2_id2)) & PCH_ID_MSGVAL) &&
                        ((ioread32(&priv->regs->if2_mcont)) &
-                       CAN_IF_MCONT_TXIE)) {
-               *enable = ENABLE;
+                       PCH_IF_MCONT_TXIE)) {
+               *enable = PCH_ENABLE;
        } else {
-               *enable = DISABLE;
+               *enable = PCH_DISABLE;
        }
        spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
 }
@@ -465,13 +453,13 @@ static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
        unsigned long flags;
 
        spin_lock_irqsave(&priv->msgif_reg_lock, flags);
-       iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+       iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
        pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
-       iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL, &priv->regs->if1_cmask);
-       if (set == ENABLE)
-               pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB);
+       iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL, &priv->regs->if1_cmask);
+       if (set == PCH_ENABLE)
+               pch_can_bit_clear(&priv->regs->if1_mcont, PCH_IF_MCONT_EOB);
        else
-               pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB);
+               pch_can_bit_set(&priv->regs->if1_mcont, PCH_IF_MCONT_EOB);
 
        pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
        spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
@@ -483,13 +471,13 @@ static void pch_can_get_rx_buffer_link(struct pch_can_priv *priv,
        unsigned long flags;
 
        spin_lock_irqsave(&priv->msgif_reg_lock, flags);
-       iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+       iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
        pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
 
-       if (ioread32(&priv->regs->if1_mcont) & CAN_IF_MCONT_EOB)
-               *link = DISABLE;
+       if (ioread32(&priv->regs->if1_mcont) & PCH_IF_MCONT_EOB)
+               *link = PCH_DISABLE;
        else
-               *link = ENABLE;
+               *link = PCH_ENABLE;
        spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
 }
 
@@ -498,7 +486,7 @@ static void pch_can_clear_buffers(struct pch_can_priv *priv)
        int i;
 
        for (i = 0; i < PCH_RX_OBJ_NUM; i++) {
-               iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if1_cmask);
+               iowrite32(PCH_CMASK_RX_TX_SET, &priv->regs->if1_cmask);
                iowrite32(0xffff, &priv->regs->if1_mask1);
                iowrite32(0xffff, &priv->regs->if1_mask2);
                iowrite32(0x0, &priv->regs->if1_id1);
@@ -508,14 +496,14 @@ static void pch_can_clear_buffers(struct pch_can_priv *priv)
                iowrite32(0x0, &priv->regs->if1_dataa2);
                iowrite32(0x0, &priv->regs->if1_datab1);
                iowrite32(0x0, &priv->regs->if1_datab2);
-               iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
-                         CAN_CMASK_ARB | CAN_CMASK_CTRL,
+               iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK |
+                         PCH_CMASK_ARB | PCH_CMASK_CTRL,
                          &priv->regs->if1_cmask);
                pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
        }
 
        for (i = i;  i < PCH_OBJ_NUM; i++) {
-               iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if2_cmask);
+               iowrite32(PCH_CMASK_RX_TX_SET, &priv->regs->if2_cmask);
                iowrite32(0xffff, &priv->regs->if2_mask1);
                iowrite32(0xffff, &priv->regs->if2_mask2);
                iowrite32(0x0, &priv->regs->if2_id1);
@@ -525,8 +513,8 @@ static void pch_can_clear_buffers(struct pch_can_priv *priv)
                iowrite32(0x0, &priv->regs->if2_dataa2);
                iowrite32(0x0, &priv->regs->if2_datab1);
                iowrite32(0x0, &priv->regs->if2_datab2);
-               iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
-                         CAN_CMASK_ARB | CAN_CMASK_CTRL,
+               iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK |
+                         PCH_CMASK_ARB | PCH_CMASK_CTRL,
                          &priv->regs->if2_cmask);
                pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
        }
@@ -540,8 +528,8 @@ static void pch_can_config_rx_tx_buffers(struct pch_can_priv *priv)
        spin_lock_irqsave(&priv->msgif_reg_lock, flags);
 
        for (i = 0; i < PCH_OBJ_NUM; i++) {
-               if (priv->msg_obj[i] == MSG_OBJ_RX) {
-                       iowrite32(CAN_CMASK_RX_TX_GET,
+               if (priv->msg_obj[i] == PCH_MSG_OBJ_RX) {
+                       iowrite32(PCH_CMASK_RX_TX_GET,
                                &priv->regs->if1_cmask);
                        pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
 
@@ -549,48 +537,48 @@ static void pch_can_config_rx_tx_buffers(struct pch_can_priv *priv)
                        iowrite32(0x0, &priv->regs->if1_id2);
 
                        pch_can_bit_set(&priv->regs->if1_mcont,
-                                       CAN_IF_MCONT_UMASK);
+                                       PCH_IF_MCONT_UMASK);
 
                        /* Set FIFO mode set to 0 except last Rx Obj*/
                        pch_can_bit_clear(&priv->regs->if1_mcont,
-                                         CAN_IF_MCONT_EOB);
+                                         PCH_IF_MCONT_EOB);
                        /* In case FIFO mode, Last EoB of Rx Obj must be 1 */
                        if (i == (PCH_RX_OBJ_NUM - 1))
                                pch_can_bit_set(&priv->regs->if1_mcont,
-                                                 CAN_IF_MCONT_EOB);
+                                                 PCH_IF_MCONT_EOB);
 
                        iowrite32(0, &priv->regs->if1_mask1);
                        pch_can_bit_clear(&priv->regs->if1_mask2,
-                                         0x1fff | CAN_MASK2_MDIR_MXTD);
+                                         0x1fff | PCH_MASK2_MDIR_MXTD);
 
                        /* Setting CMASK for writing */
-                       iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
-                                 CAN_CMASK_ARB | CAN_CMASK_CTRL,
+                       iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK |
+                                 PCH_CMASK_ARB | PCH_CMASK_CTRL,
                                  &priv->regs->if1_cmask);
 
                        pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
-               } else if (priv->msg_obj[i] == MSG_OBJ_TX) {
-                       iowrite32(CAN_CMASK_RX_TX_GET,
+               } else if (priv->msg_obj[i] == PCH_MSG_OBJ_TX) {
+                       iowrite32(PCH_CMASK_RX_TX_GET,
                                &priv->regs->if2_cmask);
                        pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
 
                        /* Resetting DIR bit for reception */
                        iowrite32(0x0, &priv->regs->if2_id1);
                        iowrite32(0x0, &priv->regs->if2_id2);
-                       pch_can_bit_set(&priv->regs->if2_id2, CAN_ID2_DIR);
+                       pch_can_bit_set(&priv->regs->if2_id2, PCH_ID2_DIR);
 
                        /* Setting EOB bit for transmitter */
-                       iowrite32(CAN_IF_MCONT_EOB, &priv->regs->if2_mcont);
+                       iowrite32(PCH_IF_MCONT_EOB, &priv->regs->if2_mcont);
 
                        pch_can_bit_set(&priv->regs->if2_mcont,
-                                       CAN_IF_MCONT_UMASK);
+                                       PCH_IF_MCONT_UMASK);
 
                        iowrite32(0, &priv->regs->if2_mask1);
                        pch_can_bit_clear(&priv->regs->if2_mask2, 0x1fff);
 
                        /* Setting CMASK for writing */
-                       iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
-                                 CAN_CMASK_ARB | CAN_CMASK_CTRL,
+                       iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK |
+                                 PCH_CMASK_ARB | PCH_CMASK_CTRL,
                                  &priv->regs->if2_cmask);
 
                        pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
@@ -632,39 +620,39 @@ static void pch_can_release(struct pch_can_priv *priv)
 /* This function clears interrupt(s) from the CAN device. */
 static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask)
 {
-       if (mask == CAN_STATUS_INT) {
+       if (mask == PCH_STATUS_INT) {
                ioread32(&priv->regs->stat);
                return;
        }
 
        /* Clear interrupt for transmit object */
-       if (priv->msg_obj[mask - 1] == MSG_OBJ_TX) {
+       if (priv->msg_obj[mask - 1] == PCH_MSG_OBJ_TX) {
                /* Setting CMASK for clearing interrupts for
                                         frame transmission. */
-               iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB,
+               iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
                          &priv->regs->if2_cmask);
 
                /* Resetting the ID registers. */
                pch_can_bit_set(&priv->regs->if2_id2,
-                              CAN_ID2_DIR | (0x7ff << 2));
+                              PCH_ID2_DIR | (0x7ff << 2));
                iowrite32(0x0, &priv->regs->if2_id1);
 
                /* Clearing NewDat, TxRqst & IntPnd */
                pch_can_bit_clear(&priv->regs->if2_mcont,
-                                 CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND |
-                                 CAN_IF_MCONT_TXRQXT);
+                                 PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND |
+                                 PCH_IF_MCONT_TXRQXT);
                pch_can_check_if_busy(&priv->regs->if2_creq, mask);
-       } else if (priv->msg_obj[mask - 1] == MSG_OBJ_RX) {
+       } else if (priv->msg_obj[mask - 1] == PCH_MSG_OBJ_RX) {
                /* Setting CMASK for clearing the reception interrupts. */
-               iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB,
+               iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
                          &priv->regs->if1_cmask);
 
                /* Clearing the Dir bit. */
-               pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR);
+               pch_can_bit_clear(&priv->regs->if1_id2, PCH_ID2_DIR);
 
                /* Clearing NewDat & IntPnd */
                pch_can_bit_clear(&priv->regs->if1_mcont,
-                                 CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND);
+                                 PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND);
 
                pch_can_check_if_busy(&priv->regs->if1_creq, mask);
        }
@@ -712,9 +700,9 @@ static void pch_can_error(struct net_device *ndev, u32 status)
                priv->can.can_stats.error_warning++;
                cf->can_id |= CAN_ERR_CRTL;
                errc = ioread32(&priv->regs->errc);
-               if (((errc & CAN_REC) >> 8) > 96)
+               if (((errc & PCH_REC) >> 8) > 96)
                        cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
-               if ((errc & CAN_TEC) > 96)
+               if ((errc & PCH_TEC) > 96)
                        cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
                dev_warn(&ndev->dev,
                        "%s -> Error Counter is more than 96.\n", __func__);
@@ -725,9 +713,9 @@ static void pch_can_error(struct net_device *ndev, u32 status)
                state = CAN_STATE_ERROR_PASSIVE;
                cf->can_id |= CAN_ERR_CRTL;
                errc = ioread32(&priv->regs->errc);
-               if (((errc & CAN_REC) >> 8) > 127)
+               if (((errc & PCH_REC) >> 8) > 127)
                        cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
-               if ((errc & CAN_TEC) > 127)
+               if ((errc & PCH_TEC) > 127)
                        cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
                dev_err(&ndev->dev,
                        "%s -> CAN controller is ERROR PASSIVE .\n", __func__);
@@ -795,20 +783,20 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
        struct net_device_stats *stats = &(priv->ndev->stats);
 
        /* Reading the message object from the Message RAM */
-       iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+       iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
        pch_can_check_if_busy(&priv->regs->if1_creq, int_stat);
 
        /* Reading the MCONT register. */
        reg = ioread32(&priv->regs->if1_mcont);
        reg &= 0xffff;
 
-       for (k = int_stat; !(reg & CAN_IF_MCONT_EOB); k++) {
+       for (k = int_stat; !(reg & PCH_IF_MCONT_EOB); k++) {
                /* If MsgLost bit set. */
-               if (reg & CAN_IF_MCONT_MSGLOST) {
+               if (reg & PCH_IF_MCONT_MSGLOST) {
                        dev_err(&priv->ndev->dev, "Msg Obj is overwritten.\n");
                        pch_can_bit_clear(&priv->regs->if1_mcont,
-                                         CAN_IF_MCONT_MSGLOST);
-                       iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL,
+                                         PCH_IF_MCONT_MSGLOST);
+                       iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL,
                                  &priv->regs->if1_cmask);
                        pch_can_check_if_busy(&priv->regs->if1_creq, k);
 
@@ -828,7 +816,7 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
                        rcv_pkts++;
                        goto RX_NEXT;
                }
-               if (!(reg & CAN_IF_MCONT_NEWDAT))
+               if (!(reg & PCH_IF_MCONT_NEWDAT))
                        goto RX_NEXT;
 
                skb = alloc_can_skb(priv->ndev, &cf);
@@ -836,7 +824,7 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
                        return -ENOMEM;
 
                /* Get Received data */
-               ide = ((ioread32(&priv->regs->if1_id2)) & CAN_ID2_XTD) >> 14;
+               ide = ((ioread32(&priv->regs->if1_id2)) & PCH_ID2_XTD) >> 14;
                if (ide) {
                        id = (ioread32(&priv->regs->if1_id1) & 0xffff);
                        id |= (((ioread32(&priv->regs->if1_id2)) &
@@ -848,7 +836,7 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
                        cf->can_id = (id & CAN_SFF_MASK);
                }
 
-               rtr = (ioread32(&priv->regs->if1_id2) &  CAN_ID2_DIR);
+               rtr = (ioread32(&priv->regs->if1_id2) &  PCH_ID2_DIR);
                if (rtr) {
                        cf->can_dlc = 0;
                        cf->can_id |= CAN_RTR_FLAG;
@@ -871,15 +859,15 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
                stats->rx_bytes += cf->can_dlc;
 
                if (k < PCH_FIFO_THRESH) {
-                       iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL |
-                                 CAN_CMASK_ARB, &priv->regs->if1_cmask);
+                       iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL |
+                                 PCH_CMASK_ARB, &priv->regs->if1_cmask);
 
                        /* Clearing the Dir bit. */
-                       pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR);
+                       pch_can_bit_clear(&priv->regs->if1_id2, PCH_ID2_DIR);
 
                        /* Clearing NewDat & IntPnd */
                        pch_can_bit_clear(&priv->regs->if1_mcont,
-                                         CAN_IF_MCONT_INTPND);
+                                         PCH_IF_MCONT_INTPND);
                        pch_can_check_if_busy(&priv->regs->if1_creq, k);
                } else if (k > PCH_FIFO_THRESH) {
                        pch_can_int_clr(priv, k);
@@ -890,7 +878,7 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
                }
 RX_NEXT:
                /* Reading the message object from the Message RAM */
-               iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+               iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
                pch_can_check_if_busy(&priv->regs->if1_creq, k + 1);
                reg = ioread32(&priv->regs->if1_mcont);
        }
@@ -913,7 +901,7 @@ static int pch_can_rx_poll(struct napi_struct *napi, int quota)
                return 0;
 
 INT_STAT:
-       if (int_stat == CAN_STATUS_INT) {
+       if (int_stat == PCH_STATUS_INT) {
                reg_stat = ioread32(&priv->regs->stat);
                if (reg_stat & (PCH_BUS_OFF | PCH_LEC_ALL)) {
                        if ((reg_stat & PCH_LEC_ALL) != PCH_LEC_ALL)
@@ -922,7 +910,7 @@ INT_STAT:
 
                if (reg_stat & PCH_TX_OK) {
                        spin_lock_irqsave(&priv->msgif_reg_lock, flags);
-                       iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+                       iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
                        pch_can_check_if_busy(&priv->regs->if2_creq,
                                               ioread32(&priv->regs->intr));
                        spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
@@ -933,7 +921,7 @@ INT_STAT:
                        pch_can_bit_clear(&priv->regs->stat, PCH_RX_OK);
 
                int_stat = pch_can_int_pending(priv);
-               if (int_stat == CAN_STATUS_INT)
+               if (int_stat == PCH_STATUS_INT)
                        goto INT_STAT;
        }
 
@@ -945,14 +933,14 @@ MSG_OBJ:
                if (rcv_pkts < 0)
                        return 0;
        } else if ((int_stat > PCH_RX_OBJ_NUM) && (int_stat <= PCH_OBJ_NUM)) {
-               if (priv->msg_obj[int_stat - 1] == MSG_OBJ_TX) {
+               if (priv->msg_obj[int_stat - 1] == PCH_MSG_OBJ_TX) {
                        /* Handle transmission interrupt */
                        can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_NUM - 1);
                        spin_lock_irqsave(&priv->msgif_reg_lock, flags);
-                       iowrite32(CAN_CMASK_RX_TX_GET | CAN_CMASK_CLRINTPND,
+                       iowrite32(PCH_CMASK_RX_TX_GET | PCH_CMASK_CLRINTPND,
                                  &priv->regs->if2_cmask);
                        dlc = ioread32(&priv->regs->if2_mcont) &
-                                      CAN_IF_MCONT_DLC;
+                                      PCH_IF_MCONT_DLC;
                        pch_can_check_if_busy(&priv->regs->if2_creq, int_stat);
                        spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
                        if (dlc > 8)
@@ -963,7 +951,7 @@ MSG_OBJ:
        }
 
        int_stat = pch_can_int_pending(priv);
-       if (int_stat == CAN_STATUS_INT)
+       if (int_stat == PCH_STATUS_INT)
                goto INT_STAT;
        else if (int_stat >= 1 && int_stat <= 32)
                goto MSG_OBJ;
@@ -983,17 +971,17 @@ static int pch_set_bittiming(struct net_device *ndev)
        u32 brp;
 
        /* Setting the CCE bit for accessing the Can Timing register. */
-       pch_can_bit_set(&priv->regs->cont, CAN_CTRL_CCE);
+       pch_can_bit_set(&priv->regs->cont, PCH_CTRL_CCE);
 
        brp = (bt->tq) / (1000000000/PCH_CAN_CLK) - 1;
-       canbit = brp & MSK_BITT_BRP;
-       canbit |= (bt->sjw - 1) << BIT_BITT_SJW;
-       canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << BIT_BITT_TSEG1;
-       canbit |= (bt->phase_seg2 - 1) << BIT_BITT_TSEG2;
-       bepe = (brp & MSK_BRPE_BRPE) >> BIT_BRPE_BRPE;
+       canbit = brp & PCH_MSK_BITT_BRP;
+       canbit |= (bt->sjw - 1) << PCH_BIT_SJW;
+       canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << PCH_BIT_TSEG1;
+       canbit |= (bt->phase_seg2 - 1) << PCH_BIT_TSEG2;
+       bepe = (brp & PCH_MSK_BRPE_BRPE) >> PCH_BIT_BRPE_BRPE;
        iowrite32(canbit, &priv->regs->bitt);
        iowrite32(bepe, &priv->regs->brpe);
-       pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_CCE);
+       pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_CCE);
 
        return 0;
 }
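
(Editor's note: the prescaler arithmetic above is easier to see with numbers — at a 50 MHz controller clock one input tick is 1e9 / 50e6 = 20 ns, so a requested time quantum of, say, 500 ns gives brp = 500 / 20 - 1 = 24; the low bits go into BITT and the remainder into the BRPE extension register. A worked sketch of just the arithmetic, with hypothetical names and values:)

#include <linux/types.h>

#define EXAMPLE_CAN_CLK	50000000			/* 50 MHz, as in PCH_CAN_CLK */

static u32 example_brp_from_tq(u32 tq_ns)
{
	/* one controller tick = 1e9 / 50e6 = 20 ns, so tq = 500 ns -> brp = 24 */
	return tq_ns / (1000000000 / EXAMPLE_CAN_CLK) - 1;
}
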
@@ -1137,19 +1125,19 @@ static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
        spin_lock_irqsave(&priv->msgif_reg_lock, flags);
 
        /* Reading the Msg Obj from the Msg RAM to the Interface register. */
-       iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+       iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
        pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail);
 
        /* Setting the CMASK register. */
-       pch_can_bit_set(&priv->regs->if2_cmask, CAN_CMASK_ALL);
+       pch_can_bit_set(&priv->regs->if2_cmask, PCH_CMASK_ALL);
 
        /* If ID extended is set. */
        pch_can_bit_clear(&priv->regs->if2_id1, 0xffff);
-       pch_can_bit_clear(&priv->regs->if2_id2, 0x1fff | CAN_ID2_XTD);
+       pch_can_bit_clear(&priv->regs->if2_id2, 0x1fff | PCH_ID2_XTD);
        if (cf->can_id & CAN_EFF_FLAG) {
                pch_can_bit_set(&priv->regs->if2_id1, cf->can_id & 0xffff);
                pch_can_bit_set(&priv->regs->if2_id2,
-                               ((cf->can_id >> 16) & 0x1fff) | CAN_ID2_XTD);
+                               ((cf->can_id >> 16) & 0x1fff) | PCH_ID2_XTD);
        } else {
                pch_can_bit_set(&priv->regs->if2_id1, 0);
                pch_can_bit_set(&priv->regs->if2_id2,
@@ -1158,7 +1146,7 @@ static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
 
        /* If remote frame has to be transmitted.. */
        if (cf->can_id & CAN_RTR_FLAG)
-               pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID2_DIR);
+               pch_can_bit_clear(&priv->regs->if2_id2, PCH_ID2_DIR);
 
        for (i = 0, j = 0; i < cf->can_dlc; j++) {
                iowrite32(le32_to_cpu(cf->data[i++]),
@@ -1177,12 +1165,12 @@ static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
 
        /* Clearing IntPend, NewDat & TxRqst */
        pch_can_bit_clear(&priv->regs->if2_mcont,
-                         CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND |
-                         CAN_IF_MCONT_TXRQXT);
+                         PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND |
+                         PCH_IF_MCONT_TXRQXT);
 
        /* Setting NewDat, TxRqst bits */
        pch_can_bit_set(&priv->regs->if2_mcont,
-                       CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_TXRQXT);
+                       PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_TXRQXT);
 
        pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail);
 
@@ -1245,7 +1233,7 @@ static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
 
        /* Save Tx buffer enable state */
        for (i = 0; i < PCH_OBJ_NUM; i++) {
-               if (priv->msg_obj[i] == MSG_OBJ_TX)
+               if (priv->msg_obj[i] == PCH_MSG_OBJ_TX)
                        pch_can_get_tx_enable(priv, i + 1,
                                              &(priv->tx_enable[i]));
        }
@@ -1255,7 +1243,7 @@ static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
 
        /* Save Rx buffer enable state */
        for (i = 0; i < PCH_OBJ_NUM; i++) {
-               if (priv->msg_obj[i] == MSG_OBJ_RX) {
+               if (priv->msg_obj[i] == PCH_MSG_OBJ_RX) {
                        pch_can_get_rx_enable(priv, i + 1,
                                                &(priv->rx_enable[i]));
                        pch_can_get_rx_buffer_link(priv, i + 1,
@@ -1313,7 +1301,7 @@ static int pch_can_resume(struct pci_dev *pdev)
 
        /* Enabling the transmit buffer. */
        for (i = 0; i < PCH_OBJ_NUM; i++) {
-               if (priv->msg_obj[i] == MSG_OBJ_TX) {
+               if (priv->msg_obj[i] == PCH_MSG_OBJ_TX) {
                        pch_can_set_tx_enable(priv, i + 1,
                                              priv->tx_enable[i]);
                }
@@ -1321,7 +1309,7 @@ static int pch_can_resume(struct pci_dev *pdev)
 
        /* Configuring the receive buffer and enabling them. */
        for (i = 0; i < PCH_OBJ_NUM; i++) {
-               if (priv->msg_obj[i] == MSG_OBJ_RX) {
+               if (priv->msg_obj[i] == PCH_MSG_OBJ_RX) {
                        /* Restore buffer link */
                        pch_can_set_rx_buffer_link(priv, i + 1,
                                                   priv->rx_link[i]);
@@ -1349,8 +1337,8 @@ static int pch_can_get_berr_counter(const struct net_device *dev,
 {
        struct pch_can_priv *priv = netdev_priv(dev);
 
-       bec->txerr = ioread32(&priv->regs->errc) & CAN_TEC;
-       bec->rxerr = (ioread32(&priv->regs->errc) & CAN_REC) >> 8;
+       bec->txerr = ioread32(&priv->regs->errc) & PCH_TEC;
+       bec->rxerr = (ioread32(&priv->regs->errc) & PCH_REC) >> 8;
 
        return 0;
 }
@@ -1410,10 +1398,10 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
 
        priv->can.clock.freq = PCH_CAN_CLK; /* Hz */
        for (index = 0; index < PCH_RX_OBJ_NUM;)
-               priv->msg_obj[index++] = MSG_OBJ_RX;
+               priv->msg_obj[index++] = PCH_MSG_OBJ_RX;
 
        for (index = index;  index < PCH_OBJ_NUM;)
-               priv->msg_obj[index++] = MSG_OBJ_TX;
+               priv->msg_obj[index++] = PCH_MSG_OBJ_TX;
 
        netif_napi_add(ndev, &priv->napi, pch_can_rx_poll, PCH_RX_OBJ_NUM);
 
index 5bfccfdf3bbb37d5bde186a6f5103d2d53c96a94..09c3e9db931618067156cb07621d25dee10e81e4 100644 (file)
@@ -107,17 +107,13 @@ static int __devinit sja1000_ofp_probe(struct platform_device *ofdev,
        res_size = resource_size(&res);
 
        if (!request_mem_region(res.start, res_size, DRV_NAME)) {
-               dev_err(&ofdev->dev, "couldn't request %#llx..%#llx\n",
-                       (unsigned long long)res.start,
-                       (unsigned long long)res.end);
+               dev_err(&ofdev->dev, "couldn't request %pR\n", &res);
                return -EBUSY;
        }
 
        base = ioremap_nocache(res.start, res_size);
        if (!base) {
-               dev_err(&ofdev->dev, "couldn't ioremap %#llx..%#llx\n",
-                       (unsigned long long)res.start,
-                       (unsigned long long)res.end);
+               dev_err(&ofdev->dev, "couldn't ioremap %pR\n", &res);
                err = -ENOMEM;
                goto exit_release_mem;
        }
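
(Editor's note: the two error paths above move to the %pR printk extension, which formats a struct resource — range and flags — directly and removes the hand-rolled unsigned long long casts. A minimal sketch:)

#include <linux/device.h>
#include <linux/ioport.h>

static void example_report_resource(struct device *dev, struct resource *res)
{
	/* %pR prints something like "[mem 0xd0000000-0xd00000ff]" from the resource */
	dev_err(dev, "couldn't request %pR\n", res);
}
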
index 92bac19ad60ab0be5383b7566f48cc5dbbf2ab8e..594ca9c2c10a81bbbddf01bb493b4303862d1f39 100644 (file)
@@ -1695,7 +1695,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
                *work = num;
                return -EINVAL;
        }
-       *work = 2 + req2->num_additional_wqes;;
+       *work = 2 + req2->num_additional_wqes;
 
        l5_cid = req1->iscsi_conn_id;
        if (l5_cid >= MAX_ISCSI_TBL_SZ)
index 8ea01962e045436c1a9c17dff6ab0d34f2efbba3..4766b4116b412702d84735aaf6b119906c6430e1 100644 (file)
@@ -60,7 +60,7 @@ enum {
         * MSI-X interrupt index usage.
         */
        MSIX_FW         = 0,            /* MSI-X index for firmware Q */
-       MSIX_NIQFLINT   = 1,            /* MSI-X index base for Ingress Qs */
+       MSIX_IQFLINT    = 1,            /* MSI-X index base for Ingress Qs */
        MSIX_EXTRAS     = 1,
        MSIX_ENTRIES    = MAX_ETH_QSETS + MSIX_EXTRAS,
 
index c3449bbc585a682b57a39b306f251f9691e27e0b..9246d2fa6cf9bfa835c34ded95b6c8febddebe77 100644 (file)
@@ -280,9 +280,7 @@ static void name_msix_vecs(struct adapter *adapter)
                const struct port_info *pi = netdev_priv(dev);
                int qs, msi;
 
-               for (qs = 0, msi = MSIX_NIQFLINT;
-                    qs < pi->nqsets;
-                    qs++, msi++) {
+               for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
                        snprintf(adapter->msix_info[msi].desc, namelen,
                                 "%s-%d", dev->name, qs);
                        adapter->msix_info[msi].desc[namelen] = 0;
@@ -309,7 +307,7 @@ static int request_msix_queue_irqs(struct adapter *adapter)
        /*
         * Ethernet queues.
         */
-       msi = MSIX_NIQFLINT;
+       msi = MSIX_IQFLINT;
        for_each_ethrxq(s, rxq) {
                err = request_irq(adapter->msix_info[msi].vec,
                                  t4vf_sge_intr_msix, 0,
@@ -337,7 +335,7 @@ static void free_msix_queue_irqs(struct adapter *adapter)
        int rxq, msi;
 
        free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
-       msi = MSIX_NIQFLINT;
+       msi = MSIX_IQFLINT;
        for_each_ethrxq(s, rxq)
                free_irq(adapter->msix_info[msi++].vec,
                         &s->ethrxq[rxq].rspq);
@@ -527,7 +525,7 @@ static int setup_sge_queues(struct adapter *adapter)
         * brought up at which point lots of things get nailed down
         * permanently ...
         */
-       msix = MSIX_NIQFLINT;
+       msix = MSIX_IQFLINT;
        for_each_port(adapter, pidx) {
                struct net_device *dev = adapter->port[pidx];
                struct port_info *pi = netdev_priv(dev);
@@ -1348,6 +1346,8 @@ struct queue_port_stats {
        u64 rx_csum;
        u64 vlan_ex;
        u64 vlan_ins;
+       u64 lro_pkts;
+       u64 lro_merged;
 };
 
 /*
@@ -1385,6 +1385,8 @@ static const char stats_strings[][ETH_GSTRING_LEN] = {
        "RxCsumGood        ",
        "VLANextractions   ",
        "VLANinsertions    ",
+       "GROPackets        ",
+       "GROMerged         ",
 };
 
 /*
@@ -1434,6 +1436,8 @@ static void collect_sge_port_stats(const struct adapter *adapter,
                stats->rx_csum += rxq->stats.rx_cso;
                stats->vlan_ex += rxq->stats.vlan_ex;
                stats->vlan_ins += txq->vlan_ins;
+               stats->lro_pkts += rxq->stats.lro_pkts;
+               stats->lro_merged += rxq->stats.lro_merged;
        }
 }
 
@@ -1529,15 +1533,20 @@ static void cxgb4vf_get_wol(struct net_device *dev,
        memset(&wol->sopass, 0, sizeof(wol->sopass));
 }
 
+/*
+ * TCP Segmentation Offload flags which we support.
+ */
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+
 /*
  * Set TCP Segmentation Offloading feature capabilities.
  */
 static int cxgb4vf_set_tso(struct net_device *dev, u32 tso)
 {
        if (tso)
-               dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+               dev->features |= TSO_FLAGS;
        else
-               dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+               dev->features &= ~TSO_FLAGS;
        return 0;
 }
 
@@ -2028,7 +2037,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
  * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above.  We leave
  * it to our caller to tear down the directory (debugfs_root).
  */
-static void __devexit cleanup_debugfs(struct adapter *adapter)
+static void cleanup_debugfs(struct adapter *adapter)
 {
        BUG_ON(adapter->debugfs_root == NULL);
 
@@ -2046,7 +2055,7 @@ static void __devexit cleanup_debugfs(struct adapter *adapter)
  * adapter parameters we're going to be using and initialize basic adapter
  * hardware support.
  */
-static int adap_init0(struct adapter *adapter)
+static int __devinit adap_init0(struct adapter *adapter)
 {
        struct vf_resources *vfres = &adapter->params.vfres;
        struct sge_params *sge_params = &adapter->params.sge;
@@ -2470,7 +2479,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
                version_printed = 1;
        }
 
-
        /*
         * Initialize generic PCI device state.
         */
@@ -2607,7 +2615,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
                netif_carrier_off(netdev);
                netdev->irq = pdev->irq;
 
-               netdev->features = (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+               netdev->features = (NETIF_F_SG | TSO_FLAGS |
                                    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                                    NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
                                    NETIF_F_GRO);
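
Grouping the TSO feature bits behind one TSO_FLAGS macro keeps the ethtool set_tso handler and the probe-time netdev->features initialization in step, and brings NETIF_F_TSO_ECN along with NETIF_F_TSO/NETIF_F_TSO6. A minimal sketch of the pattern, with a hypothetical handler name:

#include <linux/netdevice.h>

#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)

/* ethtool set_tso hook: toggle all TSO variants as a group */
static int example_set_tso(struct net_device *dev, u32 tso)
{
	if (tso)
		dev->features |= TSO_FLAGS;
	else
		dev->features &= ~TSO_FLAGS;
	return 0;
}
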
index ecf0770bf0ff60bde745ff12c00e337a4aa54982..e0b3d1bc2fdf6f206e538517aa5f4fe203c140bb 100644 (file)
@@ -1568,6 +1568,9 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
        } else
                skb_checksum_none_assert(skb);
 
+       /*
+        * Deliver the packet to the stack.
+        */
        if (unlikely(pkt->vlan_ex)) {
                struct vlan_group *grp = pi->vlan_grp;
 
@@ -2143,7 +2146,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
 
                /*
                 * Calculate the size of the hardware free list ring plus
-                * status page (which the SGE will place at the end of the
+                * Status Page (which the SGE will place after the end of the
                 * free list ring) in Egress Queue Units.
                 */
                flsz = (fl->size / FL_PER_EQ_UNIT +
@@ -2240,8 +2243,8 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
        struct port_info *pi = netdev_priv(dev);
 
        /*
-        * Calculate the size of the hardware TX Queue (including the
-        * status age on the end) in units of TX Descriptors.
+        * Calculate the size of the hardware TX Queue (including the Status
+        * Page on the end of the TX Queue) in units of TX Descriptors.
         */
        nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
 
index e306c20dfaee3fd2c54a7d4e3497ddd8ba787d19..f7d7f976064b6cf6c888a524e34726147fb03728 100644 (file)
@@ -1276,7 +1276,7 @@ int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
  */
 int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
 {
-       struct fw_cmd_hdr *cmd_hdr = (struct fw_cmd_hdr *)rpl;
+       const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl;
        u8 opcode = FW_CMD_OP_GET(be32_to_cpu(cmd_hdr->hi));
 
        switch (opcode) {
@@ -1284,7 +1284,8 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
                /*
                 * Link/module state change message.
                 */
-               const struct fw_port_cmd *port_cmd = (void *)rpl;
+               const struct fw_port_cmd *port_cmd =
+                       (const struct fw_port_cmd *)rpl;
                u32 word;
                int action, port_id, link_ok, speed, fc, pidx;
 
index 9f6aeefa06bf9dd8060da75e57d5b796450abd32..2d4c4fc1d90053fa6c04785eac027ef0a59d424c 100644 (file)
@@ -1675,7 +1675,7 @@ dm9000_drv_remove(struct platform_device *pdev)
        platform_set_drvdata(pdev, NULL);
 
        unregister_netdev(ndev);
-       dm9000_release_board(pdev, (board_info_t *) netdev_priv(ndev));
+       dm9000_release_board(pdev, netdev_priv(ndev));
        free_netdev(ndev);              /* free device structure */
 
        dev_dbg(&pdev->dev, "released and freed device\n");
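
netdev_priv() returns void *, so the explicit (board_info_t *) cast adds nothing and can simply be dropped. A one-function sketch of the idiom, with a hypothetical private structure:

#include <linux/netdevice.h>

struct example_priv {
	int irq;
};

static struct example_priv *example_get_priv(struct net_device *ndev)
{
	return netdev_priv(ndev);	/* void * converts implicitly in C */
}
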
index 7236f1a53ba0997da7ba66a218db4b56091faedf..9333921010cc23c7eff6d4a54ee30cbc3dc3fdbf 100644 (file)
@@ -74,6 +74,9 @@ static bool e1000_check_mng_mode_82574(struct e1000_hw *hw);
 static s32 e1000_led_on_82574(struct e1000_hw *hw);
 static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
 static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw);
+static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw);
+static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw);
+static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw);
 
 /**
  *  e1000_init_phy_params_82571 - Init PHY func ptrs.
@@ -107,6 +110,8 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
        case e1000_82574:
        case e1000_82583:
                phy->type                = e1000_phy_bm;
+               phy->ops.acquire = e1000_get_hw_semaphore_82574;
+               phy->ops.release = e1000_put_hw_semaphore_82574;
                break;
        default:
                return -E1000_ERR_PHY;
@@ -200,6 +205,17 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
                break;
        }
 
+       /* Function Pointers */
+       switch (hw->mac.type) {
+       case e1000_82574:
+       case e1000_82583:
+               nvm->ops.acquire = e1000_get_hw_semaphore_82574;
+               nvm->ops.release = e1000_put_hw_semaphore_82574;
+               break;
+       default:
+               break;
+       }
+
        return 0;
 }
 
@@ -542,6 +558,94 @@ static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
        swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
        ew32(SWSM, swsm);
 }
+/**
+ *  e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the HW semaphore during reset.
+ *
+ **/
+static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
+{
+       u32 extcnf_ctrl;
+       s32 ret_val = 0;
+       s32 i = 0;
+
+       extcnf_ctrl = er32(EXTCNF_CTRL);
+       extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+       do {
+               ew32(EXTCNF_CTRL, extcnf_ctrl);
+               extcnf_ctrl = er32(EXTCNF_CTRL);
+
+               if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
+                       break;
+
+               extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+
+               msleep(2);
+               i++;
+       } while (i < MDIO_OWNERSHIP_TIMEOUT);
+
+       if (i == MDIO_OWNERSHIP_TIMEOUT) {
+               /* Release semaphores */
+               e1000_put_hw_semaphore_82573(hw);
+               e_dbg("Driver can't access the PHY\n");
+               ret_val = -E1000_ERR_PHY;
+               goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_put_hw_semaphore_82573 - Release hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Release hardware semaphore used during reset.
+ *
+ **/
+static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw)
+{
+       u32 extcnf_ctrl;
+
+       extcnf_ctrl = er32(EXTCNF_CTRL);
+       extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+       ew32(EXTCNF_CTRL, extcnf_ctrl);
+}
+
+static DEFINE_MUTEX(swflag_mutex);
+
+/**
+ *  e1000_get_hw_semaphore_82574 - Acquire hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the HW semaphore to access the PHY or NVM.
+ *
+ **/
+static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw)
+{
+       s32 ret_val;
+
+       mutex_lock(&swflag_mutex);
+       ret_val = e1000_get_hw_semaphore_82573(hw);
+       if (ret_val)
+               mutex_unlock(&swflag_mutex);
+       return ret_val;
+}
+
+/**
+ *  e1000_put_hw_semaphore_82574 - Release hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Release hardware semaphore used to access the PHY or NVM
+ *
+ **/
+static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
+{
+       e1000_put_hw_semaphore_82573(hw);
+       mutex_unlock(&swflag_mutex);
+}
 
 /**
  *  e1000_acquire_nvm_82571 - Request for access to the EEPROM
@@ -562,8 +666,6 @@ static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
 
        switch (hw->mac.type) {
        case e1000_82573:
-       case e1000_82574:
-       case e1000_82583:
                break;
        default:
                ret_val = e1000e_acquire_nvm(hw);
@@ -853,9 +955,8 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
  **/
 static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 {
-       u32 ctrl, extcnf_ctrl, ctrl_ext, icr;
+       u32 ctrl, ctrl_ext, icr;
        s32 ret_val;
-       u16 i = 0;
 
        /*
         * Prevent the PCI-E bus from sticking if there is no TLP connection
@@ -880,33 +981,33 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
         */
        switch (hw->mac.type) {
        case e1000_82573:
+               ret_val = e1000_get_hw_semaphore_82573(hw);
+               break;
        case e1000_82574:
        case e1000_82583:
-               extcnf_ctrl = er32(EXTCNF_CTRL);
-               extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
-
-               do {
-                       ew32(EXTCNF_CTRL, extcnf_ctrl);
-                       extcnf_ctrl = er32(EXTCNF_CTRL);
-
-                       if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
-                               break;
-
-                       extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
-
-                       msleep(2);
-                       i++;
-               } while (i < MDIO_OWNERSHIP_TIMEOUT);
+               ret_val = e1000_get_hw_semaphore_82574(hw);
                break;
        default:
                break;
        }
+       if (ret_val)
+               e_dbg("Cannot acquire MDIO ownership\n");
 
        ctrl = er32(CTRL);
 
        e_dbg("Issuing a global reset to MAC\n");
        ew32(CTRL, ctrl | E1000_CTRL_RST);
 
+       /* Must release MDIO ownership and mutex after MAC reset. */
+       switch (hw->mac.type) {
+       case e1000_82574:
+       case e1000_82583:
+               e1000_put_hw_semaphore_82574(hw);
+               break;
+       default:
+               break;
+       }
+
        if (hw->nvm.type == e1000_nvm_flash_hw) {
                udelay(10);
                ctrl_ext = er32(CTRL_EXT);
@@ -1431,8 +1532,10 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
                         * auto-negotiation in the TXCW register and disable
                         * forced link in the Device Control register in an
                         * attempt to auto-negotiate with our link partner.
+                        * If the partner code word is null, stop forcing
+                        * and restart auto negotiation.
                         */
-                       if (rxcw & E1000_RXCW_C) {
+                       if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW))  {
                                /* Enable autoneg, and unforce link up */
                                ew32(TXCW, mac->txcw);
                                ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
index d3f7a9c3f9738e8ed6aa8a60078d9c93572f6a35..016ea383145add5f9c20a28524c6d3acb8c6e21f 100644 (file)
 #define E1000_TXCW_ANE        0x80000000        /* Auto-neg enable */
 
 /* Receive Configuration Word */
+#define E1000_RXCW_CW         0x0000ffff        /* RxConfigWord mask */
 #define E1000_RXCW_IV         0x08000000        /* Receive config invalid */
 #define E1000_RXCW_C          0x20000000        /* Receive config */
 #define E1000_RXCW_SYNCH      0x40000000        /* Receive config synch */
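
The new E1000_RXCW_CW mask covers the 16-bit configuration word received from the link partner, so the serdes link check can restart auto-negotiation either when the partner signals a config change or when the received code word is null. A sketch of the test with the register read abstracted away and hypothetical local names:

#include <linux/types.h>

#define EX_RXCW_CW	0x0000ffff	/* received configuration word */
#define EX_RXCW_C	0x20000000	/* partner is sending config */

/* true if we should unforce the link and re-enable autoneg */
static bool example_should_restart_autoneg(u32 rxcw)
{
	return (rxcw & EX_RXCW_C) || !(rxcw & EX_RXCW_CW);
}
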
index c4ca1629f532e49b502c78b76e0e99226c728379..9b3f0a996b00903763458150b82a32311645c1bd 100644 (file)
@@ -4595,7 +4595,7 @@ dma_error:
                        i += tx_ring->count;
                i--;
                buffer_info = &tx_ring->buffer_info[i];
-               e1000_put_txbuf(adapter, buffer_info);;
+               e1000_put_txbuf(adapter, buffer_info);
        }
 
        return 0;
@@ -5465,6 +5465,36 @@ static void e1000_shutdown(struct pci_dev *pdev)
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
+
+static irqreturn_t e1000_intr_msix(int irq, void *data)
+{
+       struct net_device *netdev = data;
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+       int vector, msix_irq;
+
+       if (adapter->msix_entries) {
+               vector = 0;
+               msix_irq = adapter->msix_entries[vector].vector;
+               disable_irq(msix_irq);
+               e1000_intr_msix_rx(msix_irq, netdev);
+               enable_irq(msix_irq);
+
+               vector++;
+               msix_irq = adapter->msix_entries[vector].vector;
+               disable_irq(msix_irq);
+               e1000_intr_msix_tx(msix_irq, netdev);
+               enable_irq(msix_irq);
+
+               vector++;
+               msix_irq = adapter->msix_entries[vector].vector;
+               disable_irq(msix_irq);
+               e1000_msix_other(msix_irq, netdev);
+               enable_irq(msix_irq);
+       }
+
+       return IRQ_HANDLED;
+}
+
 /*
  * Polling 'interrupt' - used by things like netconsole to send skbs
  * without having to re-enable interrupts. It's not called while
@@ -5474,10 +5504,21 @@ static void e1000_netpoll(struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
 
-       disable_irq(adapter->pdev->irq);
-       e1000_intr(adapter->pdev->irq, netdev);
-
-       enable_irq(adapter->pdev->irq);
+       switch (adapter->int_mode) {
+       case E1000E_INT_MODE_MSIX:
+               e1000_intr_msix(adapter->pdev->irq, netdev);
+               break;
+       case E1000E_INT_MODE_MSI:
+               disable_irq(adapter->pdev->irq);
+               e1000_intr_msi(adapter->pdev->irq, netdev);
+               enable_irq(adapter->pdev->irq);
+               break;
+       default: /* E1000E_INT_MODE_LEGACY */
+               disable_irq(adapter->pdev->irq);
+               e1000_intr(adapter->pdev->irq, netdev);
+               enable_irq(adapter->pdev->irq);
+               break;
+       }
 }
 #endif
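
With MSI-X there is no single legacy IRQ for netconsole to poke, so the netpoll hook above dispatches on the configured interrupt mode: it walks the MSI-X vectors (Rx, Tx, other), disabling each IRQ around its handler, and falls back to the single-vector MSI/legacy path otherwise. A sketch of the dispatch shape, with hypothetical handler hooks left as comments:

#include <linux/types.h>
#include <linux/interrupt.h>

/* netconsole-style polling: run handlers with their IRQ masked */
static void example_netpoll(int legacy_irq, int msix_irqs[], int nvec, bool msix)
{
	int i;

	if (msix) {
		for (i = 0; i < nvec; i++) {
			disable_irq(msix_irqs[i]);
			/* call the per-vector handler here (rx, tx, other) */
			enable_irq(msix_irqs[i]);
		}
	} else {
		disable_irq(legacy_irq);
		/* call the legacy or MSI handler here */
		enable_irq(legacy_irq);
	}
}
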
 
index 7c826319ee5a153111a59a1c517992dadcd5a84e..9e19fbc2f1764921d7b3a9a6f4b5ddf11b069910 100644 (file)
@@ -302,7 +302,7 @@ struct eepro_local {
 #define ee_id_eepro10p0 0x10   /* ID for eepro/10+ */
 #define ee_id_eepro10p1 0x31
 
-#define TX_TIMEOUT 40
+#define TX_TIMEOUT ((4*HZ)/10)
 
 /* Index to functions, as function prototypes. */
 
index c91d364c55279e81425ba50d80ec8cbcb9ec54c0..70672541364ec967781f0c4679047ab79d30bb1d 100644 (file)
@@ -32,7 +32,7 @@
 
 #define DRV_NAME               "enic"
 #define DRV_DESCRIPTION                "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION            "1.4.1.6"
+#define DRV_VERSION            "1.4.1.7"
 #define DRV_COPYRIGHT          "Copyright 2008-2010 Cisco Systems, Inc"
 
 #define ENIC_BARS_MAX          6
index a466ef91dd4351d58f37e4ed5b443395ce4725e6..9f293fa24768816b7e6e305f3ac24c93cecf4ec0 100644 (file)
@@ -2042,7 +2042,7 @@ static int enic_dev_hang_reset(struct enic *enic)
 
 static int enic_set_rsskey(struct enic *enic)
 {
-       u64 rss_key_buf_pa;
+       dma_addr_t rss_key_buf_pa;
        union vnic_rss_key *rss_key_buf_va = NULL;
        union vnic_rss_key rss_key = {
                .key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101},
@@ -2073,7 +2073,7 @@ static int enic_set_rsskey(struct enic *enic)
 
 static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
 {
-       u64 rss_cpu_buf_pa;
+       dma_addr_t rss_cpu_buf_pa;
        union vnic_rss_cpu *rss_cpu_buf_va = NULL;
        unsigned int i;
        int err;
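
pci_alloc_consistent() fills in a dma_addr_t, which need not have the same width as u64 on every platform, hence the retyped RSS key/CPU buffer handles above. A minimal sketch of the allocation idiom, with a hypothetical buffer type:

#include <linux/pci.h>

struct example_buf {
	u8 data[64];
};

static struct example_buf *example_alloc(struct pci_dev *pdev, dma_addr_t *pa)
{
	/* dma_addr_t, not u64, matches the DMA API's handle type */
	return pci_alloc_consistent(pdev, sizeof(struct example_buf), pa);
}
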
index e9f5d030bc267923acb405d394f1ba67be1101ae..50c1213f61fe938dea1b8877d1c2dbeede6c7d2a 100644 (file)
@@ -366,9 +366,8 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
 {
        struct net_device *dev = dev_id;
        struct mpc52xx_fec_priv *priv = netdev_priv(dev);
-       unsigned long flags;
 
-       spin_lock_irqsave(&priv->lock, flags);
+       spin_lock(&priv->lock);
        while (bcom_buffer_done(priv->tx_dmatsk)) {
                struct sk_buff *skb;
                struct bcom_fec_bd *bd;
@@ -379,7 +378,7 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
 
                dev_kfree_skb_irq(skb);
        }
-       spin_unlock_irqrestore(&priv->lock, flags);
+       spin_unlock(&priv->lock);
 
        netif_wake_queue(dev);
 
@@ -395,9 +394,8 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
        struct bcom_fec_bd *bd;
        u32 status, physaddr;
        int length;
-       unsigned long flags;
 
-       spin_lock_irqsave(&priv->lock, flags);
+       spin_lock(&priv->lock);
 
        while (bcom_buffer_done(priv->rx_dmatsk)) {
 
@@ -429,7 +427,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
 
                /* Process the received skb - Drop the spin lock while
                 * calling into the network stack */
-               spin_unlock_irqrestore(&priv->lock, flags);
+               spin_unlock(&priv->lock);
 
                dma_unmap_single(dev->dev.parent, physaddr, rskb->len,
                                 DMA_FROM_DEVICE);
@@ -438,10 +436,10 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
                rskb->protocol = eth_type_trans(rskb, dev);
                netif_rx(rskb);
 
-               spin_lock_irqsave(&priv->lock, flags);
+               spin_lock(&priv->lock);
        }
 
-       spin_unlock_irqrestore(&priv->lock, flags);
+       spin_unlock(&priv->lock);
 
        return IRQ_HANDLED;
 }
@@ -452,7 +450,6 @@ static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id)
        struct mpc52xx_fec_priv *priv = netdev_priv(dev);
        struct mpc52xx_fec __iomem *fec = priv->fec;
        u32 ievent;
-       unsigned long flags;
 
        ievent = in_be32(&fec->ievent);
 
@@ -470,9 +467,9 @@ static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id)
                if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR))
                        dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n");
 
-               spin_lock_irqsave(&priv->lock, flags);
+               spin_lock(&priv->lock);
                mpc52xx_fec_reset(dev);
-               spin_unlock_irqrestore(&priv->lock, flags);
+               spin_unlock(&priv->lock);
 
                return IRQ_HANDLED;
        }
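
These handlers run in hard-IRQ context, where local interrupts are already disabled, so the irqsave/irqrestore variants and the flags local are unnecessary; plain spin_lock/spin_unlock is enough, and the lock is still dropped around netif_rx() before calling into the stack. A minimal sketch of the pattern, assuming a hypothetical per-device lock:

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(example_lock);

static irqreturn_t example_isr(int irq, void *dev_id)
{
	/* hard-IRQ context: local interrupts are already off, no irqsave needed */
	spin_lock(&example_lock);
	/* drain completed descriptors here */
	spin_unlock(&example_lock);

	return IRQ_HANDLED;
}
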
index c2f150d8f2d9ee8e5568f6259cee71f337cb702b..0fa3db3dd8b664d133c80902e09e7bca2aacbcc9 100644 (file)
@@ -1,7 +1,7 @@
 ################################################################################
 #
 # Intel(R) 82576 Virtual Function Linux driver
-# Copyright(c) 2009 Intel Corporation.
+# Copyright(c) 2009 - 2010 Intel Corporation.
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,
index 88a47537518a80b19e3a4790b2858c87137110bd..79f2604673feaa8a40c306b559eeff1adb3f2e6c 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) 82576 Virtual Function Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index 33add708bcbece9b67a519c73c709c98f5f2d94b..abb3606928fb131c4878f1369824bf1f03b65ae8 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) 82576 Virtual Function Linux driver
-  Copyright(c) 2009 Intel Corporation.
+  Copyright(c) 2009 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index debeee2dc717c6f86261c53ea93f46b84a07e9ca..9d4d63e536d40e0daedade509b35e377a7ae65d7 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) 82576 Virtual Function Linux driver
-  Copyright(c) 2009 Intel Corporation.
+  Copyright(c) 2009 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -126,7 +126,6 @@ struct igbvf_buffer {
                        unsigned int page_offset;
                };
        };
-       struct page *page;
 };
 
 union igbvf_desc {
index 819a8ec901dcab9039ed97a558cc5364e2cd2ccd..3d6f4cc3998af9a97c3d1ba65977466f8728414d 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) 82576 Virtual Function Linux driver
-  Copyright(c) 2009 Intel Corporation.
+  Copyright(c) 2009 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index 4938609dbfb587bbba575be82250b76f02093585..c2883c45d477434aed8ecfaacd51b5e3503be009 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) 82576 Virtual Function Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index 28af019c97bb8bdd7bc413bd494a0cac6aef03dc..4c998b7726da1c4e15c432730834d6eb3fd74371 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) 82576 Virtual Function Linux driver
-  Copyright(c) 2009 Intel Corporation.
+  Copyright(c) 2009 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
 
 #include "igbvf.h"
 
-#define DRV_VERSION "1.0.0-k0"
+#define DRV_VERSION "1.0.8-k0"
 char igbvf_driver_name[] = "igbvf";
 const char igbvf_driver_version[] = DRV_VERSION;
 static const char igbvf_driver_string[] =
                                "Intel(R) Virtual Function Network Driver";
-static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
+static const char igbvf_copyright[] =
+                               "Copyright (c) 2009 - 2010 Intel Corporation.";
 
 static int igbvf_poll(struct napi_struct *napi, int budget);
 static void igbvf_reset(struct igbvf_adapter *);
@@ -1851,8 +1852,6 @@ static void igbvf_watchdog_task(struct work_struct *work)
 
        if (link) {
                if (!netif_carrier_ok(netdev)) {
-                       bool txb2b = 1;
-
                        mac->ops.get_link_up_info(&adapter->hw,
                                                  &adapter->link_speed,
                                                  &adapter->link_duplex);
@@ -1862,11 +1861,9 @@ static void igbvf_watchdog_task(struct work_struct *work)
                        adapter->tx_timeout_factor = 1;
                        switch (adapter->link_speed) {
                        case SPEED_10:
-                               txb2b = 0;
                                adapter->tx_timeout_factor = 16;
                                break;
                        case SPEED_100:
-                               txb2b = 0;
                                /* maybe add some timeout factor ? */
                                break;
                        }
index b9e24ed70d0a14e633d0cbbc9601ace8d87d6555..77e18d3d6b1537303dd2b8599a023d9caeed99b4 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) 82576 Virtual Function Linux driver
-  Copyright(c) 2009 Intel Corporation.
+  Copyright(c) 2009 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index a9a61efa964cf7b7cc32b439952ed2a444fa1ebe..0cc13c6ed4187efdd88f3d9684691d66e0ee15d7 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) 82576 Virtual Function Linux driver
-  Copyright(c) 2009 Intel Corporation.
+  Copyright(c) 2009 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index 1e8ce3741a67d554af5123b3022ffd233668431e..c36ea21f17fa7db8f739f37a0a6f93e117649673 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) 82576 Virtual Function Linux driver
-  Copyright(c) 2009 Intel Corporation.
+  Copyright(c) 2009 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index 8df645e78f2e9618bd22b83225efbfde4f43008a..38e15be6d5135e00af3766da939f5ab93c5db87b 100644 (file)
@@ -1605,7 +1605,7 @@ static int veth_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        }
        veth_dev[i] = dev;
 
-       port = (struct veth_port*)netdev_priv(dev);
+       port = netdev_priv(dev);
 
        /* Start the state machine on each connection on this vlan. If we're
         * the first dev to do so this will commence link negotiation */
index 8f81efb49169f150e3e45510117a904e6a7a7c82..7d7387fbdecd33e2c589857cbe5f375b576b0d21 100644 (file)
@@ -34,7 +34,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
 
 ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
               ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
-              ixgbe_mbx.o
+              ixgbe_mbx.o ixgbe_x540.o
 
 ixgbe-$(CONFIG_IXGBE_DCB) +=  ixgbe_dcb.o ixgbe_dcb_82598.o \
                               ixgbe_dcb_82599.o ixgbe_dcb_nl.o
index ed8703cfffb7ec06ede55d5dce2ff02324d47bd6..3ae30b8cb7d691a374ab0c36f18e4a182f5cf937 100644 (file)
 #define IXGBE_MIN_RXD                       64
 
 /* flow control */
-#define IXGBE_DEFAULT_FCRTL            0x10000
 #define IXGBE_MIN_FCRTL                           0x40
 #define IXGBE_MAX_FCRTL                        0x7FF80
-#define IXGBE_DEFAULT_FCRTH            0x20000
 #define IXGBE_MIN_FCRTH                          0x600
 #define IXGBE_MAX_FCRTH                        0x7FFF0
 #define IXGBE_DEFAULT_FCPAUSE           0xFFFF
@@ -130,7 +128,9 @@ struct ixgbe_tx_buffer {
        unsigned long time_stamp;
        u16 length;
        u16 next_to_watch;
-       u16 mapped_as_page;
+       unsigned int bytecount;
+       u16 gso_segs;
+       u8 mapped_as_page;
 };
 
 struct ixgbe_rx_buffer {
@@ -146,12 +146,56 @@ struct ixgbe_queue_stats {
        u64 bytes;
 };
 
+struct ixgbe_tx_queue_stats {
+       u64 restart_queue;
+       u64 tx_busy;
+       u64 completed;
+       u64 tx_done_old;
+};
+
+struct ixgbe_rx_queue_stats {
+       u64 rsc_count;
+       u64 rsc_flush;
+       u64 non_eop_descs;
+       u64 alloc_rx_page_failed;
+       u64 alloc_rx_buff_failed;
+};
+
+enum ixbge_ring_state_t {
+       __IXGBE_TX_FDIR_INIT_DONE,
+       __IXGBE_TX_DETECT_HANG,
+       __IXGBE_HANG_CHECK_ARMED,
+       __IXGBE_RX_PS_ENABLED,
+       __IXGBE_RX_RSC_ENABLED,
+};
+
+#define ring_is_ps_enabled(ring) \
+       test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define set_ring_ps_enabled(ring) \
+       set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define clear_ring_ps_enabled(ring) \
+       clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define check_for_tx_hang(ring) \
+       test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+       set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+       clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define ring_is_rsc_enabled(ring) \
+       test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+#define set_ring_rsc_enabled(ring) \
+       set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+#define clear_ring_rsc_enabled(ring) \
+       clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
 struct ixgbe_ring {
        void *desc;                     /* descriptor ring memory */
+       struct device *dev;             /* device for DMA mapping */
+       struct net_device *netdev;      /* netdev ring belongs to */
        union {
                struct ixgbe_tx_buffer *tx_buffer_info;
                struct ixgbe_rx_buffer *rx_buffer_info;
        };
+       unsigned long state;
        u8 atr_sample_rate;
        u8 atr_count;
        u16 count;                      /* amount of descriptors */
@@ -160,38 +204,30 @@ struct ixgbe_ring {
        u16 next_to_clean;
 
        u8 queue_index; /* needed for multiqueue queue management */
-
-#define IXGBE_RING_RX_PS_ENABLED                (u8)(1)
-       u8 flags;                       /* per ring feature flags */
-       u16 head;
-       u16 tail;
-
-       unsigned int total_bytes;
-       unsigned int total_packets;
-
-#ifdef CONFIG_IXGBE_DCA
-       /* cpu for tx queue */
-       int cpu;
-#endif
-
-       u16 work_limit;                 /* max work per interrupt */
-       u16 reg_idx;                    /* holds the special value that gets
+       u8 reg_idx;                     /* holds the special value that gets
                                         * the hardware register offset
                                         * associated with this ring, which is
                                         * different for DCB and RSS modes
                                         */
 
+       u16 work_limit;                 /* max work per interrupt */
+
+       u8 __iomem *tail;
+
+       unsigned int total_bytes;
+       unsigned int total_packets;
+
        struct ixgbe_queue_stats stats;
        struct u64_stats_sync syncp;
+       union {
+               struct ixgbe_tx_queue_stats tx_stats;
+               struct ixgbe_rx_queue_stats rx_stats;
+       };
        int numa_node;
-       unsigned long reinit_state;
-       u64 rsc_count;                  /* stat for coalesced packets */
-       u64 rsc_flush;                  /* stats for flushed packets */
-       u32 restart_queue;              /* track tx queue restarts */
-       u32 non_eop_descs;              /* track hardware descriptor chaining */
-
        unsigned int size;              /* length in bytes */
        dma_addr_t dma;                 /* phys. address of descriptor ring */
+       struct rcu_head rcu;
+       struct ixgbe_q_vector *q_vector; /* back-pointer to host q_vector */
 } ____cacheline_internodealigned_in_smp;
 
 enum ixgbe_ring_f_enum {
@@ -237,6 +273,9 @@ struct ixgbe_q_vector {
        unsigned int v_idx; /* index of q_vector within array, also used for
                             * finding the bit in EICR and friends that
                             * represents the vector for this ring */
+#ifdef CONFIG_IXGBE_DCA
+       int cpu;            /* CPU for DCA */
+#endif
        struct napi_struct napi;
        DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
        DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
@@ -246,6 +285,7 @@ struct ixgbe_q_vector {
        u8 rx_itr;
        u32 eitr;
        cpumask_var_t affinity_mask;
+       char name[IFNAMSIZ + 9];
 };
 
 /* Helper macros to switch between ints/sec and what the register uses.
@@ -294,7 +334,6 @@ struct ixgbe_adapter {
        u16 bd_number;
        struct work_struct reset_task;
        struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
-       char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
        struct ixgbe_dcb_config dcb_cfg;
        struct ixgbe_dcb_config temp_dcb_cfg;
        u8 dcb_set_bitmap;
@@ -417,6 +456,7 @@ struct ixgbe_adapter {
        int node;
        struct work_struct check_overtemp_task;
        u32 interrupt_event;
+       char lsc_int_name[IFNAMSIZ + 9];
 
        /* SR-IOV */
        DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
@@ -428,17 +468,25 @@ enum ixbge_state_t {
        __IXGBE_TESTING,
        __IXGBE_RESETTING,
        __IXGBE_DOWN,
-       __IXGBE_FDIR_INIT_DONE,
        __IXGBE_SFP_MODULE_NOT_FOUND
 };
 
+struct ixgbe_rsc_cb {
+       dma_addr_t dma;
+       u16 skb_cnt;
+       bool delay_unmap;
+};
+#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
+
 enum ixgbe_boards {
        board_82598,
        board_82599,
+       board_X540,
 };
 
 extern struct ixgbe_info ixgbe_82598_info;
 extern struct ixgbe_info ixgbe_82599_info;
+extern struct ixgbe_info ixgbe_X540_info;
 #ifdef CONFIG_IXGBE_DCB
 extern const struct dcbnl_rtnl_ops dcbnl_ops;
 extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
@@ -454,26 +502,24 @@ extern void ixgbe_down(struct ixgbe_adapter *adapter);
 extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
 extern void ixgbe_reset(struct ixgbe_adapter *adapter);
 extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
-extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
-extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
-extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
-extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
+extern int ixgbe_setup_rx_resources(struct ixgbe_ring *);
+extern int ixgbe_setup_tx_resources(struct ixgbe_ring *);
+extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
+extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
 extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
 extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
 extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
 extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
 extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
 extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
-                                        struct net_device *,
                                         struct ixgbe_adapter *,
                                         struct ixgbe_ring *);
-extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *,
+extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
                                              struct ixgbe_tx_buffer *);
-extern void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
-                                   struct ixgbe_ring *rx_ring,
-                                   int cleaned_count);
+extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
 extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
 extern int ethtool_ioctl(struct ifreq *ifr);
+extern u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 index);
 extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
 extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
 extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
@@ -498,6 +544,10 @@ extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
                                          u16 flex_byte);
 extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
                                       u8 l4type);
+extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
+                                   struct ixgbe_ring *ring);
+extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
+                               struct ixgbe_ring *ring);
 extern void ixgbe_set_rx_mode(struct net_device *netdev);
 #ifdef IXGBE_FCOE
 extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
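
The per-ring feature byte and assorted ad-hoc fields above are replaced by a state bitmap plus test_bit/set_bit/clear_bit wrappers (ring_is_ps_enabled(), set_check_for_tx_hang(), and so on), with the per-queue counters moved into a tx/rx stats union. A minimal sketch of the bitmap-helper pattern, with hypothetical names:

#include <linux/bitops.h>

enum example_ring_state {
	__EXAMPLE_RX_PS_ENABLED,
	__EXAMPLE_TX_DETECT_HANG,
};

struct example_ring {
	unsigned long state;		/* atomically updated per-ring flag word */
};

#define example_ring_is_ps_enabled(r) \
	test_bit(__EXAMPLE_RX_PS_ENABLED, &(r)->state)
#define example_set_ring_ps_enabled(r) \
	set_bit(__EXAMPLE_RX_PS_ENABLED, &(r)->state)
#define example_clear_ring_ps_enabled(r) \
	clear_bit(__EXAMPLE_RX_PS_ENABLED, &(r)->state)
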
index 9c02d6014cc43856fa405d8b0c0cc140aef63dde..d0f1d9d2c416e2838d0816739eceb8907ffd8c59 100644 (file)
@@ -38,9 +38,6 @@
 #define IXGBE_82598_MC_TBL_SIZE  128
 #define IXGBE_82598_VFT_TBL_SIZE 128
 
-static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
-                                             ixgbe_link_speed *speed,
-                                             bool *autoneg);
 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
                                          ixgbe_link_speed speed,
                                          bool autoneg,
@@ -156,7 +153,7 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
        if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
                mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
                mac->ops.get_link_capabilities =
-                                 &ixgbe_get_copper_link_capabilities_82598;
+                       &ixgbe_get_copper_link_capabilities_generic;
        }
 
        switch (hw->phy.type) {
@@ -273,37 +270,6 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
        return status;
 }
 
-/**
- *  ixgbe_get_copper_link_capabilities_82598 - Determines link capabilities
- *  @hw: pointer to hardware structure
- *  @speed: pointer to link speed
- *  @autoneg: boolean auto-negotiation value
- *
- *  Determines the link capabilities by reading the AUTOC register.
- **/
-static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
-                                                   ixgbe_link_speed *speed,
-                                                   bool *autoneg)
-{
-       s32 status = IXGBE_ERR_LINK_SETUP;
-       u16 speed_ability;
-
-       *speed = 0;
-       *autoneg = true;
-
-       status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
-                                     &speed_ability);
-
-       if (status == 0) {
-               if (speed_ability & MDIO_SPEED_10G)
-                   *speed |= IXGBE_LINK_SPEED_10GB_FULL;
-               if (speed_ability & MDIO_PMA_SPEED_1000)
-                   *speed |= IXGBE_LINK_SPEED_1GB_FULL;
-       }
-
-       return status;
-}
-
 /**
  *  ixgbe_get_media_type_82598 - Determines media type
  *  @hw: pointer to hardware structure
@@ -357,6 +323,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
        u32 fctrl_reg;
        u32 rmcs_reg;
        u32 reg;
+       u32 rx_pba_size;
        u32 link_speed = 0;
        bool link_up;
 
@@ -459,16 +426,18 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
 
        /* Set up and enable Rx high/low water mark thresholds, enable XON. */
        if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
-               if (hw->fc.send_xon) {
-                       IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
-                                       (hw->fc.low_water | IXGBE_FCRTL_XONE));
-               } else {
-                       IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
-                                       hw->fc.low_water);
-               }
+               rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
+               rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
+
+               reg = (rx_pba_size - hw->fc.low_water) << 6;
+               if (hw->fc.send_xon)
+                       reg |= IXGBE_FCRTL_XONE;
+               IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
+
+               reg = (rx_pba_size - hw->fc.high_water) << 10;
+               reg |= IXGBE_FCRTH_FCEN;
 
-               IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
-                               (hw->fc.high_water | IXGBE_FCRTH_FCEN));
+               IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
        }
 
        /* Configure pause time (2 TCs per register) */
@@ -1222,6 +1191,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
 static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
        .init_params            = &ixgbe_init_eeprom_params_generic,
        .read                   = &ixgbe_read_eerd_generic,
+       .calc_checksum          = &ixgbe_calc_eeprom_checksum_generic,
        .validate_checksum      = &ixgbe_validate_eeprom_checksum_generic,
        .update_checksum        = &ixgbe_update_eeprom_checksum_generic,
 };
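
Instead of writing hw->fc.low_water/high_water straight into FCRTL/FCRTH, the 82598 flow-control path above derives the thresholds from the per-TC Rx packet-buffer size: RXPBSIZE is shifted down by IXGBE_RXPBSIZE_SHIFT, the water marks are subtracted, and the results are scaled back into register units (<< 6 for the low mark, << 10 for the high mark). A sketch of that computation only, assuming the driver's register/bit definitions from ixgbe_type.h:

#include "ixgbe_type.h"	/* IXGBE_FCRTL_XONE, IXGBE_FCRTH_FCEN */

/* rx_pba_size is IXGBE_RXPBSIZE(tc) >> IXGBE_RXPBSIZE_SHIFT */
static u32 example_fcrtl_82598(u32 rx_pba_size, u32 low_water, bool send_xon)
{
	u32 reg = (rx_pba_size - low_water) << 6;

	if (send_xon)
		reg |= IXGBE_FCRTL_XONE;	/* enable XON frame transmission */
	return reg;
}

static u32 example_fcrth_82598(u32 rx_pba_size, u32 high_water)
{
	return ((rx_pba_size - high_water) << 10) | IXGBE_FCRTH_FCEN;
}
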
index 0bd8fbb5bfd0dc5c32771e3521744a1f50808e64..e34643eef1627c3d3a43633902bb55a2e3e31788 100644 (file)
@@ -56,9 +56,6 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
                                ixgbe_link_speed speed,
                                bool autoneg,
                                bool autoneg_wait_to_complete);
-static s32 ixgbe_get_copper_link_capabilities_82599(struct ixgbe_hw *hw,
-                                             ixgbe_link_speed *speed,
-                                             bool *autoneg);
 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
                                          ixgbe_link_speed speed,
                                          bool autoneg,
@@ -174,7 +171,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
        if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
                mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
                mac->ops.get_link_capabilities =
-                                 &ixgbe_get_copper_link_capabilities_82599;
+                       &ixgbe_get_copper_link_capabilities_generic;
        }
 
        /* Set necessary function pointers based on phy type */
@@ -184,6 +181,10 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
                phy->ops.get_firmware_version =
                             &ixgbe_get_phy_firmware_version_tnx;
                break;
+       case ixgbe_phy_aq:
+               phy->ops.get_firmware_version =
+                       &ixgbe_get_phy_firmware_version_generic;
+               break;
        default:
                break;
        }
@@ -289,37 +290,6 @@ out:
        return status;
 }
 
-/**
- *  ixgbe_get_copper_link_capabilities_82599 - Determines link capabilities
- *  @hw: pointer to hardware structure
- *  @speed: pointer to link speed
- *  @autoneg: boolean auto-negotiation value
- *
- *  Determines the link capabilities by reading the AUTOC register.
- **/
-static s32 ixgbe_get_copper_link_capabilities_82599(struct ixgbe_hw *hw,
-                                                    ixgbe_link_speed *speed,
-                                                    bool *autoneg)
-{
-       s32 status = IXGBE_ERR_LINK_SETUP;
-       u16 speed_ability;
-
-       *speed = 0;
-       *autoneg = true;
-
-       status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
-                                     &speed_ability);
-
-       if (status == 0) {
-               if (speed_ability & MDIO_SPEED_10G)
-                   *speed |= IXGBE_LINK_SPEED_10GB_FULL;
-               if (speed_ability & MDIO_PMA_SPEED_1000)
-                   *speed |= IXGBE_LINK_SPEED_1GB_FULL;
-       }
-
-       return status;
-}
-
 /**
  *  ixgbe_get_media_type_82599 - Get media type
  *  @hw: pointer to hardware structure
@@ -332,7 +302,8 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
 
        /* Detect if there is a copper PHY attached. */
        if (hw->phy.type == ixgbe_phy_cu_unknown ||
-           hw->phy.type == ixgbe_phy_tn) {
+           hw->phy.type == ixgbe_phy_tn ||
+           hw->phy.type == ixgbe_phy_aq) {
                media_type = ixgbe_media_type_copper;
                goto out;
        }
@@ -1924,6 +1895,7 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
        hw->phy.ops.identify(hw);
 
        if (hw->phy.type == ixgbe_phy_tn ||
+           hw->phy.type == ixgbe_phy_aq ||
            hw->phy.type == ixgbe_phy_cu_unknown) {
                hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
                                     &ext_ability);
@@ -2125,51 +2097,6 @@ fw_version_out:
        return status;
 }
 
-/**
- *  ixgbe_get_wwn_prefix_82599 - Get alternative WWNN/WWPN prefix from
- *  the EEPROM
- *  @hw: pointer to hardware structure
- *  @wwnn_prefix: the alternative WWNN prefix
- *  @wwpn_prefix: the alternative WWPN prefix
- *
- *  This function will read the EEPROM from the alternative SAN MAC address
- *  block to check the support for the alternative WWNN/WWPN prefix support.
- **/
-static s32 ixgbe_get_wwn_prefix_82599(struct ixgbe_hw *hw, u16 *wwnn_prefix,
-                                      u16 *wwpn_prefix)
-{
-       u16 offset, caps;
-       u16 alt_san_mac_blk_offset;
-
-       /* clear output first */
-       *wwnn_prefix = 0xFFFF;
-       *wwpn_prefix = 0xFFFF;
-
-       /* check if alternative SAN MAC is supported */
-       hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
-                           &alt_san_mac_blk_offset);
-
-       if ((alt_san_mac_blk_offset == 0) ||
-           (alt_san_mac_blk_offset == 0xFFFF))
-               goto wwn_prefix_out;
-
-       /* check capability in alternative san mac address block */
-       offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
-       hw->eeprom.ops.read(hw, offset, &caps);
-       if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
-               goto wwn_prefix_out;
-
-       /* get the corresponding prefix for WWNN/WWPN */
-       offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
-       hw->eeprom.ops.read(hw, offset, wwnn_prefix);
-
-       offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
-       hw->eeprom.ops.read(hw, offset, wwpn_prefix);
-
-wwn_prefix_out:
-       return 0;
-}
-
 static struct ixgbe_mac_operations mac_ops_82599 = {
        .init_hw                = &ixgbe_init_hw_generic,
        .reset_hw               = &ixgbe_reset_hw_82599,
@@ -2181,7 +2108,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
        .get_mac_addr           = &ixgbe_get_mac_addr_generic,
        .get_san_mac_addr       = &ixgbe_get_san_mac_addr_generic,
        .get_device_caps        = &ixgbe_get_device_caps_82599,
-       .get_wwn_prefix         = &ixgbe_get_wwn_prefix_82599,
+       .get_wwn_prefix         = &ixgbe_get_wwn_prefix_generic,
        .stop_adapter           = &ixgbe_stop_adapter_generic,
        .get_bus_info           = &ixgbe_get_bus_info_generic,
        .set_lan_id             = &ixgbe_set_lan_id_multi_port_pcie,
@@ -2214,6 +2141,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
        .init_params            = &ixgbe_init_eeprom_params_generic,
        .read                   = &ixgbe_read_eerd_generic,
        .write                  = &ixgbe_write_eeprom_generic,
+       .calc_checksum          = &ixgbe_calc_eeprom_checksum_generic,
        .validate_checksum      = &ixgbe_validate_eeprom_checksum_generic,
        .update_checksum        = &ixgbe_update_eeprom_checksum_generic,
 };
@@ -2240,5 +2168,5 @@ struct ixgbe_info ixgbe_82599_info = {
        .mac_ops                = &mac_ops_82599,
        .eeprom_ops             = &eeprom_ops_82599,
        .phy_ops                = &phy_ops_82599,
-       .mbx_ops                = &mbx_ops_82599,
+       .mbx_ops                = &mbx_ops_generic,
 };
index e3eca13163891c7ba339b71c1b95768247c4542e..56052570cac570f93e41710a04cce7bbaa86cbd8 100644 (file)
@@ -45,14 +45,12 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
 static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
-static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw);
 
 static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
 static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
 static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
 static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
-static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
 
 /**
  *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
@@ -638,7 +636,7 @@ out:
  *  Polls the status bit (bit 1) of the EERD or EEWR to determine when the
  *  read or write is done respectively.
  **/
-static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
+s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
 {
        u32 i;
        u32 reg;
@@ -1009,7 +1007,7 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
  *  ixgbe_calc_eeprom_checksum - Calculates and returns the checksum
  *  @hw: pointer to hardware structure
  **/
-static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw)
+u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
 {
        u16 i;
        u16 j;
@@ -1072,7 +1070,7 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
        status = hw->eeprom.ops.read(hw, 0, &checksum);
 
        if (status == 0) {
-               checksum = ixgbe_calc_eeprom_checksum(hw);
+               checksum = hw->eeprom.ops.calc_checksum(hw);
 
                hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
 
@@ -1110,7 +1108,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
        status = hw->eeprom.ops.read(hw, 0, &checksum);
 
        if (status == 0) {
-               checksum = ixgbe_calc_eeprom_checksum(hw);
+               checksum = hw->eeprom.ops.calc_checksum(hw);
                status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
                                            checksum);
        } else {
@@ -1595,6 +1593,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
        u32 mflcn_reg, fccfg_reg;
        u32 reg;
        u32 rx_pba_size;
+       u32 fcrtl, fcrth;
 
 #ifdef CONFIG_DCB
        if (hw->fc.requested_mode == ixgbe_fc_pfc)
@@ -1671,41 +1670,21 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
        IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
        IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
 
-       reg = IXGBE_READ_REG(hw, IXGBE_MTQC);
-       /* Thresholds are different for link flow control when in DCB mode */
-       if (reg & IXGBE_MTQC_RT_ENA) {
-               rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
+       rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
+       rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
 
-               /* Always disable XON for LFC when in DCB mode */
-               reg = (rx_pba_size >> 5) & 0xFFE0;
-               IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), reg);
+       fcrth = (rx_pba_size - hw->fc.high_water) << 10;
+       fcrtl = (rx_pba_size - hw->fc.low_water) << 10;
 
-               reg = (rx_pba_size >> 2) & 0xFFE0;
-               if (hw->fc.current_mode & ixgbe_fc_tx_pause)
-                       reg |= IXGBE_FCRTH_FCEN;
-               IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), reg);
-       } else {
-               /*
-                * Set up and enable Rx high/low water mark thresholds,
-                * enable XON.
-                */
-               if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
-                       if (hw->fc.send_xon) {
-                               IXGBE_WRITE_REG(hw,
-                                             IXGBE_FCRTL_82599(packetbuf_num),
-                                             (hw->fc.low_water |
-                                             IXGBE_FCRTL_XONE));
-                       } else {
-                               IXGBE_WRITE_REG(hw,
-                                             IXGBE_FCRTL_82599(packetbuf_num),
-                                             hw->fc.low_water);
-                       }
-
-                       IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num),
-                                      (hw->fc.high_water | IXGBE_FCRTH_FCEN));
-               }
+       if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
+               fcrth |= IXGBE_FCRTH_FCEN;
+               if (hw->fc.send_xon)
+                       fcrtl |= IXGBE_FCRTL_XONE;
        }
 
+       IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth);
+       IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl);
+
        /* Configure pause time (2 TCs per register) */
        reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
        if ((packetbuf_num & 1) == 0)
@@ -2705,3 +2684,48 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
 
        return 0;
 }
+
+/**
+ *  ixgbe_get_wwn_prefix_generic Get alternative WWNN/WWPN prefix from
+ *  the EEPROM
+ *  @hw: pointer to hardware structure
+ *  @wwnn_prefix: the alternative WWNN prefix
+ *  @wwpn_prefix: the alternative WWPN prefix
+ *
+ *  This function will read the EEPROM from the alternative SAN MAC address
+ *  block to check the support for the alternative WWNN/WWPN prefix support.
+ **/
+s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+                                        u16 *wwpn_prefix)
+{
+       u16 offset, caps;
+       u16 alt_san_mac_blk_offset;
+
+       /* clear output first */
+       *wwnn_prefix = 0xFFFF;
+       *wwpn_prefix = 0xFFFF;
+
+       /* check if alternative SAN MAC is supported */
+       hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
+                           &alt_san_mac_blk_offset);
+
+       if ((alt_san_mac_blk_offset == 0) ||
+           (alt_san_mac_blk_offset == 0xFFFF))
+               goto wwn_prefix_out;
+
+       /* check capability in alternative san mac address block */
+       offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
+       hw->eeprom.ops.read(hw, offset, &caps);
+       if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
+               goto wwn_prefix_out;
+
+       /* get the corresponding prefix for WWNN/WWPN */
+       offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
+       hw->eeprom.ops.read(hw, offset, wwnn_prefix);
+
+       offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
+       hw->eeprom.ops.read(hw, offset, wwpn_prefix);
+
+wwn_prefix_out:
+       return 0;
+}
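
Turning the checksum calculation into an eeprom_operations member lets validate/update call whichever implementation the MAC provides instead of a hard-coded static helper. A sketch of the ops-table indirection with hypothetical structures:

#include <linux/types.h>

struct example_hw;

struct example_eeprom_ops {
	u16 (*calc_checksum)(struct example_hw *hw);	/* per-MAC routine */
};

struct example_hw {
	struct example_eeprom_ops eeprom_ops;
};

static u16 example_validate_checksum(struct example_hw *hw)
{
	/* dispatch through the ops table rather than a fixed helper */
	return hw->eeprom_ops.calc_checksum(hw);
}
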
index 424c223437dcf909c5248485adad16030f68e0bd..341ca514a2810682249e18ea1a1abb4d79e2ca87 100644 (file)
@@ -49,9 +49,11 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
 s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
                                        u16 *data);
+u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
                                            u16 *checksum_val);
 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
+s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
 
 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
                           u32 enable_addr);
@@ -81,7 +83,8 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
                                  ixgbe_link_speed *speed,
                                  bool *link_up, bool link_up_wait_to_complete);
-
+s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+                                 u16 *wwpn_prefix);
 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
 
index 0d44c6470ca36429ca020a0c81105fecbf6e0cdd..d16c260c1f50adaf0c93aaf6cc2eae9b807c0f5b 100644 (file)
@@ -42,7 +42,8 @@
  * It should be called only after the rules are checked by
  * ixgbe_dcb_check_config().
  */
-s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
+s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
+                                  struct ixgbe_dcb_config *dcb_config,
                                   int max_frame, u8 direction)
 {
        struct tc_bw_alloc *p;
@@ -124,7 +125,8 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
                         * credit may not be enough to send out a TSO
                         * packet in descriptor plane arbitration.
                         */
-                       if (credit_max &&
+                       if ((hw->mac.type == ixgbe_mac_82598EB) &&
+                           credit_max &&
                            (credit_max < MINIMUM_CREDIT_FOR_TSO))
                                credit_max = MINIMUM_CREDIT_FOR_TSO;
 
@@ -150,10 +152,17 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
                         struct ixgbe_dcb_config *dcb_config)
 {
        s32 ret = 0;
-       if (hw->mac.type == ixgbe_mac_82598EB)
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
                ret = ixgbe_dcb_hw_config_82598(hw, dcb_config);
-       else if (hw->mac.type == ixgbe_mac_82599EB)
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                ret = ixgbe_dcb_hw_config_82599(hw, dcb_config);
+               break;
+       default:
+               break;
+       }
        return ret;
 }
 
index 0208a87b129e3d6d0bc1c64045964c661ed7e319..1cfe38ee16440e0a7c3d6fce1fb858ef18fd68c4 100644 (file)
@@ -150,7 +150,8 @@ struct ixgbe_dcb_config {
 /* DCB driver APIs */
 
 /* DCB credits calculation */
-s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, int, u8);
+s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
+                                  struct ixgbe_dcb_config *, int, u8);
 
 /* DCB hw initialization */
 s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
index 50288bcadc5985bac86d29e2ff12abd1777875bc..9a5e89c12e050281af83dbf2b143264fdf974fe3 100644 (file)
@@ -256,21 +256,17 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
         * for each traffic class.
         */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-               if (dcb_config->rx_pba_cfg == pba_equal) {
-                       rx_pba_size = IXGBE_RXPBSIZE_64KB;
-               } else {
-                       rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB
-                                             : IXGBE_RXPBSIZE_48KB;
-               }
+               rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
+               rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
+               reg = (rx_pba_size - hw->fc.low_water) << 10;
 
-               reg = ((rx_pba_size >> 5) &  0xFFF0);
                if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
                    dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
                        reg |= IXGBE_FCRTL_XONE;
 
                IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);
 
-               reg = ((rx_pba_size >> 2) & 0xFFF0);
+               reg = (rx_pba_size - hw->fc.high_water) << 10;
                if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
                    dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
                        reg |= IXGBE_FCRTH_FCEN;
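The XON/XOFF thresholds are now derived from the per-TC packet buffer size
read back from hardware instead of fixed constants. A worked example of the
arithmetic, assuming (as the shifts by IXGBE_RXPBSIZE_SHIFT and by 10
suggest) that the buffer size and the low/high water marks are all in KB;
the numbers are illustrative only:

/* Editorial sketch, not part of the patch */
u32 rx_pba_kb    = 64;                               /* 64 KB packet buffer  */
u32 low_water_kb = 16;                               /* hypothetical XON mark */
u32 fcrtl_bytes  = (rx_pba_kb - low_water_kb) << 10; /* 48 KB = 49152 bytes  */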
index 05f224715073cf46e42e52db91a1b57fca226ddc..374e1f74d0f51c20bbfe6e245d8500f90a015dab 100644 (file)
@@ -251,19 +251,17 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
 
        /* Configure PFC Tx thresholds per TC */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-               if (dcb_config->rx_pba_cfg == pba_equal)
-                       rx_pba_size = IXGBE_RXPBSIZE_64KB;
-               else
-                       rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB
-                                             : IXGBE_RXPBSIZE_48KB;
+               rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
+               rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
+
+               reg = (rx_pba_size - hw->fc.low_water) << 10;
 
-               reg = ((rx_pba_size >> 5) & 0xFFE0);
                if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
                    dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
                        reg |= IXGBE_FCRTL_XONE;
                IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
 
-               reg = ((rx_pba_size >> 2) & 0xFFE0);
+               reg = (rx_pba_size - hw->fc.high_water) << 10;
                if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
                    dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
                        reg |= IXGBE_FCRTH_FCEN;
index b53b465e24af471f32e72d2d191d98751d43b6d3..bf566e8a455e485978c66d8478166b0c885e64af 100644 (file)
@@ -130,15 +130,21 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
                        netdev->netdev_ops->ndo_stop(netdev);
                ixgbe_clear_interrupt_scheme(adapter);
 
-               if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+               adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+               switch (adapter->hw.mac.type) {
+               case ixgbe_mac_82598EB:
                        adapter->last_lfc_mode = adapter->hw.fc.current_mode;
                        adapter->hw.fc.requested_mode = ixgbe_fc_none;
-               }
-               adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
-               if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+                       break;
+               case ixgbe_mac_82599EB:
+               case ixgbe_mac_X540:
                        adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
                        adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+                       break;
+               default:
+                       break;
                }
+
                adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
                ixgbe_init_interrupt_scheme(adapter);
                if (netif_running(netdev))
@@ -155,8 +161,14 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
                        adapter->dcb_cfg.pfc_mode_enable = false;
                        adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
                        adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
-                       if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+                       switch (adapter->hw.mac.type) {
+                       case ixgbe_mac_82599EB:
+                       case ixgbe_mac_X540:
                                adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+                               break;
+                       default:
+                               break;
+                       }
 
                        ixgbe_init_interrupt_scheme(adapter);
                        if (netif_running(netdev))
@@ -178,9 +190,14 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
        for (i = 0; i < netdev->addr_len; i++)
                perm_addr[i] = adapter->hw.mac.perm_addr[i];
 
-       if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                for (j = 0; j < netdev->addr_len; j++, i++)
                        perm_addr[i] = adapter->hw.mac.san_addr[j];
+               break;
+       default:
+               break;
        }
 }
 
@@ -366,15 +383,29 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
        }
 
        if (adapter->dcb_cfg.pfc_mode_enable) {
-               if ((adapter->hw.mac.type != ixgbe_mac_82598EB) &&
-                       (adapter->hw.fc.current_mode != ixgbe_fc_pfc))
-                       adapter->last_lfc_mode = adapter->hw.fc.current_mode;
+               switch (adapter->hw.mac.type) {
+               case ixgbe_mac_82599EB:
+               case ixgbe_mac_X540:
+                       if (adapter->hw.fc.current_mode != ixgbe_fc_pfc)
+                               adapter->last_lfc_mode =
+                                                 adapter->hw.fc.current_mode;
+                       break;
+               default:
+                       break;
+               }
                adapter->hw.fc.requested_mode = ixgbe_fc_pfc;
        } else {
-               if (adapter->hw.mac.type != ixgbe_mac_82598EB)
-                       adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
-               else
+               switch (adapter->hw.mac.type) {
+               case ixgbe_mac_82598EB:
                        adapter->hw.fc.requested_mode = ixgbe_fc_none;
+                       break;
+               case ixgbe_mac_82599EB:
+               case ixgbe_mac_X540:
+                       adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
+                       break;
+               default:
+                       break;
+               }
        }
 
        if (adapter->dcb_set_bitmap & BIT_RESETLINK) {
index 3dc731c22ff2480af0a068cea4dfe0a677486407..f9b58394fbb6a90afc149b54e9098c6aad7ca4f4 100644 (file)
@@ -185,6 +185,16 @@ static int ixgbe_get_settings(struct net_device *netdev,
                                             ADVERTISED_FIBRE);
                        ecmd->port = PORT_FIBRE;
                        ecmd->autoneg = AUTONEG_DISABLE;
+               } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) ||
+                          (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
+                       ecmd->supported |= (SUPPORTED_1000baseT_Full |
+                                           SUPPORTED_Autoneg |
+                                           SUPPORTED_FIBRE);
+                       ecmd->advertising = (ADVERTISED_10000baseT_Full |
+                                            ADVERTISED_1000baseT_Full |
+                                            ADVERTISED_Autoneg |
+                                            ADVERTISED_FIBRE);
+                       ecmd->port = PORT_FIBRE;
                } else {
                        ecmd->supported |= (SUPPORTED_1000baseT_Full |
                                            SUPPORTED_FIBRE);
@@ -204,6 +214,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
        /* Get PHY type */
        switch (adapter->hw.phy.type) {
        case ixgbe_phy_tn:
+       case ixgbe_phy_aq:
        case ixgbe_phy_cu_unknown:
                /* Copper 10G-BASET */
                ecmd->port = PORT_TP;
@@ -332,13 +343,6 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
        else
                pause->autoneg = 1;
 
-#ifdef CONFIG_DCB
-       if (hw->fc.current_mode == ixgbe_fc_pfc) {
-               pause->rx_pause = 0;
-               pause->tx_pause = 0;
-       }
-
-#endif
        if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
                pause->rx_pause = 1;
        } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
@@ -346,6 +350,11 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
        } else if (hw->fc.current_mode == ixgbe_fc_full) {
                pause->rx_pause = 1;
                pause->tx_pause = 1;
+#ifdef CONFIG_DCB
+       } else if (hw->fc.current_mode == ixgbe_fc_pfc) {
+               pause->rx_pause = 0;
+               pause->tx_pause = 0;
+#endif
        }
 }
 
@@ -363,7 +372,6 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
                return -EINVAL;
 
 #endif
-
        fc = hw->fc;
 
        if (pause->autoneg != AUTONEG_ENABLE)
@@ -412,11 +420,6 @@ static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
        else
                adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
 
-       if (netif_running(netdev))
-               ixgbe_reinit_locked(adapter);
-       else
-               ixgbe_reset(adapter);
-
        return 0;
 }
 
@@ -428,16 +431,21 @@ static u32 ixgbe_get_tx_csum(struct net_device *netdev)
 static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       u32 feature_list;
 
-       if (data) {
-               netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
-               if (adapter->hw.mac.type == ixgbe_mac_82599EB)
-                       netdev->features |= NETIF_F_SCTP_CSUM;
-       } else {
-               netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
-               if (adapter->hw.mac.type == ixgbe_mac_82599EB)
-                       netdev->features &= ~NETIF_F_SCTP_CSUM;
+       feature_list = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               feature_list |= NETIF_F_SCTP_CSUM;
+               break;
+       default:
+               break;
        }
+       if (data)
+               netdev->features |= feature_list;
+       else
+               netdev->features &= ~feature_list;
 
        return 0;
 }
@@ -530,10 +538,20 @@ static void ixgbe_get_regs(struct net_device *netdev,
        regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
        regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
        regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
-       for (i = 0; i < 8; i++)
-               regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
-       for (i = 0; i < 8; i++)
-               regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
+       for (i = 0; i < 8; i++) {
+               switch (hw->mac.type) {
+               case ixgbe_mac_82598EB:
+                       regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
+                       regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
+                       break;
+               case ixgbe_mac_82599EB:
+                       regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
+                       regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
+                       break;
+               default:
+                       break;
+               }
+       }
        regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
        regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
 
@@ -615,6 +633,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
        regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
        regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
 
+       /* DCB */
        regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
        regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
        regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
@@ -905,13 +924,11 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
                        memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
                               sizeof(struct ixgbe_ring));
                        temp_tx_ring[i].count = new_tx_count;
-                       err = ixgbe_setup_tx_resources(adapter,
-                                                      &temp_tx_ring[i]);
+                       err = ixgbe_setup_tx_resources(&temp_tx_ring[i]);
                        if (err) {
                                while (i) {
                                        i--;
-                                       ixgbe_free_tx_resources(adapter,
-                                                             &temp_tx_ring[i]);
+                                       ixgbe_free_tx_resources(&temp_tx_ring[i]);
                                }
                                goto clear_reset;
                        }
@@ -930,13 +947,11 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
                        memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
                               sizeof(struct ixgbe_ring));
                        temp_rx_ring[i].count = new_rx_count;
-                       err = ixgbe_setup_rx_resources(adapter,
-                                                      &temp_rx_ring[i]);
+                       err = ixgbe_setup_rx_resources(&temp_rx_ring[i]);
                        if (err) {
                                while (i) {
                                        i--;
-                                       ixgbe_free_rx_resources(adapter,
-                                                             &temp_rx_ring[i]);
+                                       ixgbe_free_rx_resources(&temp_rx_ring[i]);
                                }
                                goto err_setup;
                        }
@@ -951,8 +966,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
                /* tx */
                if (new_tx_count != adapter->tx_ring_count) {
                        for (i = 0; i < adapter->num_tx_queues; i++) {
-                               ixgbe_free_tx_resources(adapter,
-                                                       adapter->tx_ring[i]);
+                               ixgbe_free_tx_resources(adapter->tx_ring[i]);
                                memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
                                       sizeof(struct ixgbe_ring));
                        }
@@ -962,8 +976,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
                /* rx */
                if (new_rx_count != adapter->rx_ring_count) {
                        for (i = 0; i < adapter->num_rx_queues; i++) {
-                               ixgbe_free_rx_resources(adapter,
-                                                       adapter->rx_ring[i]);
+                               ixgbe_free_rx_resources(adapter->rx_ring[i]);
                                memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
                                       sizeof(struct ixgbe_ring));
                        }
@@ -1237,12 +1250,20 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
        u32 value, before, after;
        u32 i, toggle;
 
-       if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-               toggle = 0x7FFFF30F;
-               test = reg_test_82599;
-       } else {
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
                toggle = 0x7FFFF3FF;
                test = reg_test_82598;
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               toggle = 0x7FFFF30F;
+               test = reg_test_82599;
+               break;
+       default:
+               *data = 1;
+               return 1;
+               break;
        }
 
        /*
@@ -1460,16 +1481,21 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
        reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
        IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
 
-       if (hw->mac.type == ixgbe_mac_82599EB) {
+       switch (hw->mac.type) {
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
                reg_ctl &= ~IXGBE_DMATXCTL_TE;
                IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
+               break;
+       default:
+               break;
        }
 
        ixgbe_reset(adapter);
 
-       ixgbe_free_tx_resources(adapter, &adapter->test_tx_ring);
-       ixgbe_free_rx_resources(adapter, &adapter->test_rx_ring);
+       ixgbe_free_tx_resources(&adapter->test_tx_ring);
+       ixgbe_free_rx_resources(&adapter->test_rx_ring);
 }
 
 static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
@@ -1483,17 +1509,24 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
        /* Setup Tx descriptor ring and Tx buffers */
        tx_ring->count = IXGBE_DEFAULT_TXD;
        tx_ring->queue_index = 0;
+       tx_ring->dev = &adapter->pdev->dev;
+       tx_ring->netdev = adapter->netdev;
        tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
        tx_ring->numa_node = adapter->node;
 
-       err = ixgbe_setup_tx_resources(adapter, tx_ring);
+       err = ixgbe_setup_tx_resources(tx_ring);
        if (err)
                return 1;
 
-       if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
                reg_data |= IXGBE_DMATXCTL_TE;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
+               break;
+       default:
+               break;
        }
 
        ixgbe_configure_tx_ring(adapter, tx_ring);
@@ -1501,11 +1534,13 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
        /* Setup Rx Descriptor ring and Rx buffers */
        rx_ring->count = IXGBE_DEFAULT_RXD;
        rx_ring->queue_index = 0;
+       rx_ring->dev = &adapter->pdev->dev;
+       rx_ring->netdev = adapter->netdev;
        rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
        rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048;
        rx_ring->numa_node = adapter->node;
 
-       err = ixgbe_setup_rx_resources(adapter, rx_ring);
+       err = ixgbe_setup_rx_resources(rx_ring);
        if (err) {
                ret_val = 4;
                goto err_nomem;
@@ -1604,8 +1639,7 @@ static int ixgbe_check_lbtest_frame(struct sk_buff *skb,
        return 13;
 }
 
-static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
-                                  struct ixgbe_ring *rx_ring,
+static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
                                   struct ixgbe_ring *tx_ring,
                                   unsigned int size)
 {
@@ -1627,7 +1661,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
                rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
 
                /* unmap Rx buffer, will be remapped by alloc_rx_buffers */
-               dma_unmap_single(&adapter->pdev->dev,
+               dma_unmap_single(rx_ring->dev,
                                 rx_buffer_info->dma,
                                 bufsz,
                                 DMA_FROM_DEVICE);
@@ -1639,7 +1673,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
 
                /* unmap buffer on Tx side */
                tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
-               ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+               ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
 
                /* increment Rx/Tx next to clean counters */
                rx_ntc++;
@@ -1655,7 +1689,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
        }
 
        /* re-map buffers to ring, store next to clean values */
-       ixgbe_alloc_rx_buffers(adapter, rx_ring, count);
+       ixgbe_alloc_rx_buffers(rx_ring, count);
        rx_ring->next_to_clean = rx_ntc;
        tx_ring->next_to_clean = tx_ntc;
 
@@ -1699,7 +1733,6 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
                for (i = 0; i < 64; i++) {
                        skb_get(skb);
                        tx_ret_val = ixgbe_xmit_frame_ring(skb,
-                                                          adapter->netdev,
                                                           adapter,
                                                           tx_ring);
                        if (tx_ret_val == NETDEV_TX_OK)
@@ -1714,8 +1747,7 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
                /* allow 200 milliseconds for packets to go from Tx to Rx */
                msleep(200);
 
-               good_cnt = ixgbe_clean_test_rings(adapter, rx_ring,
-                                                 tx_ring, size);
+               good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
                if (good_cnt != 64) {
                        ret_val = 13;
                        break;
@@ -1848,6 +1880,13 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
        int retval = 1;
 
        switch(hw->device_id) {
+       case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+               /* All except this subdevice support WOL */
+               if (hw->subsystem_device_id ==
+                   IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
+                       wol->supported = 0;
+                       break;
+               }
        case IXGBE_DEV_ID_82599_KX4:
                retval = 0;
                break;
@@ -1985,6 +2024,41 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
        return 0;
 }
 
+/*
+ * this function must be called before setting the new value of
+ * rx_itr_setting
+ */
+static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter,
+                            struct ethtool_coalesce *ec)
+{
+       struct net_device *netdev = adapter->netdev;
+
+       if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
+               return false;
+
+       /* if interrupt rate is too high then disable RSC */
+       if (ec->rx_coalesce_usecs != 1 &&
+           ec->rx_coalesce_usecs <= 1000000/IXGBE_MAX_RSC_INT_RATE) {
+               if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+                       e_info(probe, "rx-usecs set too low, "
+                                     "disabling RSC\n");
+                       adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+                       return true;
+               }
+       } else {
+               /* check the feature flag value and enable RSC if necessary */
+               if ((netdev->features & NETIF_F_LRO) &&
+                   !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
+                       e_info(probe, "rx-usecs set to %d, "
+                                     "re-enabling RSC\n",
+                              ec->rx_coalesce_usecs);
+                       adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+                       return true;
+               }
+       }
+       return false;
+}
+
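The threshold used above is plain arithmetic: an interval of
rx_coalesce_usecs microseconds corresponds to 1000000 / rx_coalesce_usecs
interrupts per second. A small illustration, using a placeholder limit
rather than the driver's actual IXGBE_MAX_RSC_INT_RATE value:

/* Editorial sketch, not part of the patch */
#define EXAMPLE_MAX_RSC_INT_RATE 10000   /* placeholder, interrupts/sec */

u32 rx_usecs  = 50;                      /* requested coalescing interval */
u32 int_rate  = 1000000 / rx_usecs;      /* 20000 interrupts per second   */
bool too_fast = (rx_usecs != 1) &&
                (rx_usecs <= 1000000 / EXAMPLE_MAX_RSC_INT_RATE); /* 50 <= 100 -> disable RSC */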
 static int ixgbe_set_coalesce(struct net_device *netdev,
                               struct ethtool_coalesce *ec)
 {
@@ -2002,17 +2076,14 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
                adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
 
        if (ec->rx_coalesce_usecs > 1) {
-               u32 max_int;
-               if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
-                       max_int = IXGBE_MAX_RSC_INT_RATE;
-               else
-                       max_int = IXGBE_MAX_INT_RATE;
-
                /* check the limits */
-               if ((1000000/ec->rx_coalesce_usecs > max_int) ||
+               if ((1000000/ec->rx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
                    (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE))
                        return -EINVAL;
 
+               /* check the old value and enable RSC if necessary */
+               need_reset = ixgbe_update_rsc(adapter, ec);
+
                /* store the value in ints/second */
                adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
 
@@ -2021,32 +2092,21 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
                /* clear the lower bit as its used for dynamic state */
                adapter->rx_itr_setting &= ~1;
        } else if (ec->rx_coalesce_usecs == 1) {
+               /* check the old value and enable RSC if necessary */
+               need_reset = ixgbe_update_rsc(adapter, ec);
+
                /* 1 means dynamic mode */
                adapter->rx_eitr_param = 20000;
                adapter->rx_itr_setting = 1;
        } else {
+               /* check the old value and enable RSC if necessary */
+               need_reset = ixgbe_update_rsc(adapter, ec);
                /*
                 * any other value means disable eitr, which is best
                 * served by setting the interrupt rate very high
                 */
                adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
                adapter->rx_itr_setting = 0;
-
-               /*
-                * if hardware RSC is enabled, disable it when
-                * setting low latency mode, to avoid errata, assuming
-                * that when the user set low latency mode they want
-                * it at the cost of anything else
-                */
-               if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
-                       adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
-                       if (netdev->features & NETIF_F_LRO) {
-                               netdev->features &= ~NETIF_F_LRO;
-                               e_info(probe, "rx-usecs set to 0, "
-                                      "disabling RSC\n");
-                       }
-                       need_reset = true;
-               }
        }
 
        if (ec->tx_coalesce_usecs > 1) {
@@ -2133,28 +2193,39 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
                return rc;
 
        /* if state changes we need to update adapter->flags and reset */
-       if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
-               /*
-                * cast both to bool and verify if they are set the same
-                * but only enable RSC if itr is non-zero, as
-                * itr=0 and RSC are mutually exclusive
-                */
-               if (((!!(data & ETH_FLAG_LRO)) !=
-                    (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) &&
-                   adapter->rx_itr_setting) {
+       if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
+           (!!(data & ETH_FLAG_LRO) !=
+            !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
+               if ((data & ETH_FLAG_LRO) &&
+                   (!adapter->rx_itr_setting ||
+                    (adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE))) {
+                       e_info(probe, "rx-usecs set too low, "
+                                     "not enabling RSC.\n");
+               } else {
                        adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
                        switch (adapter->hw.mac.type) {
                        case ixgbe_mac_82599EB:
                                need_reset = true;
                                break;
+                       case ixgbe_mac_X540: {
+                               int i;
+                               for (i = 0; i < adapter->num_rx_queues; i++) {
+                                       struct ixgbe_ring *ring =
+                                                         adapter->rx_ring[i];
+                                       if (adapter->flags2 &
+                                           IXGBE_FLAG2_RSC_ENABLED) {
+                                               ixgbe_configure_rscctl(adapter,
+                                                                      ring);
+                                       } else {
+                                               ixgbe_clear_rscctl(adapter,
+                                                                  ring);
+                                       }
+                               }
+                       }
+                               break;
                        default:
                                break;
                        }
-               } else if (!adapter->rx_itr_setting) {
-                       netdev->features &= ~NETIF_F_LRO;
-                       if (data & ETH_FLAG_LRO)
-                               e_info(probe, "rx-usecs set to 0, "
-                                      "LRO/RSC cannot be enabled.\n");
                }
        }
 
index 05efa6a8ce8e080c6cea2d85e31bcfd09728e8dd..6342d4859790804a83981f65175d7653bb3f19a5 100644 (file)
@@ -68,7 +68,7 @@ static inline bool ixgbe_rx_is_fcoe(union ixgbe_adv_rx_desc *rx_desc)
 static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
 {
        ddp->len = 0;
-       ddp->err = 0;
+       ddp->err = 1;
        ddp->udl = NULL;
        ddp->udp = 0UL;
        ddp->sgl = NULL;
@@ -92,6 +92,7 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
        struct ixgbe_fcoe *fcoe;
        struct ixgbe_adapter *adapter;
        struct ixgbe_fcoe_ddp *ddp;
+       u32 fcbuff;
 
        if (!netdev)
                goto out_ddp_put;
@@ -115,7 +116,14 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
                                (xid | IXGBE_FCDMARW_WE));
+
+               /* guaranteed to be invalidated after 100us */
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
+                               (xid | IXGBE_FCDMARW_RE));
+               fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF);
                spin_unlock_bh(&fcoe->lock);
+               if (fcbuff & IXGBE_FCBUFF_VALID)
+                       udelay(100);
        }
        if (ddp->sgl)
                pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
@@ -168,6 +176,11 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
                return 0;
        }
 
+       /* no DDP if we are already down or resetting */
+       if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+           test_bit(__IXGBE_RESETTING, &adapter->state))
+               return 0;
+
        fcoe = &adapter->fcoe;
        if (!fcoe->pool) {
                e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
index fbad4d819608f234d0eff25175bccf9a8545c20b..5409af3da06c0d107c0240e666130eb4e076a503 100644 (file)
@@ -59,6 +59,7 @@ static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
 static const struct ixgbe_info *ixgbe_info_tbl[] = {
        [board_82598] = &ixgbe_82598_info,
        [board_82599] = &ixgbe_82599_info,
+       [board_X540] = &ixgbe_X540_info,
 };
 
 /* ixgbe_pci_tbl - PCI Device ID Table
@@ -112,6 +113,8 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
         board_82599 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T),
+        board_82599 },
 
        /* required last entry */
        {0, }
@@ -560,6 +563,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
                IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
                break;
        case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                if (direction == -1) {
                        /* other causes */
                        msix_vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -589,29 +593,34 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
 {
        u32 mask;
 
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
                mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
-       } else {
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                mask = (qmask & 0xFFFFFFFF);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
                mask = (qmask >> 32);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
+               break;
+       default:
+               break;
        }
 }
 
-void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
-                                     struct ixgbe_tx_buffer
-                                     *tx_buffer_info)
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
+                                     struct ixgbe_tx_buffer *tx_buffer_info)
 {
        if (tx_buffer_info->dma) {
                if (tx_buffer_info->mapped_as_page)
-                       dma_unmap_page(&adapter->pdev->dev,
+                       dma_unmap_page(tx_ring->dev,
                                       tx_buffer_info->dma,
                                       tx_buffer_info->length,
                                       DMA_TO_DEVICE);
                else
-                       dma_unmap_single(&adapter->pdev->dev,
+                       dma_unmap_single(tx_ring->dev,
                                         tx_buffer_info->dma,
                                         tx_buffer_info->length,
                                         DMA_TO_DEVICE);
@@ -626,92 +635,166 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
 }
 
 /**
- * ixgbe_tx_xon_state - check the tx ring xon state
- * @adapter: the ixgbe adapter
- * @tx_ring: the corresponding tx_ring
+ * ixgbe_dcb_txq_to_tc - convert a reg index to a traffic class
+ * @adapter: driver private struct
+ * @reg_idx: register index of the queue to query (0-127)
  *
- * If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the
- * corresponding TC of this tx_ring when checking TFCS.
+ * Helper function to determine the traffic class index for a particular
+ * register index.
  *
- * Returns : true if in xon state (currently not paused)
+ * Returns : a TC index in the range 0-7, or 0-3 when only four TCs are in use
  */
-static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
-                                     struct ixgbe_ring *tx_ring)
+u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
 {
-       u32 txoff = IXGBE_TFCS_TXOFF;
+       int tc = -1;
+       int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
 
-#ifdef CONFIG_IXGBE_DCB
-       if (adapter->dcb_cfg.pfc_mode_enable) {
-               int tc;
-               int reg_idx = tx_ring->reg_idx;
-               int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
+       /* if DCB is not enabled the queues have no TC */
+       if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+               return tc;
+
+       /* check valid range */
+       if (reg_idx >= adapter->hw.mac.max_tx_queues)
+               return tc;
 
-               switch (adapter->hw.mac.type) {
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
+               tc = reg_idx >> 2;
+               break;
+       default:
+               if (dcb_i != 4 && dcb_i != 8)
+                       break;
+
+               /* if VMDq is enabled the lowest order bits determine TC */
+               if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
+                                     IXGBE_FLAG_VMDQ_ENABLED)) {
+                       tc = reg_idx & (dcb_i - 1);
+                       break;
+               }
+
+               /*
+                * Convert the reg_idx into the correct TC. This bitmask
+                * targets the last full 32 ring traffic class and assigns
+                * it a value of 1. From there the rest of the rings are
+                * based on shifting the mask further up to include the
+                * reg_idx / 16 and then reg_idx / 8. It assumes dcb_i
+                * will only ever be 8 or 4 and that reg_idx will never
+                * be greater than 128. The code without the power of 2
+                * optimizations would be:
+                * (((reg_idx % 32) + 32) * dcb_i) >> (9 - reg_idx / 32)
+                */
+               tc = ((reg_idx & 0X1F) + 0x20) * dcb_i;
+               tc >>= 9 - (reg_idx >> 5);
+       }
+
+       return tc;
+}
+
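A quick numeric check of the formula in the comment above (editorial, not
part of the patch):

/* With dcb_i = 8 and reg_idx = 96:
 *   tc   = ((96 & 0x1F) + 0x20) * 8 = 0x20 * 8 = 256
 *   tc >>= 9 - (96 >> 5) = 9 - 3 = 6,  so tc = 256 >> 6 = 4
 * which matches the unoptimized form
 *   (((96 % 32) + 32) * 8) >> (9 - 96 / 32) = 4.
 */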
+static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct ixgbe_hw_stats *hwstats = &adapter->stats;
+       u32 data = 0;
+       u32 xoff[8] = {0};
+       int i;
+
+       if ((hw->fc.current_mode == ixgbe_fc_full) ||
+           (hw->fc.current_mode == ixgbe_fc_rx_pause)) {
+               switch (hw->mac.type) {
                case ixgbe_mac_82598EB:
-                       tc = reg_idx >> 2;
-                       txoff = IXGBE_TFCS_TXOFF0;
+                       data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
                        break;
-               case ixgbe_mac_82599EB:
-                       tc = 0;
-                       txoff = IXGBE_TFCS_TXOFF;
-                       if (dcb_i == 8) {
-                               /* TC0, TC1 */
-                               tc = reg_idx >> 5;
-                               if (tc == 2) /* TC2, TC3 */
-                                       tc += (reg_idx - 64) >> 4;
-                               else if (tc == 3) /* TC4, TC5, TC6, TC7 */
-                                       tc += 1 + ((reg_idx - 96) >> 3);
-                       } else if (dcb_i == 4) {
-                               /* TC0, TC1 */
-                               tc = reg_idx >> 6;
-                               if (tc == 1) {
-                                       tc += (reg_idx - 64) >> 5;
-                                       if (tc == 2) /* TC2, TC3 */
-                                               tc += (reg_idx - 96) >> 4;
-                               }
-                       }
+               default:
+                       data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+               }
+               hwstats->lxoffrxc += data;
+
+               /* refill credits (no tx hang) if we received xoff */
+               if (!data)
+                       return;
+
+               for (i = 0; i < adapter->num_tx_queues; i++)
+                       clear_bit(__IXGBE_HANG_CHECK_ARMED,
+                                 &adapter->tx_ring[i]->state);
+               return;
+       } else if (!(adapter->dcb_cfg.pfc_mode_enable))
+               return;
+
+       /* update stats for each tc, only valid with PFC enabled */
+       for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
+               switch (hw->mac.type) {
+               case ixgbe_mac_82598EB:
+                       xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
                        break;
                default:
-                       tc = 0;
+                       xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
                }
-               txoff <<= tc;
+               hwstats->pxoffrxc[i] += xoff[i];
        }
-#endif
-       return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff;
+
+       /* disarm tx queues that have received xoff frames */
+       for (i = 0; i < adapter->num_tx_queues; i++) {
+               struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+               u32 tc = ixgbe_dcb_txq_to_tc(adapter, tx_ring->reg_idx);
+
+               if (xoff[tc])
+                       clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
+       }
+}
+
+static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
+{
+       return ring->tx_stats.completed;
 }
 
-static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
-                                      struct ixgbe_ring *tx_ring,
-                                      unsigned int eop)
+static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
 {
+       struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
        struct ixgbe_hw *hw = &adapter->hw;
 
-       /* Detect a transmit hang in hardware, this serializes the
-        * check with the clearing of time_stamp and movement of eop */
-       adapter->detect_tx_hung = false;
-       if (tx_ring->tx_buffer_info[eop].time_stamp &&
-           time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
-           ixgbe_tx_xon_state(adapter, tx_ring)) {
-               /* detected Tx unit hang */
-               union ixgbe_adv_tx_desc *tx_desc;
-               tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
-               e_err(drv, "Detected Tx Unit Hang\n"
-                     "  Tx Queue             <%d>\n"
-                     "  TDH, TDT             <%x>, <%x>\n"
-                     "  next_to_use          <%x>\n"
-                     "  next_to_clean        <%x>\n"
-                     "tx_buffer_info[next_to_clean]\n"
-                     "  time_stamp           <%lx>\n"
-                     "  jiffies              <%lx>\n",
-                     tx_ring->queue_index,
-                     IXGBE_READ_REG(hw, tx_ring->head),
-                     IXGBE_READ_REG(hw, tx_ring->tail),
-                     tx_ring->next_to_use, eop,
-                     tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
-               return true;
+       u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
+       u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
+
+       if (head != tail)
+               return (head < tail) ?
+                       tail - head : (tail + ring->count - head);
+
+       return 0;
+}
+
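The head/tail distance above accounts for descriptor ring wraparound; a
worked example (editorial, not part of the patch):

/* With ring->count = 512, head = 500 and tail = 20 the ring has wrapped,
 * so the pending count is tail + count - head = 20 + 512 - 500 = 32.
 */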
+static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
+{
+       u32 tx_done = ixgbe_get_tx_completed(tx_ring);
+       u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
+       u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
+       bool ret = false;
+
+       clear_check_for_tx_hang(tx_ring);
+
+       /*
+        * Check for a hung queue, but be thorough. This verifies
+        * that a transmit has been completed since the previous
+        * check AND there is at least one packet pending. The
+        * ARMED bit is set to indicate a potential hang. The
+        * bit is cleared if a pause frame is received to remove
+        * false hang detection due to PFC or 802.3x frames. By
+        * requiring this to fail twice we avoid races with
+        * pfc clearing the ARMED bit and conditions where we
+        * run the check_tx_hang logic with a transmit completion
+        * pending but without time to complete it yet.
+        */
+       if ((tx_done_old == tx_done) && tx_pending) {
+               /* make sure it is true for two checks in a row */
+               ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
+                                      &tx_ring->state);
+       } else {
+               /* update completed stats and continue */
+               tx_ring->tx_stats.tx_done_old = tx_done;
+               /* reset the countdown */
+               clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
        }
 
-       return false;
+       return ret;
 }
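The ARMED-bit logic above is a two-strike scheme: a hang is only reported
when no completion progress is observed on two consecutive checks while
descriptors are still pending. A minimal, self-contained sketch of the same
pattern; all names below are illustrative, not the driver's:

/* Editorial sketch, not part of the patch */
struct example_queue {
        bool armed;          /* set after the first no-progress observation */
        u32 done, done_old;  /* completion counter and its previous value   */
        u32 pending;         /* descriptors still owned by hardware         */
};

static bool example_check_hang(struct example_queue *q)
{
        if (q->done == q->done_old && q->pending) {
                if (q->armed)
                        return true;    /* second strike: report a hang */
                q->armed = true;        /* first strike: arm and re-check later */
                return false;
        }

        q->done_old = q->done;          /* progress was made: reset the check */
        q->armed = false;
        return false;
}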
 
 #define IXGBE_MAX_TXD_PWR       14
@@ -734,11 +817,10 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *tx_ring)
 {
        struct ixgbe_adapter *adapter = q_vector->adapter;
-       struct net_device *netdev = adapter->netdev;
        union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
        struct ixgbe_tx_buffer *tx_buffer_info;
-       unsigned int i, eop, count = 0;
        unsigned int total_bytes = 0, total_packets = 0;
+       u16 i, eop, count = 0;
 
        i = tx_ring->next_to_clean;
        eop = tx_ring->tx_buffer_info[i].next_to_watch;
@@ -749,148 +831,182 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                bool cleaned = false;
                rmb(); /* read buffer_info after eop_desc */
                for ( ; !cleaned; count++) {
-                       struct sk_buff *skb;
                        tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
                        tx_buffer_info = &tx_ring->tx_buffer_info[i];
-                       cleaned = (i == eop);
-                       skb = tx_buffer_info->skb;
-
-                       if (cleaned && skb) {
-                               unsigned int segs, bytecount;
-                               unsigned int hlen = skb_headlen(skb);
-
-                               /* gso_segs is currently only valid for tcp */
-                               segs = skb_shinfo(skb)->gso_segs ?: 1;
-#ifdef IXGBE_FCOE
-                               /* adjust for FCoE Sequence Offload */
-                               if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
-                                   && skb_is_gso(skb)
-                                   && vlan_get_protocol(skb) ==
-                                   htons(ETH_P_FCOE)) {
-                                       hlen = skb_transport_offset(skb) +
-                                               sizeof(struct fc_frame_header) +
-                                               sizeof(struct fcoe_crc_eof);
-                                       segs = DIV_ROUND_UP(skb->len - hlen,
-                                               skb_shinfo(skb)->gso_size);
-                               }
-#endif /* IXGBE_FCOE */
-                               /* multiply data chunks by size of headers */
-                               bytecount = ((segs - 1) * hlen) + skb->len;
-                               total_packets += segs;
-                               total_bytes += bytecount;
-                       }
-
-                       ixgbe_unmap_and_free_tx_resource(adapter,
-                                                        tx_buffer_info);
 
                        tx_desc->wb.status = 0;
+                       cleaned = (i == eop);
 
                        i++;
                        if (i == tx_ring->count)
                                i = 0;
+
+                       if (cleaned && tx_buffer_info->skb) {
+                               total_bytes += tx_buffer_info->bytecount;
+                               total_packets += tx_buffer_info->gso_segs;
+                       }
+
+                       ixgbe_unmap_and_free_tx_resource(tx_ring,
+                                                        tx_buffer_info);
                }
 
+               tx_ring->tx_stats.completed++;
                eop = tx_ring->tx_buffer_info[i].next_to_watch;
                eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
        }
 
        tx_ring->next_to_clean = i;
+       tx_ring->total_bytes += total_bytes;
+       tx_ring->total_packets += total_packets;
+       u64_stats_update_begin(&tx_ring->syncp);
+       tx_ring->stats.packets += total_packets;
+       tx_ring->stats.bytes += total_bytes;
+       u64_stats_update_end(&tx_ring->syncp);
+
+       if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
+               /* schedule immediate reset if we believe we hung */
+               struct ixgbe_hw *hw = &adapter->hw;
+               tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
+               e_err(drv, "Detected Tx Unit Hang\n"
+                       "  Tx Queue             <%d>\n"
+                       "  TDH, TDT             <%x>, <%x>\n"
+                       "  next_to_use          <%x>\n"
+                       "  next_to_clean        <%x>\n"
+                       "tx_buffer_info[next_to_clean]\n"
+                       "  time_stamp           <%lx>\n"
+                       "  jiffies              <%lx>\n",
+                       tx_ring->queue_index,
+                       IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
+                       IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
+                       tx_ring->next_to_use, eop,
+                       tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
+
+               netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+               e_info(probe,
+                      "tx hang %d detected on queue %d, resetting adapter\n",
+                       adapter->tx_timeout_count + 1, tx_ring->queue_index);
+
+               /* schedule immediate reset if we believe we hung */
+               ixgbe_tx_timeout(adapter->netdev);
+
+               /* the adapter is about to reset, no point in enabling stuff */
+               return true;
+       }
 
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
-       if (unlikely(count && netif_carrier_ok(netdev) &&
+       if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
                     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();
-               if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
+               if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) &&
                    !test_bit(__IXGBE_DOWN, &adapter->state)) {
-                       netif_wake_subqueue(netdev, tx_ring->queue_index);
-                       ++tx_ring->restart_queue;
-               }
-       }
-
-       if (adapter->detect_tx_hung) {
-               if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
-                       /* schedule immediate reset if we believe we hung */
-                       e_info(probe, "tx hang %d detected, resetting "
-                              "adapter\n", adapter->tx_timeout_count + 1);
-                       ixgbe_tx_timeout(adapter->netdev);
+                       netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
+                       ++tx_ring->tx_stats.restart_queue;
                }
        }
 
-       /* re-arm the interrupt */
-       if (count >= tx_ring->work_limit)
-               ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));
-
-       tx_ring->total_bytes += total_bytes;
-       tx_ring->total_packets += total_packets;
-       u64_stats_update_begin(&tx_ring->syncp);
-       tx_ring->stats.packets += total_packets;
-       tx_ring->stats.bytes += total_bytes;
-       u64_stats_update_end(&tx_ring->syncp);
        return count < tx_ring->work_limit;
 }
 
 #ifdef CONFIG_IXGBE_DCA
 static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
-                               struct ixgbe_ring *rx_ring)
+                               struct ixgbe_ring *rx_ring,
+                               int cpu)
 {
+       struct ixgbe_hw *hw = &adapter->hw;
        u32 rxctrl;
-       int cpu = get_cpu();
-       int q = rx_ring->reg_idx;
-
-       if (rx_ring->cpu != cpu) {
-               rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
-               if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-                       rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
-                       rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
-               } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-                       rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
-                       rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
-                                  IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
-               }
-               rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
-               rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
-               rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
-               rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
-                           IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
-               rx_ring->cpu = cpu;
+       u8 reg_idx = rx_ring->reg_idx;
+
+       rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx));
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
+               rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
+               rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
+                          IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
+               break;
+       default:
+               break;
        }
-       put_cpu();
+       rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
+       rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
+       rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
+       rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
+                   IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+       IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
 }
 
 static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
-                               struct ixgbe_ring *tx_ring)
+                               struct ixgbe_ring *tx_ring,
+                               int cpu)
 {
+       struct ixgbe_hw *hw = &adapter->hw;
        u32 txctrl;
+       u8 reg_idx = tx_ring->reg_idx;
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx));
+               txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
+               txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+               txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+               txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+               IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx));
+               txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
+               txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
+                          IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
+               txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+               txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+               IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
+               break;
+       default:
+               break;
+       }
+}
+
+static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
+{
+       struct ixgbe_adapter *adapter = q_vector->adapter;
        int cpu = get_cpu();
-       int q = tx_ring->reg_idx;
-       struct ixgbe_hw *hw = &adapter->hw;
+       long r_idx;
+       int i;
 
-       if (tx_ring->cpu != cpu) {
-               if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-                       txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q));
-                       txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
-                       txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
-                       txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
-                       IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl);
-               } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-                       txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
-                       txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
-                       txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
-                                 IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
-                       txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
-                       IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
-               }
-               tx_ring->cpu = cpu;
+       if (q_vector->cpu == cpu)
+               goto out_no_update;
+
+       r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+       for (i = 0; i < q_vector->txr_count; i++) {
+               ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu);
+               r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+                                     r_idx + 1);
+       }
+
+       r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+       for (i = 0; i < q_vector->rxr_count; i++) {
+               ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu);
+               r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+                                     r_idx + 1);
        }
+
+       q_vector->cpu = cpu;
+out_no_update:
        put_cpu();
 }
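/*
 * Illustrative, self-contained user-space sketch (not kernel code;
 * find_next_set() is a toy stand-in for the kernel's
 * find_first_bit()/find_next_bit()): q_vector->rxr_idx is a bitmap of ring
 * indices and rxr_count holds the number of set bits, so the loops above walk
 * exactly the rings owned by this vector and nothing else.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int find_next_set(uint64_t map, unsigned int size,
				  unsigned int start)
{
	for (unsigned int i = start; i < size; i++)
		if (map & (1ULL << i))
			return i;
	return size;
}

int main(void)
{
	uint64_t rxr_idx = (1ULL << 2) | (1ULL << 5) | (1ULL << 9);
	unsigned int num_rx_queues = 16, rxr_count = 3, r_idx, i;

	r_idx = find_next_set(rxr_idx, num_rx_queues, 0);
	for (i = 0; i < rxr_count; i++) {
		printf("this vector services rx_ring[%u]\n", r_idx);
		r_idx = find_next_set(rxr_idx, num_rx_queues, r_idx + 1);
	}
	return 0;
}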
 
 static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
 {
+       int num_q_vectors;
        int i;
 
        if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
@@ -899,22 +1015,25 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
        /* always use CB2 mode, difference is masked in the CB driver */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
 
-       for (i = 0; i < adapter->num_tx_queues; i++) {
-               adapter->tx_ring[i]->cpu = -1;
-               ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]);
-       }
-       for (i = 0; i < adapter->num_rx_queues; i++) {
-               adapter->rx_ring[i]->cpu = -1;
-               ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]);
+       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+               num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+       else
+               num_q_vectors = 1;
+
+       for (i = 0; i < num_q_vectors; i++) {
+               adapter->q_vector[i]->cpu = -1;
+               ixgbe_update_dca(adapter->q_vector[i]);
        }
 }
 
 static int __ixgbe_notify_dca(struct device *dev, void *data)
 {
-       struct net_device *netdev = dev_get_drvdata(dev);
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
        unsigned long event = *(unsigned long *)data;
 
+       if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
+               return 0;
+
        switch (event) {
        case DCA_PROVIDER_ADD:
                /* if we're already enabled, don't do it again */
@@ -1013,8 +1132,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
        skb->ip_summed = CHECKSUM_UNNECESSARY;
 }
 
-static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
-                                        struct ixgbe_ring *rx_ring, u32 val)
+static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
 {
        /*
         * Force memory writes to complete before letting h/w
@@ -1023,72 +1141,81 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
         * such as IA-64).
         */
        wmb();
-       IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val);
+       writel(val, rx_ring->tail);
 }
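/*
 * A minimal sketch of the tail-bump pattern above (kernel context assumed;
 * the helper name is hypothetical): descriptors are written to coherent
 * memory first, wmb() orders those stores, and only then does writel() ring
 * the doorbell, so the NIC never fetches a half-written descriptor.  Caching
 * the ioremapped register address in rx_ring->tail lets the hot path avoid
 * recomputing IXGBE_RDT(reg_idx) on every refill.
 */
static inline void example_bump_tail(void __iomem *tail, u32 next_to_use)
{
	wmb();				/* descriptor writes before the doorbell */
	writel(next_to_use, tail);
}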
 
 /**
  * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
- * @adapter: address of board private structure
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
  **/
-void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
-                           struct ixgbe_ring *rx_ring,
-                           int cleaned_count)
+void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 {
-       struct net_device *netdev = adapter->netdev;
-       struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbe_rx_buffer *bi;
-       unsigned int i;
-       unsigned int bufsz = rx_ring->rx_buf_len;
+       struct sk_buff *skb;
+       u16 i = rx_ring->next_to_use;
 
-       i = rx_ring->next_to_use;
-       bi = &rx_ring->rx_buffer_info[i];
+       /* do nothing if no valid netdev defined */
+       if (!rx_ring->netdev)
+               return;
 
        while (cleaned_count--) {
                rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
+               bi = &rx_ring->rx_buffer_info[i];
+               skb = bi->skb;
 
-               if (!bi->page_dma &&
-                   (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
-                       if (!bi->page) {
-                               bi->page = netdev_alloc_page(netdev);
-                               if (!bi->page) {
-                                       adapter->alloc_rx_page_failed++;
-                                       goto no_buffers;
-                               }
-                               bi->page_offset = 0;
-                       } else {
-                               /* use a half page if we're re-using */
-                               bi->page_offset ^= (PAGE_SIZE / 2);
-                       }
-
-                       bi->page_dma = dma_map_page(&pdev->dev, bi->page,
-                                                   bi->page_offset,
-                                                   (PAGE_SIZE / 2),
-                                                   DMA_FROM_DEVICE);
-               }
-
-               if (!bi->skb) {
-                       struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev,
-                                                                       bufsz);
-                       bi->skb = skb;
-
+               if (!skb) {
+                       skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+                                                       rx_ring->rx_buf_len);
                        if (!skb) {
-                               adapter->alloc_rx_buff_failed++;
+                               rx_ring->rx_stats.alloc_rx_buff_failed++;
                                goto no_buffers;
                        }
                        /* initialize queue mapping */
                        skb_record_rx_queue(skb, rx_ring->queue_index);
+                       bi->skb = skb;
                }
 
                if (!bi->dma) {
-                       bi->dma = dma_map_single(&pdev->dev,
-                                                bi->skb->data,
+                       bi->dma = dma_map_single(rx_ring->dev,
+                                                skb->data,
                                                 rx_ring->rx_buf_len,
                                                 DMA_FROM_DEVICE);
+                       if (dma_mapping_error(rx_ring->dev, bi->dma)) {
+                               rx_ring->rx_stats.alloc_rx_buff_failed++;
+                               bi->dma = 0;
+                               goto no_buffers;
+                       }
                }
-               /* Refresh the desc even if buffer_addrs didn't change because
-                * each write-back erases this info. */
-               if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+
+               if (ring_is_ps_enabled(rx_ring)) {
+                       if (!bi->page) {
+                               bi->page = netdev_alloc_page(rx_ring->netdev);
+                               if (!bi->page) {
+                                       rx_ring->rx_stats.alloc_rx_page_failed++;
+                                       goto no_buffers;
+                               }
+                       }
+
+                       if (!bi->page_dma) {
+                               /* use a half page if we're re-using */
+                               bi->page_offset ^= PAGE_SIZE / 2;
+                               bi->page_dma = dma_map_page(rx_ring->dev,
+                                                           bi->page,
+                                                           bi->page_offset,
+                                                           PAGE_SIZE / 2,
+                                                           DMA_FROM_DEVICE);
+                               if (dma_mapping_error(rx_ring->dev,
+                                                     bi->page_dma)) {
+                                       rx_ring->rx_stats.alloc_rx_page_failed++;
+                                       bi->page_dma = 0;
+                                       goto no_buffers;
+                               }
+                       }
+
+                       /* Refresh the desc even if buffer_addrs didn't change
+                        * because each write-back erases this info. */
                        rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
                        rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
                } else {
@@ -1099,56 +1226,48 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                i++;
                if (i == rx_ring->count)
                        i = 0;
-               bi = &rx_ring->rx_buffer_info[i];
        }
 
 no_buffers:
        if (rx_ring->next_to_use != i) {
                rx_ring->next_to_use = i;
-               if (i-- == 0)
-                       i = (rx_ring->count - 1);
-
-               ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
+               ixgbe_release_rx_desc(rx_ring, i);
        }
 }
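/*
 * A minimal sketch of the mapping-error handling added above (kernel context
 * assumed; the helper name is hypothetical): each dma_map_single() /
 * dma_map_page() result is now checked with dma_mapping_error(), and on
 * failure the cached handle is reset to 0 so a later refill retries the
 * mapping instead of handing a stale or bogus bus address to the hardware.
 */
static int example_map_rx_buffer(struct device *dev, struct sk_buff *skb,
				 unsigned int len, dma_addr_t *dma)
{
	*dma = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		*dma = 0;	/* leave unmapped; the next refill will retry */
		return -ENOMEM;
	}
	return 0;
}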
 
-static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
-{
-       return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
-}
-
-static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
-{
-       return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
-}
-
-static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
+static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
 {
-       return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
-               IXGBE_RXDADV_RSCCNT_MASK) >>
-               IXGBE_RXDADV_RSCCNT_SHIFT;
+       /* HW will not DMA in data larger than the given buffer, even if it
+        * parses the (NFS, of course) header to be larger.  In that case, it
+        * fills the header buffer and spills the rest into the page.
+        */
+       u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info);
+       u16 hlen = (hdr_info &  IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+                   IXGBE_RXDADV_HDRBUFLEN_SHIFT;
+       if (hlen > IXGBE_RX_HDR_SIZE)
+               hlen = IXGBE_RX_HDR_SIZE;
+       return hlen;
 }
 
 /**
  * ixgbe_transform_rsc_queue - change rsc queue into a full packet
  * @skb: pointer to the last skb in the rsc queue
- * @count: pointer to number of packets coalesced in this context
  *
  * This function changes a queue full of hw rsc buffers into a completed
  * packet.  It uses the ->prev pointers to find the first packet and then
  * turns it into the frag list owner.
  **/
-static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
-                                                       u64 *count)
+static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
 {
        unsigned int frag_list_size = 0;
+       unsigned int skb_cnt = 1;
 
        while (skb->prev) {
                struct sk_buff *prev = skb->prev;
                frag_list_size += skb->len;
                skb->prev = NULL;
                skb = prev;
-               *count += 1;
+               skb_cnt++;
        }
 
        skb_shinfo(skb)->frag_list = skb->next;
@@ -1156,68 +1275,59 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
        skb->len += frag_list_size;
        skb->data_len += frag_list_size;
        skb->truesize += frag_list_size;
+       IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt;
+
        return skb;
 }
 
-struct ixgbe_rsc_cb {
-       dma_addr_t dma;
-       bool delay_unmap;
-};
-
-#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
+static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc)
+{
+       return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
+               IXGBE_RXDADV_RSCCNT_MASK);
+}
 
-static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
+static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *rx_ring,
                               int *work_done, int work_to_do)
 {
        struct ixgbe_adapter *adapter = q_vector->adapter;
-       struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
        struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
        struct sk_buff *skb;
-       unsigned int i, rsc_count = 0;
-       u32 len, staterr;
-       u16 hdr_info;
-       bool cleaned = false;
-       int cleaned_count = 0;
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+       const int current_node = numa_node_id();
 #ifdef IXGBE_FCOE
        int ddp_bytes = 0;
 #endif /* IXGBE_FCOE */
+       u32 staterr;
+       u16 i;
+       u16 cleaned_count = 0;
+       bool pkt_is_rsc = false;
 
        i = rx_ring->next_to_clean;
        rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
-       rx_buffer_info = &rx_ring->rx_buffer_info[i];
 
        while (staterr & IXGBE_RXD_STAT_DD) {
                u32 upper_len = 0;
-               if (*work_done >= work_to_do)
-                       break;
-               (*work_done)++;
 
                rmb(); /* read descriptor and rx_buffer_info after status DD */
-               if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
-                       hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
-                       len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
-                              IXGBE_RXDADV_HDRBUFLEN_SHIFT;
-                       upper_len = le16_to_cpu(rx_desc->wb.upper.length);
-                       if ((len > IXGBE_RX_HDR_SIZE) ||
-                           (upper_len && !(hdr_info & IXGBE_RXDADV_SPH)))
-                               len = IXGBE_RX_HDR_SIZE;
-               } else {
-                       len = le16_to_cpu(rx_desc->wb.upper.length);
-               }
 
-               cleaned = true;
+               rx_buffer_info = &rx_ring->rx_buffer_info[i];
+
                skb = rx_buffer_info->skb;
-               prefetch(skb->data);
                rx_buffer_info->skb = NULL;
+               prefetch(skb->data);
 
+               if (ring_is_rsc_enabled(rx_ring))
+                       pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
+
+               /* if this is an skb from a previous receive, DMA will be 0 */
                if (rx_buffer_info->dma) {
-                       if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
-                           (!(staterr & IXGBE_RXD_STAT_EOP)) &&
-                                (!(skb->prev))) {
+                       u16 hlen;
+                       if (pkt_is_rsc &&
+                           !(staterr & IXGBE_RXD_STAT_EOP) &&
+                           !skb->prev) {
                                /*
                                 * When HWRSC is enabled, delay unmapping
                                 * of the first packet. It carries the
@@ -1228,29 +1338,42 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                                IXGBE_RSC_CB(skb)->delay_unmap = true;
                                IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
                        } else {
-                               dma_unmap_single(&pdev->dev,
+                               dma_unmap_single(rx_ring->dev,
                                                 rx_buffer_info->dma,
                                                 rx_ring->rx_buf_len,
                                                 DMA_FROM_DEVICE);
                        }
                        rx_buffer_info->dma = 0;
-                       skb_put(skb, len);
+
+                       if (ring_is_ps_enabled(rx_ring)) {
+                               hlen = ixgbe_get_hlen(rx_desc);
+                               upper_len = le16_to_cpu(rx_desc->wb.upper.length);
+                       } else {
+                               hlen = le16_to_cpu(rx_desc->wb.upper.length);
+                       }
+
+                       skb_put(skb, hlen);
+               } else {
+                       /* assume packet split since header is unmapped */
+                       upper_len = le16_to_cpu(rx_desc->wb.upper.length);
                }
 
                if (upper_len) {
-                       dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
-                                      PAGE_SIZE / 2, DMA_FROM_DEVICE);
+                       dma_unmap_page(rx_ring->dev,
+                                      rx_buffer_info->page_dma,
+                                      PAGE_SIZE / 2,
+                                      DMA_FROM_DEVICE);
                        rx_buffer_info->page_dma = 0;
                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                           rx_buffer_info->page,
                                           rx_buffer_info->page_offset,
                                           upper_len);
 
-                       if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
-                           (page_count(rx_buffer_info->page) != 1))
-                               rx_buffer_info->page = NULL;
-                       else
+                       if ((page_count(rx_buffer_info->page) == 1) &&
+                           (page_to_nid(rx_buffer_info->page) == current_node))
                                get_page(rx_buffer_info->page);
+                       else
+                               rx_buffer_info->page = NULL;
 
                        skb->len += upper_len;
                        skb->data_len += upper_len;
@@ -1265,10 +1388,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                prefetch(next_rxd);
                cleaned_count++;
 
-               if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
-                       rsc_count = ixgbe_get_rsc_count(rx_desc);
-
-               if (rsc_count) {
+               if (pkt_is_rsc) {
                        u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
                                     IXGBE_RXDADV_NEXTP_SHIFT;
                        next_buffer = &rx_ring->rx_buffer_info[nextp];
@@ -1276,32 +1396,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                        next_buffer = &rx_ring->rx_buffer_info[i];
                }
 
-               if (staterr & IXGBE_RXD_STAT_EOP) {
-                       if (skb->prev)
-                               skb = ixgbe_transform_rsc_queue(skb,
-                                                               &(rx_ring->rsc_count));
-                       if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
-                               if (IXGBE_RSC_CB(skb)->delay_unmap) {
-                                       dma_unmap_single(&pdev->dev,
-                                                        IXGBE_RSC_CB(skb)->dma,
-                                                        rx_ring->rx_buf_len,
-                                                        DMA_FROM_DEVICE);
-                                       IXGBE_RSC_CB(skb)->dma = 0;
-                                       IXGBE_RSC_CB(skb)->delay_unmap = false;
-                               }
-                               if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
-                                       rx_ring->rsc_count +=
-                                               skb_shinfo(skb)->nr_frags;
-                               else
-                                       rx_ring->rsc_count++;
-                               rx_ring->rsc_flush++;
-                       }
-                       u64_stats_update_begin(&rx_ring->syncp);
-                       rx_ring->stats.packets++;
-                       rx_ring->stats.bytes += skb->len;
-                       u64_stats_update_end(&rx_ring->syncp);
-               } else {
-                       if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+               if (!(staterr & IXGBE_RXD_STAT_EOP)) {
+                       if (ring_is_ps_enabled(rx_ring)) {
                                rx_buffer_info->skb = next_buffer->skb;
                                rx_buffer_info->dma = next_buffer->dma;
                                next_buffer->skb = skb;
@@ -1310,12 +1406,45 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                                skb->next = next_buffer->skb;
                                skb->next->prev = skb;
                        }
-                       rx_ring->non_eop_descs++;
+                       rx_ring->rx_stats.non_eop_descs++;
                        goto next_desc;
                }
 
+               if (skb->prev) {
+                       skb = ixgbe_transform_rsc_queue(skb);
+                       /* if we got here without RSC the packet is invalid */
+                       if (!pkt_is_rsc) {
+                               __pskb_trim(skb, 0);
+                               rx_buffer_info->skb = skb;
+                               goto next_desc;
+                       }
+               }
+
+               if (ring_is_rsc_enabled(rx_ring)) {
+                       if (IXGBE_RSC_CB(skb)->delay_unmap) {
+                               dma_unmap_single(rx_ring->dev,
+                                                IXGBE_RSC_CB(skb)->dma,
+                                                rx_ring->rx_buf_len,
+                                                DMA_FROM_DEVICE);
+                               IXGBE_RSC_CB(skb)->dma = 0;
+                               IXGBE_RSC_CB(skb)->delay_unmap = false;
+                       }
+               }
+               if (pkt_is_rsc) {
+                       if (ring_is_ps_enabled(rx_ring))
+                               rx_ring->rx_stats.rsc_count +=
+                                       skb_shinfo(skb)->nr_frags;
+                       else
+                               rx_ring->rx_stats.rsc_count +=
+                                       IXGBE_RSC_CB(skb)->skb_cnt;
+                       rx_ring->rx_stats.rsc_flush++;
+               }
+
+               /* ERR_MASK will only have valid bits if EOP set */
                if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
-                       dev_kfree_skb_irq(skb);
+                       /* trim packet back to size 0 and recycle it */
+                       __pskb_trim(skb, 0);
+                       rx_buffer_info->skb = skb;
                        goto next_desc;
                }
 
@@ -1325,7 +1454,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                total_rx_bytes += skb->len;
                total_rx_packets++;
 
-               skb->protocol = eth_type_trans(skb, adapter->netdev);
+               skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 #ifdef IXGBE_FCOE
                /* if ddp, not passing to ULD unless for FCP_RSP or error */
                if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
@@ -1339,16 +1468,18 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 next_desc:
                rx_desc->wb.upper.status_error = 0;
 
+               (*work_done)++;
+               if (*work_done >= work_to_do)
+                       break;
+
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
-                       ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+                       ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
                        cleaned_count = 0;
                }
 
                /* use prefetched values */
                rx_desc = next_rxd;
-               rx_buffer_info = &rx_ring->rx_buffer_info[i];
-
                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        }
 
@@ -1356,14 +1487,14 @@ next_desc:
        cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
 
        if (cleaned_count)
-               ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+               ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
 
 #ifdef IXGBE_FCOE
        /* include DDPed FCoE data */
        if (ddp_bytes > 0) {
                unsigned int mss;
 
-               mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) -
+               mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
                        sizeof(struct fc_frame_header) -
                        sizeof(struct fcoe_crc_eof);
                if (mss > 512)
@@ -1375,8 +1506,10 @@ next_desc:
 
        rx_ring->total_packets += total_rx_packets;
        rx_ring->total_bytes += total_rx_bytes;
-
-       return cleaned;
+       u64_stats_update_begin(&rx_ring->syncp);
+       rx_ring->stats.packets += total_rx_packets;
+       rx_ring->stats.bytes += total_rx_bytes;
+       u64_stats_update_end(&rx_ring->syncp);
 }
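/*
 * A minimal sketch of the matching reader side for the counter publication
 * above (kernel context assumed; the function name is hypothetical): the
 * u64_stats_sync sequence lets 32-bit readers in ndo_get_stats64() retry
 * until they see a consistent packets/bytes pair, while the writer only pays
 * for a begin/end bracket around the two additions.
 */
static void example_read_ring_stats(struct ixgbe_ring *ring,
				    u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&ring->syncp);
		*packets = ring->stats.packets;
		*bytes   = ring->stats.bytes;
	} while (u64_stats_fetch_retry(&ring->syncp, start));
}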
 
 static int ixgbe_clean_rxonly(struct napi_struct *, int);
@@ -1390,7 +1523,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *, int);
 static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_q_vector *q_vector;
-       int i, j, q_vectors, v_idx, r_idx;
+       int i, q_vectors, v_idx, r_idx;
        u32 mask;
 
        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
@@ -1406,8 +1539,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
                                       adapter->num_rx_queues);
 
                for (i = 0; i < q_vector->rxr_count; i++) {
-                       j = adapter->rx_ring[r_idx]->reg_idx;
-                       ixgbe_set_ivar(adapter, 0, j, v_idx);
+                       u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx;
+                       ixgbe_set_ivar(adapter, 0, reg_idx, v_idx);
                        r_idx = find_next_bit(q_vector->rxr_idx,
                                              adapter->num_rx_queues,
                                              r_idx + 1);
@@ -1416,8 +1549,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
                                       adapter->num_tx_queues);
 
                for (i = 0; i < q_vector->txr_count; i++) {
-                       j = adapter->tx_ring[r_idx]->reg_idx;
-                       ixgbe_set_ivar(adapter, 1, j, v_idx);
+                       u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx;
+                       ixgbe_set_ivar(adapter, 1, reg_idx, v_idx);
                        r_idx = find_next_bit(q_vector->txr_idx,
                                              adapter->num_tx_queues,
                                              r_idx + 1);
@@ -1448,11 +1581,19 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
                }
        }
 
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
                ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
                               v_idx);
-       else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                ixgbe_set_ivar(adapter, -1, 1, v_idx);
+               break;
+       default:
+               break;
+       }
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
 
        /* set up to autoclear timer, and the vectors */
@@ -1548,12 +1689,15 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
        int v_idx = q_vector->v_idx;
        u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
 
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
                /* must write high and low 16 bits to reset counter */
                itr_reg |= (itr_reg << 16);
-       } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                /*
-                * 82599 can support a value of zero, so allow it for
+                * 82599 and X540 can support a value of zero, so allow it for
                 * max interrupt rate, but there is an errata where it can
                 * not be zero with RSC
                 */
@@ -1566,6 +1710,9 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
                 * immediate assertion of the interrupt
                 */
                itr_reg |= IXGBE_EITR_CNT_WDIS;
+               break;
+       default:
+               break;
        }
        IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
 }
@@ -1573,14 +1720,13 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
 static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
 {
        struct ixgbe_adapter *adapter = q_vector->adapter;
+       int i, r_idx;
        u32 new_itr;
        u8 current_itr, ret_itr;
-       int i, r_idx;
-       struct ixgbe_ring *rx_ring, *tx_ring;
 
        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
-               tx_ring = adapter->tx_ring[r_idx];
+               struct ixgbe_ring *tx_ring = adapter->tx_ring[r_idx];
                ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
                                           q_vector->tx_itr,
                                           tx_ring->total_packets,
@@ -1595,7 +1741,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
 
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
-               rx_ring = adapter->rx_ring[r_idx];
+               struct ixgbe_ring *rx_ring = adapter->rx_ring[r_idx];
                ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
                                           q_vector->rx_itr,
                                           rx_ring->total_packets,
@@ -1626,7 +1772,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
 
        if (new_itr != q_vector->eitr) {
                /* do an exponential smoothing */
-               new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
+               new_itr = ((q_vector->eitr * 9) + new_itr)/10;
 
                /* save the algorithm value here, not the smoothed one */
                q_vector->eitr = new_itr;
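/*
 * An illustrative, compilable check of the smoothing rewrite above (values
 * are arbitrary): both expressions weight the running EITR 9:1 against the
 * new sample, but dividing once at the end truncates once instead of twice,
 * so small samples no longer round away entirely.
 */
#include <stdio.h>

int main(void)
{
	unsigned int eitr = 101, sample = 9;
	unsigned int old_way = ((eitr * 90) / 100) + ((sample * 10) / 100);
	unsigned int new_way = ((eitr * 9) + sample) / 10;

	printf("old: %u  new: %u\n", old_way, new_way);	/* old: 90  new: 91 */
	return 0;
}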
@@ -1694,17 +1840,18 @@ static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
 {
        struct ixgbe_hw *hw = &adapter->hw;
 
+       if (eicr & IXGBE_EICR_GPI_SDP2) {
+               /* Clear the interrupt */
+               IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
+               if (!test_bit(__IXGBE_DOWN, &adapter->state))
+                       schedule_work(&adapter->sfp_config_module_task);
+       }
+
        if (eicr & IXGBE_EICR_GPI_SDP1) {
                /* Clear the interrupt */
                IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
-               schedule_work(&adapter->multispeed_fiber_task);
-       } else if (eicr & IXGBE_EICR_GPI_SDP2) {
-               /* Clear the interrupt */
-               IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
-               schedule_work(&adapter->sfp_config_module_task);
-       } else {
-               /* Interrupt isn't for us... */
-               return;
+               if (!test_bit(__IXGBE_DOWN, &adapter->state))
+                       schedule_work(&adapter->multispeed_fiber_task);
        }
 }
 
@@ -1744,16 +1891,9 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
        if (eicr & IXGBE_EICR_MAILBOX)
                ixgbe_msg_task(adapter);
 
-       if (hw->mac.type == ixgbe_mac_82598EB)
-               ixgbe_check_fan_failure(adapter, eicr);
-
-       if (hw->mac.type == ixgbe_mac_82599EB) {
-               ixgbe_check_sfp_event(adapter, eicr);
-               adapter->interrupt_event = eicr;
-               if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
-                   ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
-                       schedule_work(&adapter->check_overtemp_task);
-
+       switch (hw->mac.type) {
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                /* Handle Flow Director Full threshold interrupt */
                if (eicr & IXGBE_EICR_FLOW_DIR) {
                        int i;
@@ -1763,12 +1903,24 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
                        for (i = 0; i < adapter->num_tx_queues; i++) {
                                struct ixgbe_ring *tx_ring =
                                                            adapter->tx_ring[i];
-                               if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
-                                                      &tx_ring->reinit_state))
+                               if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
+                                                      &tx_ring->state))
                                        schedule_work(&adapter->fdir_reinit_task);
                        }
                }
+               ixgbe_check_sfp_event(adapter, eicr);
+               if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
+                   ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
+                       adapter->interrupt_event = eicr;
+                       schedule_work(&adapter->check_overtemp_task);
+               }
+               break;
+       default:
+               break;
        }
+
+       ixgbe_check_fan_failure(adapter, eicr);
+
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
                IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
 
@@ -1779,15 +1931,24 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
                                           u64 qmask)
 {
        u32 mask;
+       struct ixgbe_hw *hw = &adapter->hw;
 
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
                mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
-       } else {
+               IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                mask = (qmask & 0xFFFFFFFF);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
+               if (mask)
+                       IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
                mask = (qmask >> 32);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
+               if (mask)
+                       IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+               break;
+       default:
+               break;
        }
        /* skip the flush */
 }
@@ -1796,15 +1957,24 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
                                            u64 qmask)
 {
        u32 mask;
+       struct ixgbe_hw *hw = &adapter->hw;
 
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
                mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
-       } else {
+               IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                mask = (qmask & 0xFFFFFFFF);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
+               if (mask)
+                       IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
                mask = (qmask >> 32);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
+               if (mask)
+                       IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
+               break;
+       default:
+               break;
        }
        /* skip the flush */
 }
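/*
 * An illustrative, compilable sketch of the qmask handling above (printf()
 * stands in for the MMIO write): 82599/X540 expose two 32-bit EIMS_EX/EIMC_EX
 * registers, so the 64-bit queue mask is split into low and high halves, and
 * after this change a half that is zero is skipped entirely, saving a
 * register write.
 */
#include <stdint.h>
#include <stdio.h>

static void example_enable_queues(uint64_t qmask)
{
	uint32_t lo = (uint32_t)(qmask & 0xFFFFFFFF);
	uint32_t hi = (uint32_t)(qmask >> 32);

	if (lo)
		printf("write EIMS_EX(0) = 0x%08x\n", (unsigned int)lo);
	if (hi)
		printf("write EIMS_EX(1) = 0x%08x\n", (unsigned int)hi);
}

int main(void)
{
	example_enable_queues(1ULL << 40);	/* only the high half is written */
	return 0;
}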
@@ -1847,8 +2017,13 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
        int r_idx;
        int i;
 
+#ifdef CONFIG_IXGBE_DCA
+       if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+               ixgbe_update_dca(q_vector);
+#endif
+
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-       for (i = 0;  i < q_vector->rxr_count; i++) {
+       for (i = 0; i < q_vector->rxr_count; i++) {
                rx_ring = adapter->rx_ring[r_idx];
                rx_ring->total_bytes = 0;
                rx_ring->total_packets = 0;
@@ -1859,7 +2034,6 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
        if (!q_vector->rxr_count)
                return IRQ_HANDLED;
 
-       /* disable interrupts on this vector only */
        /* EIAM disabled interrupts (on this vector) for us */
        napi_schedule(&q_vector->napi);
 
@@ -1918,13 +2092,14 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
        int work_done = 0;
        long r_idx;
 
-       r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-       rx_ring = adapter->rx_ring[r_idx];
 #ifdef CONFIG_IXGBE_DCA
        if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-               ixgbe_update_rx_dca(adapter, rx_ring);
+               ixgbe_update_dca(q_vector);
 #endif
 
+       r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+       rx_ring = adapter->rx_ring[r_idx];
+
        ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
 
        /* If all Rx work done, exit the polling mode */
@@ -1958,13 +2133,14 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
        long r_idx;
        bool tx_clean_complete = true;
 
+#ifdef CONFIG_IXGBE_DCA
+       if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+               ixgbe_update_dca(q_vector);
+#endif
+
        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
                ring = adapter->tx_ring[r_idx];
-#ifdef CONFIG_IXGBE_DCA
-               if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-                       ixgbe_update_tx_dca(adapter, ring);
-#endif
                tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
                r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
                                      r_idx + 1);
@@ -1977,10 +2153,6 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
                ring = adapter->rx_ring[r_idx];
-#ifdef CONFIG_IXGBE_DCA
-               if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-                       ixgbe_update_rx_dca(adapter, ring);
-#endif
                ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
                                      r_idx + 1);
@@ -2019,13 +2191,14 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
        int work_done = 0;
        long r_idx;
 
-       r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
-       tx_ring = adapter->tx_ring[r_idx];
 #ifdef CONFIG_IXGBE_DCA
        if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-               ixgbe_update_tx_dca(adapter, tx_ring);
+               ixgbe_update_dca(q_vector);
 #endif
 
+       r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+       tx_ring = adapter->tx_ring[r_idx];
+
        if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
                work_done = budget;
 
@@ -2046,24 +2219,27 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
                                     int r_idx)
 {
        struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
+       struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];
 
        set_bit(r_idx, q_vector->rxr_idx);
        q_vector->rxr_count++;
+       rx_ring->q_vector = q_vector;
 }
 
 static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
                                     int t_idx)
 {
        struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
+       struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];
 
        set_bit(t_idx, q_vector->txr_idx);
        q_vector->txr_count++;
+       tx_ring->q_vector = q_vector;
 }
 
 /**
  * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
  * @adapter: board private structure to initialize
- * @vectors: allotted vector count for descriptor rings
  *
  * This function maps descriptor rings to the queue-specific vectors
  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
@@ -2071,9 +2247,9 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
  * group the rings as "efficiently" as possible.  You would add new
  * mapping configurations in here.
  **/
-static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
-                                     int vectors)
+static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
 {
+       int q_vectors;
        int v_start = 0;
        int rxr_idx = 0, txr_idx = 0;
        int rxr_remaining = adapter->num_rx_queues;
@@ -2086,11 +2262,13 @@ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
        if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
                goto out;
 
+       q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
        /*
         * The ideal configuration...
         * We have enough vectors to map one per queue.
         */
-       if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
+       if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
                for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
                        map_vector_to_rxq(adapter, v_start, rxr_idx);
 
@@ -2106,23 +2284,20 @@ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
         * multiple queues per vector.
         */
        /* Re-adjusting *qpv takes care of the remainder. */
-       for (i = v_start; i < vectors; i++) {
-               rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
+       for (i = v_start; i < q_vectors; i++) {
+               rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
                for (j = 0; j < rqpv; j++) {
                        map_vector_to_rxq(adapter, i, rxr_idx);
                        rxr_idx++;
                        rxr_remaining--;
                }
-       }
-       for (i = v_start; i < vectors; i++) {
-               tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
+               tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
                for (j = 0; j < tqpv; j++) {
                        map_vector_to_txq(adapter, i, txr_idx);
                        txr_idx++;
                        txr_remaining--;
                }
        }
-
 out:
        return err;
 }
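/*
 * An illustrative, compilable walk-through of the "re-adjusting *qpv" loop
 * above: with more rings than vectors, DIV_ROUND_UP(remaining, vectors_left)
 * gives each vector its share and the remainder shrinks as the loop advances,
 * spreading the rings as evenly as integer math allows.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int rxr_remaining = 10, q_vectors = 4, rxr_idx = 0;

	for (int v = 0; v < q_vectors; v++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v);

		printf("vector %d gets %d ring(s):", v, rqpv);
		for (int j = 0; j < rqpv; j++, rxr_idx++, rxr_remaining--)
			printf(" %d", rxr_idx);
		printf("\n");
	}
	return 0;	/* prints 3, 3, 2, 2 rings for vectors 0..3 */
}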
@@ -2144,30 +2319,36 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
        /* Decrement for Other and TCP Timer vectors */
        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
-       /* Map the Tx/Rx rings to the vectors we were allotted. */
-       err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
+       err = ixgbe_map_rings_to_vectors(adapter);
        if (err)
-               goto out;
+               return err;
 
-#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
-                        (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
-                        &ixgbe_msix_clean_many)
+#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count)        \
+                                         ? &ixgbe_msix_clean_many : \
+                         (_v)->rxr_count ? &ixgbe_msix_clean_rx   : \
+                         (_v)->txr_count ? &ixgbe_msix_clean_tx   : \
+                         NULL)
        for (vector = 0; vector < q_vectors; vector++) {
-               handler = SET_HANDLER(adapter->q_vector[vector]);
+               struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
+               handler = SET_HANDLER(q_vector);
 
                if (handler == &ixgbe_msix_clean_rx) {
-                       sprintf(adapter->name[vector], "%s-%s-%d",
+                       sprintf(q_vector->name, "%s-%s-%d",
                                netdev->name, "rx", ri++);
                } else if (handler == &ixgbe_msix_clean_tx) {
-                       sprintf(adapter->name[vector], "%s-%s-%d",
+                       sprintf(q_vector->name, "%s-%s-%d",
                                netdev->name, "tx", ti++);
-               } else
-                       sprintf(adapter->name[vector], "%s-%s-%d",
-                               netdev->name, "TxRx", vector);
-
+               } else if (handler == &ixgbe_msix_clean_many) {
+                       sprintf(q_vector->name, "%s-%s-%d",
+                               netdev->name, "TxRx", ri++);
+                       ti++;
+               } else {
+                       /* skip this unused q_vector */
+                       continue;
+               }
                err = request_irq(adapter->msix_entries[vector].vector,
-                                 handler, 0, adapter->name[vector],
-                                 adapter->q_vector[vector]);
+                                 handler, 0, q_vector->name,
+                                 q_vector);
                if (err) {
                        e_err(probe, "request_irq failed for MSIX interrupt "
                              "Error: %d\n", err);
@@ -2175,9 +2356,9 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
                }
        }
 
-       sprintf(adapter->name[vector], "%s:lsc", netdev->name);
+       sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name);
        err = request_irq(adapter->msix_entries[vector].vector,
-                         ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
+                         ixgbe_msix_lsc, 0, adapter->lsc_int_name, netdev);
        if (err) {
                e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
                goto free_queue_irqs;
@@ -2193,17 +2374,16 @@ free_queue_irqs:
        pci_disable_msix(adapter->pdev);
        kfree(adapter->msix_entries);
        adapter->msix_entries = NULL;
-out:
        return err;
 }
 
 static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
-       u8 current_itr;
-       u32 new_itr = q_vector->eitr;
        struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
        struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
+       u32 new_itr = q_vector->eitr;
+       u8 current_itr;
 
        q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
                                            q_vector->tx_itr,
@@ -2233,9 +2413,9 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
 
        if (new_itr != q_vector->eitr) {
                /* do an exponential smoothing */
-               new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
+               new_itr = ((q_vector->eitr * 9) + new_itr)/10;
 
-               /* save the algorithm value here, not the smoothed one */
+               /* save the algorithm value here */
                q_vector->eitr = new_itr;
 
                ixgbe_write_eitr(q_vector);
@@ -2256,12 +2436,17 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
                mask |= IXGBE_EIMS_GPI_SDP0;
        if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
                mask |= IXGBE_EIMS_GPI_SDP1;
-       if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                mask |= IXGBE_EIMS_ECC;
                mask |= IXGBE_EIMS_GPI_SDP1;
                mask |= IXGBE_EIMS_GPI_SDP2;
                if (adapter->num_vfs)
                        mask |= IXGBE_EIMS_MAILBOX;
+               break;
+       default:
+               break;
        }
        if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
            adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@ -2317,13 +2502,21 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
        if (eicr & IXGBE_EICR_LSC)
                ixgbe_check_lsc(adapter);
 
-       if (hw->mac.type == ixgbe_mac_82599EB)
+       switch (hw->mac.type) {
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                ixgbe_check_sfp_event(adapter, eicr);
+               if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
+                   ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
+                       adapter->interrupt_event = eicr;
+                       schedule_work(&adapter->check_overtemp_task);
+               }
+               break;
+       default:
+               break;
+       }
 
        ixgbe_check_fan_failure(adapter, eicr);
-       if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
-           ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
-               schedule_work(&adapter->check_overtemp_task);
 
        if (napi_schedule_prep(&(q_vector->napi))) {
                adapter->tx_ring[0]->total_packets = 0;
@@ -2416,14 +2609,20 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
  **/
 static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
 {
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
-       } else {
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
                if (adapter->num_vfs > 32)
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
+               break;
+       default:
+               break;
        }
        IXGBE_WRITE_FLUSH(&adapter->hw);
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -2469,7 +2668,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
        u64 tdba = ring->dma;
        int wait_loop = 10;
        u32 txdctl;
-       u16 reg_idx = ring->reg_idx;
+       u8 reg_idx = ring->reg_idx;
 
        /* disable queue to avoid issues while updating state */
        txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
@@ -2484,8 +2683,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
                        ring->count * sizeof(union ixgbe_adv_tx_desc));
        IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
        IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
-       ring->head = IXGBE_TDH(reg_idx);
-       ring->tail = IXGBE_TDT(reg_idx);
+       ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
 
        /* configure fetching thresholds */
        if (adapter->rx_itr_setting == 0) {
@@ -2501,7 +2699,16 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
        }
 
        /* reinitialize flowdirector state */
-       set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state);
+       if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
+           adapter->atr_sample_rate) {
+               ring->atr_sample_rate = adapter->atr_sample_rate;
+               ring->atr_count = 0;
+               set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
+       } else {
+               ring->atr_sample_rate = 0;
+       }
+
+       clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
 
        /* enable queue */
        txdctl |= IXGBE_TXDCTL_ENABLE;
@@ -2592,16 +2799,22 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *rx_ring)
 {
        u32 srrctl;
-       int index;
-       struct ixgbe_ring_feature *feature = adapter->ring_feature;
+       u8 reg_idx = rx_ring->reg_idx;
 
-       index = rx_ring->reg_idx;
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-               unsigned long mask;
-               mask = (unsigned long) feature[RING_F_RSS].mask;
-               index = index & mask;
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB: {
+               struct ixgbe_ring_feature *feature = adapter->ring_feature;
+               const int mask = feature[RING_F_RSS].mask;
+               reg_idx = reg_idx & mask;
+       }
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+       default:
+               break;
        }
-       srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
+
+       srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));
 
        srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
        srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
@@ -2611,7 +2824,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
        srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
                  IXGBE_SRRCTL_BSIZEHDR_MASK;
 
-       if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+       if (ring_is_ps_enabled(rx_ring)) {
 #if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
                srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 #else
@@ -2624,7 +2837,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
                srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
        }
 
-       IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
 }
 
 static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
@@ -2693,20 +2906,37 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
        IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
 }
 
+/**
+ * ixgbe_clear_rscctl - disable RSC for the indicated ring
+ * @adapter: address of board private structure
+ * @ring: structure containing ring specific data
+ **/
+void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
+                        struct ixgbe_ring *ring)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 rscctrl;
+       u8 reg_idx = ring->reg_idx;
+
+       rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
+       rscctrl &= ~IXGBE_RSCCTL_RSCEN;
+       IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
+}
+
 /**
  * ixgbe_configure_rscctl - enable RSC for the indicated ring
  * @adapter:    address of board private structure
  * @index:      index of ring to set
  **/
-static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
+void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *ring)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        u32 rscctrl;
        int rx_buf_len;
-       u16 reg_idx = ring->reg_idx;
+       u8 reg_idx = ring->reg_idx;
 
-       if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
+       if (!ring_is_rsc_enabled(ring))
                return;
 
        rx_buf_len = ring->rx_buf_len;
@@ -2717,7 +2947,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
         * total size of max desc * buf_len is not greater
         * than 65535
         */
-       if (ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+       if (ring_is_ps_enabled(ring)) {
 #if (MAX_SKB_FRAGS > 16)
                rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
 #elif (MAX_SKB_FRAGS > 8)
@@ -2770,9 +3000,9 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
                                       struct ixgbe_ring *ring)
 {
        struct ixgbe_hw *hw = &adapter->hw;
-       int reg_idx = ring->reg_idx;
        int wait_loop = IXGBE_MAX_RX_DESC_POLL;
        u32 rxdctl;
+       u8 reg_idx = ring->reg_idx;
 
        /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
        if (hw->mac.type == ixgbe_mac_82598EB &&
@@ -2796,7 +3026,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
        struct ixgbe_hw *hw = &adapter->hw;
        u64 rdba = ring->dma;
        u32 rxdctl;
-       u16 reg_idx = ring->reg_idx;
+       u8 reg_idx = ring->reg_idx;
 
        /* disable queue to avoid issues while updating state */
        rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
@@ -2810,8 +3040,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
                        ring->count * sizeof(union ixgbe_adv_rx_desc));
        IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
        IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
-       ring->head = IXGBE_RDH(reg_idx);
-       ring->tail = IXGBE_RDT(reg_idx);
+       ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
 
        ixgbe_configure_srrctl(adapter, ring);
        ixgbe_configure_rscctl(adapter, ring);
@@ -2833,7 +3062,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
 
        ixgbe_rx_desc_queue_enable(adapter, ring);
-       ixgbe_alloc_rx_buffers(adapter, ring, IXGBE_DESC_UNUSED(ring));
+       ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring));
 }
 
 static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
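
Instead of remembering register offsets in ring->head/ring->tail, the Rx ring setup above now stores a ready-to-use MMIO address (hw->hw_addr + IXGBE_RDT(reg_idx)), so the buffer-refill path can write the tail without reaching back into the adapter. A rough user-space sketch of the idea, with an ordinary array standing in for BAR space (offsets and names illustrative):

#include <stdint.h>
#include <stdio.h>

#define RDT(q)  (0x1018 + (q) * 0x40)   /* per-queue tail offset (illustrative) */

struct rx_ring {
        volatile uint32_t *tail;        /* precomputed pointer into "BAR" space */
        uint16_t next_to_use;
};

int main(void)
{
        static uint32_t bar[0x4000];    /* stands in for hw->hw_addr */
        struct rx_ring ring = { .next_to_use = 64 };

        /* computed once at ring setup, like ring->tail = hw_addr + RDT(reg_idx) */
        ring.tail = (volatile uint32_t *)((uint8_t *)bar + RDT(3));

        *ring.tail = ring.next_to_use;  /* hot path: plain tail bump */
        printf("tail register now %u\n", bar[RDT(3) / 4]);
        return 0;
}
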
@@ -2956,24 +3185,32 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
                rx_ring->rx_buf_len = rx_buf_len;
 
                if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
-                       rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
+                       set_ring_ps_enabled(rx_ring);
+               else
+                       clear_ring_ps_enabled(rx_ring);
+
+               if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+                       set_ring_rsc_enabled(rx_ring);
                else
-                       rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
+                       clear_ring_rsc_enabled(rx_ring);
 
 #ifdef IXGBE_FCOE
                if (netdev->features & NETIF_F_FCOE_MTU) {
                        struct ixgbe_ring_feature *f;
                        f = &adapter->ring_feature[RING_F_FCOE];
                        if ((i >= f->mask) && (i < f->mask + f->indices)) {
-                               rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
+                               clear_ring_ps_enabled(rx_ring);
                                if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
                                        rx_ring->rx_buf_len =
                                                IXGBE_FCOE_JUMBO_FRAME_SIZE;
+                       } else if (!ring_is_rsc_enabled(rx_ring) &&
+                                  !ring_is_ps_enabled(rx_ring)) {
+                               rx_ring->rx_buf_len =
+                                               IXGBE_FCOE_JUMBO_FRAME_SIZE;
                        }
                }
 #endif /* IXGBE_FCOE */
        }
-
 }
 
 static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
@@ -2996,6 +3233,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
                rdrxctl |= IXGBE_RDRXCTL_MVMEN;
                break;
        case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                /* Disable RSC for ACK packets */
                IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
                   (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
@@ -3123,6 +3361,7 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
                IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
                break;
        case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        j = adapter->rx_ring[i]->reg_idx;
                        vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
@@ -3152,6 +3391,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
                IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
                break;
        case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        j = adapter->rx_ring[i]->reg_idx;
                        vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
@@ -3349,8 +3589,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
-       u32 txdctl;
-       int i, j;
 
        if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
                if (hw->mac.type == ixgbe_mac_82598EB)
@@ -3366,25 +3604,18 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
                max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
 #endif
 
-       ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
+       ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
                                        DCB_TX_CONFIG);
-       ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
+       ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
                                        DCB_RX_CONFIG);
 
-       /* reconfigure the hardware */
-       ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
-
-       for (i = 0; i < adapter->num_tx_queues; i++) {
-               j = adapter->tx_ring[i]->reg_idx;
-               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
-               /* PThresh workaround for Tx hang with DFP enabled. */
-               txdctl |= 32;
-               IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
-       }
        /* Enable VLAN tag insert/strip */
        adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
 
        hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
+
+       /* reconfigure the hardware */
+       ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
 }
 
 #endif
@@ -3516,8 +3747,9 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
                case ixgbe_mac_82598EB:
                        IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
                        break;
-               default:
                case ixgbe_mac_82599EB:
+               case ixgbe_mac_X540:
+               default:
                        IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
                        IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
                        break;
@@ -3562,12 +3794,20 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
                ixgbe_configure_msi_and_legacy(adapter);
 
        /* enable the optics */
-       if (hw->phy.multispeed_fiber)
+       if (hw->phy.multispeed_fiber && hw->mac.ops.enable_tx_laser)
                hw->mac.ops.enable_tx_laser(hw);
 
        clear_bit(__IXGBE_DOWN, &adapter->state);
        ixgbe_napi_enable_all(adapter);
 
+       if (ixgbe_is_sfp(hw)) {
+               ixgbe_sfp_link_config(adapter);
+       } else {
+               err = ixgbe_non_sfp_link_config(hw);
+               if (err)
+                       e_err(probe, "link_config FAILED %d\n", err);
+       }
+
        /* clear any pending interrupts, may auto mask */
        IXGBE_READ_REG(hw, IXGBE_EICR);
        ixgbe_irq_enable(adapter, true, true);
@@ -3588,28 +3828,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
         * devices wouldn't have their type identified yet. We need to
         * kick off the SFP+ module setup first, then try to bring up link.
         * If we're not hot-pluggable SFP+, we just need to configure link
-        * and bring it up.
-        */
-       if (hw->phy.type == ixgbe_phy_unknown) {
-               err = hw->phy.ops.identify(hw);
-               if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
-                       /*
-                        * Take the device down and schedule the sfp tasklet
-                        * which will unregister_netdev and log it.
-                        */
-                       ixgbe_down(adapter);
-                       schedule_work(&adapter->sfp_config_module_task);
-                       return err;
-               }
-       }
-
-       if (ixgbe_is_sfp(hw)) {
-               ixgbe_sfp_link_config(adapter);
-       } else {
-               err = ixgbe_non_sfp_link_config(hw);
-               if (err)
-                       e_err(probe, "link_config FAILED %d\n", err);
-       }
+        * and bring it up.
+        */
+       if (hw->phy.type == ixgbe_phy_unknown)
+               schedule_work(&adapter->sfp_config_module_task);
 
        /* enable transmits */
        netif_tx_start_all_queues(adapter->netdev);
@@ -3687,15 +3909,13 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
 
 /**
  * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
- * @adapter: board private structure
  * @rx_ring: ring to free buffers from
  **/
-static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
-                               struct ixgbe_ring *rx_ring)
+static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 {
-       struct pci_dev *pdev = adapter->pdev;
+       struct device *dev = rx_ring->dev;
        unsigned long size;
-       unsigned int i;
+       u16 i;
 
        /* ring already cleared, nothing to do */
        if (!rx_ring->rx_buffer_info)
@@ -3707,7 +3927,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
 
                rx_buffer_info = &rx_ring->rx_buffer_info[i];
                if (rx_buffer_info->dma) {
-                       dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
+                       dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
                                         rx_ring->rx_buf_len,
                                         DMA_FROM_DEVICE);
                        rx_buffer_info->dma = 0;
@@ -3718,7 +3938,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
                        do {
                                struct sk_buff *this = skb;
                                if (IXGBE_RSC_CB(this)->delay_unmap) {
-                                       dma_unmap_single(&pdev->dev,
+                                       dma_unmap_single(dev,
                                                         IXGBE_RSC_CB(this)->dma,
                                                         rx_ring->rx_buf_len,
                                                         DMA_FROM_DEVICE);
@@ -3732,7 +3952,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
                if (!rx_buffer_info->page)
                        continue;
                if (rx_buffer_info->page_dma) {
-                       dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+                       dma_unmap_page(dev, rx_buffer_info->page_dma,
                                       PAGE_SIZE / 2, DMA_FROM_DEVICE);
                        rx_buffer_info->page_dma = 0;
                }
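
Both ring-cleanup helpers in this area lose their adapter argument: each ring now carries the struct device used for DMA mapping (rx_ring->dev), so dma_unmap_single()/dma_unmap_page() can be driven from the ring alone. A small sketch of that kind of refactor, with a back-pointer stored at allocation time (types illustrative):

#include <stdlib.h>
#include <stdio.h>

struct device { const char *name; };

struct ring {
        struct device *dev;     /* back-pointer set once when the ring is created */
        void *buffer_info;
        unsigned int count;
};

/* before: clean_ring(struct adapter *a, struct ring *r), using a->pdev throughout */
static void clean_ring(struct ring *r)
{
        if (!r->buffer_info)
                return;                         /* already cleared, nothing to do */
        printf("unmapping %u buffers via %s\n", r->count, r->dev->name);
        free(r->buffer_info);
        r->buffer_info = NULL;
}

int main(void)
{
        struct device pci = { .name = "0000:03:00.0" };
        struct ring rx = { .dev = &pci, .count = 512 };

        rx.buffer_info = calloc(rx.count, 16);
        clean_ring(&rx);                        /* no adapter argument needed */
        return 0;
}
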
@@ -3749,24 +3969,17 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
 
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
-
-       if (rx_ring->head)
-               writel(0, adapter->hw.hw_addr + rx_ring->head);
-       if (rx_ring->tail)
-               writel(0, adapter->hw.hw_addr + rx_ring->tail);
 }
 
 /**
  * ixgbe_clean_tx_ring - Free Tx Buffers
- * @adapter: board private structure
  * @tx_ring: ring to be cleaned
  **/
-static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
-                               struct ixgbe_ring *tx_ring)
+static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
 {
        struct ixgbe_tx_buffer *tx_buffer_info;
        unsigned long size;
-       unsigned int i;
+       u16 i;
 
        /* ring already cleared, nothing to do */
        if (!tx_ring->tx_buffer_info)
@@ -3775,7 +3988,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
        /* Free all the Tx ring sk_buffs */
        for (i = 0; i < tx_ring->count; i++) {
                tx_buffer_info = &tx_ring->tx_buffer_info[i];
-               ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+               ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
        }
 
        size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
@@ -3786,11 +3999,6 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
 
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
-
-       if (tx_ring->head)
-               writel(0, adapter->hw.hw_addr + tx_ring->head);
-       if (tx_ring->tail)
-               writel(0, adapter->hw.hw_addr + tx_ring->tail);
 }
 
 /**
@@ -3802,7 +4010,7 @@ static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
        int i;
 
        for (i = 0; i < adapter->num_rx_queues; i++)
-               ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]);
+               ixgbe_clean_rx_ring(adapter->rx_ring[i]);
 }
 
 /**
@@ -3814,7 +4022,7 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
        int i;
 
        for (i = 0; i < adapter->num_tx_queues; i++)
-               ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]);
+               ixgbe_clean_tx_ring(adapter->tx_ring[i]);
 }
 
 void ixgbe_down(struct ixgbe_adapter *adapter)
@@ -3823,7 +4031,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
        u32 rxctrl;
        u32 txdctl;
-       int i, j;
+       int i;
        int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
        /* signal that we are down to the interrupt handler */
@@ -3881,19 +4089,25 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 
        /* disable transmits in the hardware now that interrupts are off */
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               j = adapter->tx_ring[i]->reg_idx;
-               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
-               IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
+               u8 reg_idx = adapter->tx_ring[i]->reg_idx;
+               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+               IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
                                (txdctl & ~IXGBE_TXDCTL_ENABLE));
        }
        /* Disable the Tx DMA engine on 82599 */
-       if (hw->mac.type == ixgbe_mac_82599EB)
+       switch (hw->mac.type) {
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
                                (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
                                 ~IXGBE_DMATXCTL_TE));
+               break;
+       default:
+               break;
+       }
 
        /* power down the optics */
-       if (hw->phy.multispeed_fiber)
+       if (hw->phy.multispeed_fiber && hw->mac.ops.disable_tx_laser)
                hw->mac.ops.disable_tx_laser(hw);
 
        /* clear n-tuple filters that are cached */
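
The optics handling above now calls enable_tx_laser/disable_tx_laser only when the ops pointer is actually populated, since not every supported MAC provides those hooks. A short sketch of guarding optional function pointers in an ops table (types and names illustrative):

#include <stdio.h>

struct mac_ops {
        void (*disable_tx_laser)(void *hw);     /* optional hook; may be NULL */
};

struct hw {
        int multispeed_fiber;
        struct mac_ops ops;
};

static void laser_off(void *hw) { (void)hw; puts("laser off"); }

static void power_down_optics(struct hw *hw)
{
        /* call the hook only if this MAC actually implements it */
        if (hw->multispeed_fiber && hw->ops.disable_tx_laser)
                hw->ops.disable_tx_laser(hw);
}

int main(void)
{
        struct hw with = { .multispeed_fiber = 1,
                           .ops.disable_tx_laser = laser_off };
        struct hw without = { .multispeed_fiber = 1 };  /* hook left NULL */

        power_down_optics(&with);       /* prints */
        power_down_optics(&without);    /* safely skipped */
        return 0;
}
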
@@ -3925,10 +4139,8 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
        int tx_clean_complete, work_done = 0;
 
 #ifdef CONFIG_IXGBE_DCA
-       if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
-               ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]);
-               ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]);
-       }
+       if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+               ixgbe_update_dca(q_vector);
 #endif
 
        tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
@@ -3956,6 +4168,8 @@ static void ixgbe_tx_timeout(struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
+       adapter->tx_timeout_count++;
+
        /* Do the reset outside of interrupt context */
        schedule_work(&adapter->reset_task);
 }
@@ -3970,8 +4184,6 @@ static void ixgbe_reset_task(struct work_struct *work)
            test_bit(__IXGBE_RESETTING, &adapter->state))
                return;
 
-       adapter->tx_timeout_count++;
-
        ixgbe_dump(adapter);
        netdev_err(adapter->netdev, "Reset adapter\n");
        ixgbe_reinit_locked(adapter);
@@ -4221,19 +4433,16 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
 static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
 {
        int i;
-       bool ret = false;
 
-       if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-               for (i = 0; i < adapter->num_rx_queues; i++)
-                       adapter->rx_ring[i]->reg_idx = i;
-               for (i = 0; i < adapter->num_tx_queues; i++)
-                       adapter->tx_ring[i]->reg_idx = i;
-               ret = true;
-       } else {
-               ret = false;
-       }
+       if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+               return false;
 
-       return ret;
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               adapter->rx_ring[i]->reg_idx = i;
+       for (i = 0; i < adapter->num_tx_queues; i++)
+               adapter->tx_ring[i]->reg_idx = i;
+
+       return true;
 }
 
 #ifdef CONFIG_IXGBE_DCB
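
ixgbe_cache_ring_rss() is flattened from an if/else around a ret variable into an early return when RSS is disabled; the DCB and FCoE variants below get the same guard-clause rewrite. A generic before/after of that style:

#include <stdbool.h>
#include <stdio.h>

/* nested form: the useful work sits one indent level deep */
static bool cache_nested(bool enabled, int *idx, int n)
{
        bool ret = false;

        if (enabled) {
                for (int i = 0; i < n; i++)
                        idx[i] = i;
                ret = true;
        }
        return ret;
}

/* guard-clause form: bail out early, keep the main path flat */
static bool cache_flat(bool enabled, int *idx, int n)
{
        if (!enabled)
                return false;

        for (int i = 0; i < n; i++)
                idx[i] = i;
        return true;
}

int main(void)
{
        int idx[4];

        printf("%d %d\n", cache_nested(true, idx, 4), cache_flat(true, idx, 4));
        return 0;
}
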
@@ -4250,71 +4459,67 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
        bool ret = false;
        int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
 
-       if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-               if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-                       /* the number of queues is assumed to be symmetric */
-                       for (i = 0; i < dcb_i; i++) {
-                               adapter->rx_ring[i]->reg_idx = i << 3;
-                               adapter->tx_ring[i]->reg_idx = i << 2;
-                       }
-                       ret = true;
-               } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-                       if (dcb_i == 8) {
-                               /*
-                                * Tx TC0 starts at: descriptor queue 0
-                                * Tx TC1 starts at: descriptor queue 32
-                                * Tx TC2 starts at: descriptor queue 64
-                                * Tx TC3 starts at: descriptor queue 80
-                                * Tx TC4 starts at: descriptor queue 96
-                                * Tx TC5 starts at: descriptor queue 104
-                                * Tx TC6 starts at: descriptor queue 112
-                                * Tx TC7 starts at: descriptor queue 120
-                                *
-                                * Rx TC0-TC7 are offset by 16 queues each
-                                */
-                               for (i = 0; i < 3; i++) {
-                                       adapter->tx_ring[i]->reg_idx = i << 5;
-                                       adapter->rx_ring[i]->reg_idx = i << 4;
-                               }
-                               for ( ; i < 5; i++) {
-                                       adapter->tx_ring[i]->reg_idx =
-                                                                ((i + 2) << 4);
-                                       adapter->rx_ring[i]->reg_idx = i << 4;
-                               }
-                               for ( ; i < dcb_i; i++) {
-                                       adapter->tx_ring[i]->reg_idx =
-                                                                ((i + 8) << 3);
-                                       adapter->rx_ring[i]->reg_idx = i << 4;
-                               }
+       if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+               return false;
 
-                               ret = true;
-                       } else if (dcb_i == 4) {
-                               /*
-                                * Tx TC0 starts at: descriptor queue 0
-                                * Tx TC1 starts at: descriptor queue 64
-                                * Tx TC2 starts at: descriptor queue 96
-                                * Tx TC3 starts at: descriptor queue 112
-                                *
-                                * Rx TC0-TC3 are offset by 32 queues each
-                                */
-                               adapter->tx_ring[0]->reg_idx = 0;
-                               adapter->tx_ring[1]->reg_idx = 64;
-                               adapter->tx_ring[2]->reg_idx = 96;
-                               adapter->tx_ring[3]->reg_idx = 112;
-                               for (i = 0 ; i < dcb_i; i++)
-                                       adapter->rx_ring[i]->reg_idx = i << 5;
-
-                               ret = true;
-                       } else {
-                               ret = false;
+       /* the number of queues is assumed to be symmetric */
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
+               for (i = 0; i < dcb_i; i++) {
+                       adapter->rx_ring[i]->reg_idx = i << 3;
+                       adapter->tx_ring[i]->reg_idx = i << 2;
+               }
+               ret = true;
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               if (dcb_i == 8) {
+                       /*
+                        * Tx TC0 starts at: descriptor queue 0
+                        * Tx TC1 starts at: descriptor queue 32
+                        * Tx TC2 starts at: descriptor queue 64
+                        * Tx TC3 starts at: descriptor queue 80
+                        * Tx TC4 starts at: descriptor queue 96
+                        * Tx TC5 starts at: descriptor queue 104
+                        * Tx TC6 starts at: descriptor queue 112
+                        * Tx TC7 starts at: descriptor queue 120
+                        *
+                        * Rx TC0-TC7 are offset by 16 queues each
+                        */
+                       for (i = 0; i < 3; i++) {
+                               adapter->tx_ring[i]->reg_idx = i << 5;
+                               adapter->rx_ring[i]->reg_idx = i << 4;
                        }
-               } else {
-                       ret = false;
+                       for ( ; i < 5; i++) {
+                               adapter->tx_ring[i]->reg_idx = ((i + 2) << 4);
+                               adapter->rx_ring[i]->reg_idx = i << 4;
+                       }
+                       for ( ; i < dcb_i; i++) {
+                               adapter->tx_ring[i]->reg_idx = ((i + 8) << 3);
+                               adapter->rx_ring[i]->reg_idx = i << 4;
+                       }
+                       ret = true;
+               } else if (dcb_i == 4) {
+                       /*
+                        * Tx TC0 starts at: descriptor queue 0
+                        * Tx TC1 starts at: descriptor queue 64
+                        * Tx TC2 starts at: descriptor queue 96
+                        * Tx TC3 starts at: descriptor queue 112
+                        *
+                        * Rx TC0-TC3 are offset by 32 queues each
+                        */
+                       adapter->tx_ring[0]->reg_idx = 0;
+                       adapter->tx_ring[1]->reg_idx = 64;
+                       adapter->tx_ring[2]->reg_idx = 96;
+                       adapter->tx_ring[3]->reg_idx = 112;
+                       for (i = 0 ; i < dcb_i; i++)
+                               adapter->rx_ring[i]->reg_idx = i << 5;
+                       ret = true;
                }
-       } else {
-               ret = false;
+               break;
+       default:
+               break;
        }
-
        return ret;
 }
 #endif
@@ -4354,55 +4559,55 @@ static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
  */
 static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
 {
-       int i, fcoe_rx_i = 0, fcoe_tx_i = 0;
-       bool ret = false;
        struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+       int i;
+       u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
+
+       if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+               return false;
 
-       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
 #ifdef CONFIG_IXGBE_DCB
-               if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-                       struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+       if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+               struct ixgbe_fcoe *fcoe = &adapter->fcoe;
 
-                       ixgbe_cache_ring_dcb(adapter);
-                       /* find out queues in TC for FCoE */
-                       fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
-                       fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
-                       /*
-                        * In 82599, the number of Tx queues for each traffic
-                        * class for both 8-TC and 4-TC modes are:
-                        * TCs  : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
-                        * 8 TCs:  32  32  16  16   8   8   8   8
-                        * 4 TCs:  64  64  32  32
-                        * We have max 8 queues for FCoE, where 8 the is
-                        * FCoE redirection table size. If TC for FCoE is
-                        * less than or equal to TC3, we have enough queues
-                        * to add max of 8 queues for FCoE, so we start FCoE
-                        * tx descriptor from the next one, i.e., reg_idx + 1.
-                        * If TC for FCoE is above TC3, implying 8 TC mode,
-                        * and we need 8 for FCoE, we have to take all queues
-                        * in that traffic class for FCoE.
-                        */
-                       if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
-                               fcoe_tx_i--;
-               }
+               ixgbe_cache_ring_dcb(adapter);
+               /* find out queues in TC for FCoE */
+               fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
+               fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
+               /*
+                * In 82599, the number of Tx queues for each traffic
+                * class for both 8-TC and 4-TC modes are:
+                * TCs  : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
+                * 8 TCs:  32  32  16  16   8   8   8   8
+                * 4 TCs:  64  64  32  32
+                * We have max 8 queues for FCoE, where 8 is the
+                * FCoE redirection table size. If TC for FCoE is
+                * less than or equal to TC3, we have enough queues
+                * to add max of 8 queues for FCoE, so we start FCoE
+                * Tx queue from the next one, i.e., reg_idx + 1.
+                * If TC for FCoE is above TC3, implying 8 TC mode,
+                * and we need 8 for FCoE, we have to take all queues
+                * in that traffic class for FCoE.
+                */
+               if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
+                       fcoe_tx_i--;
+       }
 #endif /* CONFIG_IXGBE_DCB */
-               if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-                       if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
-                           (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
-                               ixgbe_cache_ring_fdir(adapter);
-                       else
-                               ixgbe_cache_ring_rss(adapter);
+       if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+               if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
+                   (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+                       ixgbe_cache_ring_fdir(adapter);
+               else
+                       ixgbe_cache_ring_rss(adapter);
 
-                       fcoe_rx_i = f->mask;
-                       fcoe_tx_i = f->mask;
-               }
-               for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
-                       adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
-                       adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
-               }
-               ret = true;
+               fcoe_rx_i = f->mask;
+               fcoe_tx_i = f->mask;
        }
-       return ret;
+       for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
+               adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
+               adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
+       }
+       return true;
 }
 
 #endif /* IXGBE_FCOE */
@@ -4471,65 +4676,55 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
  **/
 static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
 {
-       int i;
-       int orig_node = adapter->node;
+       int rx = 0, tx = 0, nid = adapter->node;
 
-       for (i = 0; i < adapter->num_tx_queues; i++) {
-               struct ixgbe_ring *ring = adapter->tx_ring[i];
-               if (orig_node == -1) {
-                       int cur_node = next_online_node(adapter->node);
-                       if (cur_node == MAX_NUMNODES)
-                               cur_node = first_online_node;
-                       adapter->node = cur_node;
-               }
-               ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
-                                   adapter->node);
+       if (nid < 0 || !node_online(nid))
+               nid = first_online_node;
+
+       for (; tx < adapter->num_tx_queues; tx++) {
+               struct ixgbe_ring *ring;
+
+               ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
                if (!ring)
-                       ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
+                       ring = kzalloc(sizeof(*ring), GFP_KERNEL);
                if (!ring)
-                       goto err_tx_ring_allocation;
+                       goto err_allocation;
                ring->count = adapter->tx_ring_count;
-               ring->queue_index = i;
-               ring->numa_node = adapter->node;
+               ring->queue_index = tx;
+               ring->numa_node = nid;
+               ring->dev = &adapter->pdev->dev;
+               ring->netdev = adapter->netdev;
 
-               adapter->tx_ring[i] = ring;
+               adapter->tx_ring[tx] = ring;
        }
 
-       /* Restore the adapter's original node */
-       adapter->node = orig_node;
+       for (; rx < adapter->num_rx_queues; rx++) {
+               struct ixgbe_ring *ring;
 
-       for (i = 0; i < adapter->num_rx_queues; i++) {
-               struct ixgbe_ring *ring = adapter->rx_ring[i];
-               if (orig_node == -1) {
-                       int cur_node = next_online_node(adapter->node);
-                       if (cur_node == MAX_NUMNODES)
-                               cur_node = first_online_node;
-                       adapter->node = cur_node;
-               }
-               ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
-                                   adapter->node);
+               ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
                if (!ring)
-                       ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
+                       ring = kzalloc(sizeof(*ring), GFP_KERNEL);
                if (!ring)
-                       goto err_rx_ring_allocation;
+                       goto err_allocation;
                ring->count = adapter->rx_ring_count;
-               ring->queue_index = i;
-               ring->numa_node = adapter->node;
+               ring->queue_index = rx;
+               ring->numa_node = nid;
+               ring->dev = &adapter->pdev->dev;
+               ring->netdev = adapter->netdev;
 
-               adapter->rx_ring[i] = ring;
+               adapter->rx_ring[rx] = ring;
        }
 
-       /* Restore the adapter's original node */
-       adapter->node = orig_node;
-
        ixgbe_cache_ring_register(adapter);
 
        return 0;
 
-err_rx_ring_allocation:
-       for (i = 0; i < adapter->num_tx_queues; i++)
-               kfree(adapter->tx_ring[i]);
-err_tx_ring_allocation:
+err_allocation:
+       while (tx)
+               kfree(adapter->tx_ring[--tx]);
+
+       while (rx)
+               kfree(adapter->rx_ring[--rx]);
        return -ENOMEM;
 }
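
The ring allocator above resolves the NUMA node once (falling back to first_online_node when the stored node is not usable), attempts a node-local kzalloc_node() with a plain kzalloc() fallback, and unwinds both the Tx and Rx arrays from a single error label. A user-space sketch of the allocate-with-fallback plus unwind pattern (alloc_on_node() is a hypothetical stand-in for the node-local allocator):

#include <errno.h>
#include <stdlib.h>

struct ring { unsigned int count; int node; };

/* stand-in for kzalloc_node(): node-local memory may be exhausted */
static struct ring *alloc_on_node(int node)
{
        (void)node;
        return calloc(1, sizeof(struct ring));
}

static int alloc_queues(struct ring **txr, int ntx,
                        struct ring **rxr, int nrx, int nid)
{
        int tx = 0, rx = 0;

        for (; tx < ntx; tx++) {
                struct ring *r = alloc_on_node(nid);

                if (!r)
                        r = calloc(1, sizeof(*r));      /* any-node fallback */
                if (!r)
                        goto err;
                r->node = nid;
                txr[tx] = r;
        }
        for (; rx < nrx; rx++) {
                struct ring *r = alloc_on_node(nid);

                if (!r)
                        r = calloc(1, sizeof(*r));
                if (!r)
                        goto err;
                r->node = nid;
                rxr[rx] = r;
        }
        return 0;

err:    /* single unwind path: free whatever was allocated so far */
        while (tx)
                free(txr[--tx]);
        while (rx)
                free(rxr[--rx]);
        return -ENOMEM;
}

int main(void)
{
        struct ring *tx[4], *rx[4];

        return alloc_queues(tx, 4, rx, 4, 0);
}
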
 
@@ -4751,6 +4946,11 @@ err_set_interrupt:
        return err;
 }
 
+static void ring_free_rcu(struct rcu_head *head)
+{
+       kfree(container_of(head, struct ixgbe_ring, rcu));
+}
+
 /**
  * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
  * @adapter: board private structure to clear interrupt scheme on
@@ -4767,7 +4967,12 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
                adapter->tx_ring[i] = NULL;
        }
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               kfree(adapter->rx_ring[i]);
+               struct ixgbe_ring *ring = adapter->rx_ring[i];
+
+               /* ixgbe_get_stats64() might access this ring, we must wait
+                * a grace period before freeing it.
+                */
+               call_rcu(&ring->rcu, ring_free_rcu);
                adapter->rx_ring[i] = NULL;
        }
 
@@ -4844,6 +5049,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
        int j;
        struct tc_configuration *tc;
 #endif
+       int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
 
        /* PCI config space info */
 
@@ -4858,11 +5064,14 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
        adapter->ring_feature[RING_F_RSS].indices = rss;
        adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
        adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
-       if (hw->mac.type == ixgbe_mac_82598EB) {
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
                if (hw->device_id == IXGBE_DEV_ID_82598AT)
                        adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
                adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
-       } else if (hw->mac.type == ixgbe_mac_82599EB) {
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
                adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
                adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
@@ -4891,6 +5100,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
                adapter->fcoe.up = IXGBE_FCOE_DEFTC;
 #endif
 #endif /* IXGBE_FCOE */
+               break;
+       default:
+               break;
        }
 
 #ifdef CONFIG_IXGBE_DCB
@@ -4920,8 +5132,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 #ifdef CONFIG_DCB
        adapter->last_lfc_mode = hw->fc.current_mode;
 #endif
-       hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
-       hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
+       hw->fc.high_water = FC_HIGH_WATER(max_frame);
+       hw->fc.low_water = FC_LOW_WATER(max_frame);
        hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
        hw->fc.send_xon = true;
        hw->fc.disable_fc_autoneg = false;
@@ -4959,15 +5171,13 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 
 /**
  * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
- * @adapter: board private structure
  * @tx_ring:    tx descriptor ring (for a specific queue) to setup
  *
  * Return 0 on success, negative on failure
  **/
-int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
-                            struct ixgbe_ring *tx_ring)
+int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
 {
-       struct pci_dev *pdev = adapter->pdev;
+       struct device *dev = tx_ring->dev;
        int size;
 
        size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
@@ -4982,7 +5192,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
        tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
        tx_ring->size = ALIGN(tx_ring->size, 4096);
 
-       tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+       tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
                                           &tx_ring->dma, GFP_KERNEL);
        if (!tx_ring->desc)
                goto err;
@@ -4995,7 +5205,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
 err:
        vfree(tx_ring->tx_buffer_info);
        tx_ring->tx_buffer_info = NULL;
-       e_err(probe, "Unable to allocate memory for the Tx descriptor ring\n");
+       dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
        return -ENOMEM;
 }
 
@@ -5014,7 +5224,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
        int i, err = 0;
 
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
+               err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
                if (!err)
                        continue;
                e_err(probe, "Allocation for Tx Queue %u failed\n", i);
@@ -5026,48 +5236,41 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
 
 /**
  * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
- * @adapter: board private structure
  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
  *
  * Returns 0 on success, negative on failure
  **/
-int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
-                            struct ixgbe_ring *rx_ring)
+int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
 {
-       struct pci_dev *pdev = adapter->pdev;
+       struct device *dev = rx_ring->dev;
        int size;
 
        size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
-       rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node);
+       rx_ring->rx_buffer_info = vmalloc_node(size, rx_ring->numa_node);
        if (!rx_ring->rx_buffer_info)
                rx_ring->rx_buffer_info = vmalloc(size);
-       if (!rx_ring->rx_buffer_info) {
-               e_err(probe, "vmalloc allocation failed for the Rx "
-                     "descriptor ring\n");
-               goto alloc_failed;
-       }
+       if (!rx_ring->rx_buffer_info)
+               goto err;
        memset(rx_ring->rx_buffer_info, 0, size);
 
        /* Round up to nearest 4K */
        rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
        rx_ring->size = ALIGN(rx_ring->size, 4096);
 
-       rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+       rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
                                           &rx_ring->dma, GFP_KERNEL);
 
-       if (!rx_ring->desc) {
-               e_err(probe, "Memory allocation failed for the Rx "
-                     "descriptor ring\n");
-               vfree(rx_ring->rx_buffer_info);
-               goto alloc_failed;
-       }
+       if (!rx_ring->desc)
+               goto err;
 
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
 
        return 0;
-
-alloc_failed:
+err:
+       vfree(rx_ring->rx_buffer_info);
+       rx_ring->rx_buffer_info = NULL;
+       dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
        return -ENOMEM;
 }
 
@@ -5081,13 +5284,12 @@ alloc_failed:
  *
  * Return 0 on success, negative on failure
  **/
-
 static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
 {
        int i, err = 0;
 
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
+               err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
                if (!err)
                        continue;
                e_err(probe, "Allocation for Rx Queue %u failed\n", i);
@@ -5099,23 +5301,23 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
 
 /**
  * ixgbe_free_tx_resources - Free Tx Resources per Queue
- * @adapter: board private structure
  * @tx_ring: Tx descriptor ring for a specific queue
  *
  * Free all transmit software resources
  **/
-void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
-                            struct ixgbe_ring *tx_ring)
+void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
 {
-       struct pci_dev *pdev = adapter->pdev;
-
-       ixgbe_clean_tx_ring(adapter, tx_ring);
+       ixgbe_clean_tx_ring(tx_ring);
 
        vfree(tx_ring->tx_buffer_info);
        tx_ring->tx_buffer_info = NULL;
 
-       dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
-                         tx_ring->dma);
+       /* if not set, then don't free */
+       if (!tx_ring->desc)
+               return;
+
+       dma_free_coherent(tx_ring->dev, tx_ring->size,
+                         tx_ring->desc, tx_ring->dma);
 
        tx_ring->desc = NULL;
 }
@@ -5132,28 +5334,28 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
 
        for (i = 0; i < adapter->num_tx_queues; i++)
                if (adapter->tx_ring[i]->desc)
-                       ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]);
+                       ixgbe_free_tx_resources(adapter->tx_ring[i]);
 }
 
 /**
  * ixgbe_free_rx_resources - Free Rx Resources
- * @adapter: board private structure
  * @rx_ring: ring to clean the resources from
  *
  * Free all receive software resources
  **/
-void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
-                            struct ixgbe_ring *rx_ring)
+void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
 {
-       struct pci_dev *pdev = adapter->pdev;
-
-       ixgbe_clean_rx_ring(adapter, rx_ring);
+       ixgbe_clean_rx_ring(rx_ring);
 
        vfree(rx_ring->rx_buffer_info);
        rx_ring->rx_buffer_info = NULL;
 
-       dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
-                         rx_ring->dma);
+       /* if not set, then don't free */
+       if (!rx_ring->desc)
+               return;
+
+       dma_free_coherent(rx_ring->dev, rx_ring->size,
+                         rx_ring->desc, rx_ring->dma);
 
        rx_ring->desc = NULL;
 }
@@ -5170,7 +5372,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
 
        for (i = 0; i < adapter->num_rx_queues; i++)
                if (adapter->rx_ring[i]->desc)
-                       ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]);
+                       ixgbe_free_rx_resources(adapter->rx_ring[i]);
 }
 
 /**
@@ -5183,6 +5385,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
 static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_hw *hw = &adapter->hw;
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
        /* MTU < 68 is an error and causes problems on some kernels */
@@ -5193,6 +5396,9 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
        /* must set new MTU before calling down or up */
        netdev->mtu = new_mtu;
 
+       hw->fc.high_water = FC_HIGH_WATER(max_frame);
+       hw->fc.low_water = FC_LOW_WATER(max_frame);
+
        if (netif_running(netdev))
                ixgbe_reinit_locked(adapter);
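
ixgbe_change_mtu() now recomputes the flow-control high/low watermarks whenever the frame size changes, because FC_HIGH_WATER()/FC_LOW_WATER() are derived from max_frame rather than fixed defaults (the same macros replace the constants in ixgbe_sw_init() earlier in this diff). The frame budget itself is just MTU plus Ethernet header and FCS; a quick check of that arithmetic (the watermark macros are driver-internal and not reproduced here):

#include <stdio.h>

#define ETH_HLEN        14      /* dst MAC + src MAC + ethertype */
#define ETH_FCS_LEN     4       /* trailing CRC                  */

int main(void)
{
        int mtu = 1500;
        int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN;

        /* 1500 + 14 + 4 = 1518 bytes on the wire (untagged) */
        printf("max_frame for MTU %d = %d\n", mtu, max_frame);
        return 0;
}
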
 
@@ -5288,8 +5494,8 @@ static int ixgbe_close(struct net_device *netdev)
 #ifdef CONFIG_PM
 static int ixgbe_resume(struct pci_dev *pdev)
 {
-       struct net_device *netdev = pci_get_drvdata(pdev);
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+       struct net_device *netdev = adapter->netdev;
        u32 err;
 
        pci_set_power_state(pdev, PCI_D0);
@@ -5320,7 +5526,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
 
        if (netif_running(netdev)) {
-               err = ixgbe_open(adapter->netdev);
+               err = ixgbe_open(netdev);
                if (err)
                        return err;
        }
@@ -5333,8 +5539,8 @@ static int ixgbe_resume(struct pci_dev *pdev)
 
 static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
 {
-       struct net_device *netdev = pci_get_drvdata(pdev);
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+       struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 ctrl, fctrl;
        u32 wufc = adapter->wol;
@@ -5351,6 +5557,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
                ixgbe_free_all_rx_resources(adapter);
        }
 
+       ixgbe_clear_interrupt_scheme(adapter);
+
 #ifdef CONFIG_PM
        retval = pci_save_state(pdev);
        if (retval)
@@ -5377,15 +5585,20 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
                IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
        }
 
-       if (wufc && hw->mac.type == ixgbe_mac_82599EB)
-               pci_wake_from_d3(pdev, true);
-       else
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
                pci_wake_from_d3(pdev, false);
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               pci_wake_from_d3(pdev, !!wufc);
+               break;
+       default:
+               break;
+       }
 
        *enable_wake = !!wufc;
 
-       ixgbe_clear_interrupt_scheme(adapter);
-
        ixgbe_release_hw_control(adapter);
 
        pci_disable_device(pdev);
@@ -5434,10 +5647,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
+       struct ixgbe_hw_stats *hwstats = &adapter->stats;
        u64 total_mpc = 0;
        u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
-       u64 non_eop_descs = 0, restart_queue = 0;
-       struct ixgbe_hw_stats *hwstats = &adapter->stats;
+       u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
+       u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
+       u64 bytes = 0, packets = 0;
 
        if (test_bit(__IXGBE_DOWN, &adapter->state) ||
            test_bit(__IXGBE_RESETTING, &adapter->state))
@@ -5450,21 +5665,41 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
                        adapter->hw_rx_no_dma_resources +=
                                IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
                for (i = 0; i < adapter->num_rx_queues; i++) {
-                       rsc_count += adapter->rx_ring[i]->rsc_count;
-                       rsc_flush += adapter->rx_ring[i]->rsc_flush;
+                       rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
+                       rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
                }
                adapter->rsc_total_count = rsc_count;
                adapter->rsc_total_flush = rsc_flush;
        }
 
+       for (i = 0; i < adapter->num_rx_queues; i++) {
+               struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
+               non_eop_descs += rx_ring->rx_stats.non_eop_descs;
+               alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
+               alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
+               bytes += rx_ring->stats.bytes;
+               packets += rx_ring->stats.packets;
+       }
+       adapter->non_eop_descs = non_eop_descs;
+       adapter->alloc_rx_page_failed = alloc_rx_page_failed;
+       adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
+       netdev->stats.rx_bytes = bytes;
+       netdev->stats.rx_packets = packets;
+
+       bytes = 0;
+       packets = 0;
        /* gather some stats to the adapter struct that are per queue */
-       for (i = 0; i < adapter->num_tx_queues; i++)
-               restart_queue += adapter->tx_ring[i]->restart_queue;
+       for (i = 0; i < adapter->num_tx_queues; i++) {
+               struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+               restart_queue += tx_ring->tx_stats.restart_queue;
+               tx_busy += tx_ring->tx_stats.tx_busy;
+               bytes += tx_ring->stats.bytes;
+               packets += tx_ring->stats.packets;
+       }
        adapter->restart_queue = restart_queue;
-
-       for (i = 0; i < adapter->num_rx_queues; i++)
-               non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
-       adapter->non_eop_descs = non_eop_descs;
+       adapter->tx_busy = tx_busy;
+       netdev->stats.tx_bytes = bytes;
+       netdev->stats.tx_packets = packets;
 
        hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
        for (i = 0; i < 8; i++) {
@@ -5479,17 +5714,18 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
                hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
                hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
                hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
-               if (hw->mac.type == ixgbe_mac_82599EB) {
-                       hwstats->pxonrxc[i] +=
-                               IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
-                       hwstats->pxoffrxc[i] +=
-                               IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
-                       hwstats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
-               } else {
+               switch (hw->mac.type) {
+               case ixgbe_mac_82598EB:
                        hwstats->pxonrxc[i] +=
                                IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
-                       hwstats->pxoffrxc[i] +=
-                               IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+                       break;
+               case ixgbe_mac_82599EB:
+               case ixgbe_mac_X540:
+                       hwstats->pxonrxc[i] +=
+                               IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+                       break;
+               default:
+                       break;
                }
                hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
                hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
@@ -5498,21 +5734,25 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
        /* work around hardware counting issue */
        hwstats->gprc -= missed_rx;
 
+       ixgbe_update_xoff_received(adapter);
+
        /* 82598 hardware only has a 32 bit counter in the high register */
-       if (hw->mac.type == ixgbe_mac_82599EB) {
-               u64 tmp;
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+               hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
+               hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
+               hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
-               tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF;
-                                               /* 4 high bits of GORC */
-               hwstats->gorc += (tmp << 32);
+               IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
                hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
-               tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF;
-                                               /* 4 high bits of GOTC */
-               hwstats->gotc += (tmp << 32);
+               IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
                hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
-               IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
+               IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
                hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
-               hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
                hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
                hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
 #ifdef IXGBE_FCOE
@@ -5523,12 +5763,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
                hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
                hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
 #endif /* IXGBE_FCOE */
-       } else {
-               hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
-               hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
-               hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
-               hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
-               hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+               break;
+       default:
+               break;
        }
        bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
        hwstats->bprc += bprc;
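
ixgbe_update_stats() now folds the per-ring packet/byte counters (ring->stats plus the rx_stats/tx_stats sub-structures) into the adapter totals and netdev->stats in one pass, instead of reading adapter-wide fields. A minimal sketch of that per-queue aggregation (struct names illustrative):

#include <stdint.h>
#include <stdio.h>

struct queue_stats { uint64_t packets, bytes; };

int main(void)
{
        struct queue_stats rxq[4] = {
                { 100, 64000 }, { 250, 300000 }, { 75, 9000 }, { 0, 0 },
        };
        uint64_t packets = 0, bytes = 0;

        /* one pass over the rings, then a single store into the totals */
        for (int i = 0; i < 4; i++) {
                packets += rxq[i].packets;
                bytes   += rxq[i].bytes;
        }
        printf("rx_packets=%llu rx_bytes=%llu\n",
               (unsigned long long)packets, (unsigned long long)bytes);
        return 0;
}
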
@@ -5701,8 +5938,8 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
 
        if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
                for (i = 0; i < adapter->num_tx_queues; i++)
-                       set_bit(__IXGBE_FDIR_INIT_DONE,
-                               &(adapter->tx_ring[i]->reinit_state));
+                       set_bit(__IXGBE_TX_FDIR_INIT_DONE,
+                               &(adapter->tx_ring[i]->state));
        } else {
                e_err(probe, "failed to finish FDIR re-initialization, "
                      "ignored adding FDIR ATR filters\n");
@@ -5764,17 +6001,27 @@ static void ixgbe_watchdog_task(struct work_struct *work)
                if (!netif_carrier_ok(netdev)) {
                        bool flow_rx, flow_tx;
 
-                       if (hw->mac.type == ixgbe_mac_82599EB) {
-                               u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
-                               u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
-                               flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
-                               flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
-                       } else {
+                       switch (hw->mac.type) {
+                       case ixgbe_mac_82598EB: {
                                u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
                                u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
                                flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
                                flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
                        }
+                               break;
+                       case ixgbe_mac_82599EB:
+                       case ixgbe_mac_X540: {
+                               u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+                               u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+                               flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
+                               flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
+                       }
+                               break;
+                       default:
+                               flow_tx = false;
+                               flow_rx = false;
+                               break;
+                       }
 
                        e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
                               (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
@@ -5788,7 +6035,10 @@ static void ixgbe_watchdog_task(struct work_struct *work)
                        netif_carrier_on(netdev);
                } else {
                        /* Force detection of hung controller */
-                       adapter->detect_tx_hung = true;
+                       for (i = 0; i < adapter->num_tx_queues; i++) {
+                               tx_ring = adapter->tx_ring[i];
+                               set_check_for_tx_hang(tx_ring);
+                       }
                }
        } else {
                adapter->link_up = false;
@@ -6000,15 +6250,17 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
 static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                        struct ixgbe_ring *tx_ring,
                        struct sk_buff *skb, u32 tx_flags,
-                       unsigned int first)
+                       unsigned int first, const u8 hdr_len)
 {
-       struct pci_dev *pdev = adapter->pdev;
+       struct device *dev = tx_ring->dev;
        struct ixgbe_tx_buffer *tx_buffer_info;
        unsigned int len;
        unsigned int total = skb->len;
        unsigned int offset = 0, size, count = 0, i;
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        unsigned int f;
+       unsigned int bytecount = skb->len;
+       u16 gso_segs = 1;
 
        i = tx_ring->next_to_use;
 
@@ -6023,10 +6275,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 
                tx_buffer_info->length = size;
                tx_buffer_info->mapped_as_page = false;
-               tx_buffer_info->dma = dma_map_single(&pdev->dev,
+               tx_buffer_info->dma = dma_map_single(dev,
                                                     skb->data + offset,
                                                     size, DMA_TO_DEVICE);
-               if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+               if (dma_mapping_error(dev, tx_buffer_info->dma))
                        goto dma_error;
                tx_buffer_info->time_stamp = jiffies;
                tx_buffer_info->next_to_watch = i;
@@ -6059,12 +6311,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                        size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
 
                        tx_buffer_info->length = size;
-                       tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
+                       tx_buffer_info->dma = dma_map_page(dev,
                                                           frag->page,
                                                           offset, size,
                                                           DMA_TO_DEVICE);
                        tx_buffer_info->mapped_as_page = true;
-                       if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+                       if (dma_mapping_error(dev, tx_buffer_info->dma))
                                goto dma_error;
                        tx_buffer_info->time_stamp = jiffies;
                        tx_buffer_info->next_to_watch = i;
@@ -6078,6 +6330,19 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                        break;
        }
 
+       if (tx_flags & IXGBE_TX_FLAGS_TSO)
+               gso_segs = skb_shinfo(skb)->gso_segs;
+#ifdef IXGBE_FCOE
+       /* adjust for FCoE Sequence Offload */
+       else if (tx_flags & IXGBE_TX_FLAGS_FSO)
+               gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
+                                       skb_shinfo(skb)->gso_size);
+#endif /* IXGBE_FCOE */
+       bytecount += (gso_segs - 1) * hdr_len;
+
+       /* multiply data chunks by size of headers */
+       tx_ring->tx_buffer_info[i].bytecount = bytecount;
+       tx_ring->tx_buffer_info[i].gso_segs = gso_segs;
        tx_ring->tx_buffer_info[i].skb = skb;
        tx_ring->tx_buffer_info[first].next_to_watch = i;
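
The bytecount/gso_segs bookkeeping added in this hunk feeds the ring's byte statistics: on a segmented send the headers go on the wire once per segment, so the wire byte count is skb->len plus one extra header for every segment after the first (the FCoE branch derives gso_segs with DIV_ROUND_UP because FSO does not prefill it). A minimal standalone sketch of the same arithmetic, with made-up values, not driver code:

    /* Illustrative only -- not driver code. */
    static unsigned int example_wire_bytes(unsigned int skb_len,
                                           unsigned int hdr_len,
                                           unsigned int gso_size)
    {
            /* segments the hardware will emit, e.g. a 66066-byte skb with
             * 66-byte headers and a 1448-byte MSS -> 46 segments */
            unsigned int gso_segs = (skb_len - hdr_len + gso_size - 1) / gso_size;

            /* every segment after the first repeats the headers */
            return skb_len + (gso_segs - 1) * hdr_len;
    }
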
 
@@ -6099,14 +6364,13 @@ dma_error:
                        i += tx_ring->count;
                i--;
                tx_buffer_info = &tx_ring->tx_buffer_info[i];
-               ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+               ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
        }
 
        return 0;
 }
 
-static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
-                          struct ixgbe_ring *tx_ring,
+static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
                           int tx_flags, int count, u32 paylen, u8 hdr_len)
 {
        union ixgbe_adv_tx_desc *tx_desc = NULL;
@@ -6171,60 +6435,46 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
        wmb();
 
        tx_ring->next_to_use = i;
-       writel(i, adapter->hw.hw_addr + tx_ring->tail);
+       writel(i, tx_ring->tail);
 }
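
The doorbell write above now takes tx_ring->tail as a fully mapped register address instead of an offset added to hw->hw_addr on every transmit. A hypothetical sketch of the one-time setup this implies; IXGBE_TDT() is the existing tail-register macro, while the helper name and call site are illustrative:

    /* Hypothetical sketch: precompute the mapped tail address once at ring
     * configuration time so the hot path can writel() to it directly. */
    static void example_map_tx_tail(struct ixgbe_adapter *adapter,
                                    struct ixgbe_ring *ring, u8 reg_idx)
    {
            ring->tail = adapter->hw.hw_addr + IXGBE_TDT(reg_idx);
    }
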
 
 static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
-                     int queue, u32 tx_flags, __be16 protocol)
+                     u8 queue, u32 tx_flags, __be16 protocol)
 {
        struct ixgbe_atr_input atr_input;
-       struct tcphdr *th;
        struct iphdr *iph = ip_hdr(skb);
        struct ethhdr *eth = (struct ethhdr *)skb->data;
-       u16 vlan_id, src_port, dst_port, flex_bytes;
-       u32 src_ipv4_addr, dst_ipv4_addr;
-       u8 l4type = 0;
+       struct tcphdr *th;
+       u16 vlan_id;
 
-       /* Right now, we support IPv4 only */
-       if (protocol != htons(ETH_P_IP))
-               return;
-       /* check if we're UDP or TCP */
-       if (iph->protocol == IPPROTO_TCP) {
-               th = tcp_hdr(skb);
-               src_port = th->source;
-               dst_port = th->dest;
-               l4type |= IXGBE_ATR_L4TYPE_TCP;
-               /* l4type IPv4 type is 0, no need to assign */
-       } else {
-               /* Unsupported L4 header, just bail here */
+       /* Right now, we support IPv4 w/ TCP only */
+       if (protocol != htons(ETH_P_IP) ||
+           iph->protocol != IPPROTO_TCP)
                return;
-       }
 
        memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
 
        vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
                   IXGBE_TX_FLAGS_VLAN_SHIFT;
-       src_ipv4_addr = iph->saddr;
-       dst_ipv4_addr = iph->daddr;
-       flex_bytes = eth->h_proto;
+
+       th = tcp_hdr(skb);
 
        ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
-       ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
-       ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
-       ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
-       ixgbe_atr_set_l4type_82599(&atr_input, l4type);
+       ixgbe_atr_set_src_port_82599(&atr_input, th->dest);
+       ixgbe_atr_set_dst_port_82599(&atr_input, th->source);
+       ixgbe_atr_set_flex_byte_82599(&atr_input, eth->h_proto);
+       ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP);
        /* src and dst are inverted, think how the receiver sees them */
-       ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
-       ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);
+       ixgbe_atr_set_src_ipv4_82599(&atr_input, iph->daddr);
+       ixgbe_atr_set_dst_ipv4_82599(&atr_input, iph->saddr);
 
        /* This assumes the Rx queue and Tx queue are bound to the same CPU */
        ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
 }
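
The inversion noted in the comment above ("think how the receiver sees them") is easiest to see on a concrete flow: a segment transmitted from A:p to B:q comes back to this port as B:q to A:p, so the signature filter is keyed on the swapped tuple. A self-contained sketch of that swap; the struct and helper are illustrative, not driver types:

    /* Illustrative only: key a transmit-side 4-tuple the way the receive
     * side will see the same flow. */
    struct example_flow_key {
            __be32 saddr, daddr;
            __be16 sport, dport;
    };

    static struct example_flow_key example_rx_view(struct example_flow_key tx)
    {
            struct example_flow_key rx = {
                    .saddr = tx.daddr, .daddr = tx.saddr,
                    .sport = tx.dport, .dport = tx.sport,
            };
            return rx;
    }
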
 
-static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
-                                struct ixgbe_ring *tx_ring, int size)
+static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
 {
-       netif_stop_subqueue(netdev, tx_ring->queue_index);
+       netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
        /* Herbert's original patch had:
         *  smp_mb__after_netif_stop_queue();
         * but since that doesn't exist yet, just open code it. */
@@ -6236,17 +6486,16 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
                return -EBUSY;
 
        /* A reprieve! - use start_queue because it doesn't call schedule */
-       netif_start_subqueue(netdev, tx_ring->queue_index);
-       ++tx_ring->restart_queue;
+       netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+       ++tx_ring->tx_stats.restart_queue;
        return 0;
 }
 
-static int ixgbe_maybe_stop_tx(struct net_device *netdev,
-                             struct ixgbe_ring *tx_ring, int size)
+static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
 {
        if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
                return 0;
-       return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
+       return __ixgbe_maybe_stop_tx(tx_ring, size);
 }
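
The open-coded barrier referred to in the comment above sits between stopping the subqueue and re-reading the free-descriptor count; without it the clean-up path could observe the stale count, skip the wake-up, and leave the queue stopped. A condensed sketch of the whole pattern, assuming the smp_mb() from the elided lines; the helper name is illustrative:

    /* Illustrative only: the stop / barrier / re-check / wake pattern. */
    static int example_maybe_stop(struct ixgbe_ring *ring, int needed)
    {
            netif_stop_subqueue(ring->netdev, ring->queue_index);
            smp_mb();       /* order the stop against the re-read below */
            if (likely(IXGBE_DESC_UNUSED(ring) < needed))
                    return -EBUSY;
            /* a reprieve: descriptors were freed while we were stopping */
            netif_start_subqueue(ring->netdev, ring->queue_index);
            ++ring->tx_stats.restart_queue;
            return 0;
    }
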
 
 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
@@ -6291,10 +6540,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
        return skb_tx_hash(dev, skb);
 }
 
-netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev,
+netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
                          struct ixgbe_adapter *adapter,
                          struct ixgbe_ring *tx_ring)
 {
+       struct net_device *netdev = tx_ring->netdev;
        struct netdev_queue *txq;
        unsigned int first;
        unsigned int tx_flags = 0;
@@ -6352,8 +6602,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
                count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
 
-       if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
-               adapter->tx_busy++;
+       if (ixgbe_maybe_stop_tx(tx_ring, count)) {
+               tx_ring->tx_stats.tx_busy++;
                return NETDEV_TX_BUSY;
        }
 
@@ -6387,14 +6637,14 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
                        tx_flags |= IXGBE_TX_FLAGS_CSUM;
        }
 
-       count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first);
+       count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
        if (count) {
                /* add the ATR filter if ATR is on */
                if (tx_ring->atr_sample_rate) {
                        ++tx_ring->atr_count;
                        if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
-                            test_bit(__IXGBE_FDIR_INIT_DONE,
-                                     &tx_ring->reinit_state)) {
+                            test_bit(__IXGBE_TX_FDIR_INIT_DONE,
+                                     &tx_ring->state)) {
                                ixgbe_atr(adapter, skb, tx_ring->queue_index,
                                          tx_flags, protocol);
                                tx_ring->atr_count = 0;
@@ -6403,9 +6653,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
                txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
                txq->tx_bytes += skb->len;
                txq->tx_packets++;
-               ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
-                              hdr_len);
-               ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
+               ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
+               ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
        } else {
                dev_kfree_skb_any(skb);
@@ -6422,7 +6671,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netd
        struct ixgbe_ring *tx_ring;
 
        tx_ring = adapter->tx_ring[skb->queue_mapping];
-       return ixgbe_xmit_frame_ring(skb, netdev, adapter, tx_ring);
+       return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
 }
 
 /**
@@ -6563,20 +6812,23 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
 
        /* accurate rx/tx bytes/packets stats */
        dev_txq_stats_fold(netdev, stats);
+       rcu_read_lock();
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               struct ixgbe_ring *ring = adapter->rx_ring[i];
+               struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
                u64 bytes, packets;
                unsigned int start;
 
-               do {
-                       start = u64_stats_fetch_begin_bh(&ring->syncp);
-                       packets = ring->stats.packets;
-                       bytes   = ring->stats.bytes;
-               } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
-               stats->rx_packets += packets;
-               stats->rx_bytes   += bytes;
+               if (ring) {
+                       do {
+                               start = u64_stats_fetch_begin_bh(&ring->syncp);
+                               packets = ring->stats.packets;
+                               bytes   = ring->stats.bytes;
+                       } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+                       stats->rx_packets += packets;
+                       stats->rx_bytes   += bytes;
+               }
        }
-
+       rcu_read_unlock();
        /* following stats updated by ixgbe_watchdog_task() */
        stats->multicast        = netdev->stats.multicast;
        stats->rx_errors        = netdev->stats.rx_errors;
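
The fetch_begin/fetch_retry loop above is the reader half of the per-ring counters (it guards against torn 64-bit reads on 32-bit kernels); wherever the ring counters are bumped, the writer half brackets its updates with the same syncp. A sketch of that writer side for reference; the function name is illustrative, the fields match the ones read above:

    /* Illustrative only: the writer that pairs with the reader loop above. */
    static void example_rx_ring_stats_update(struct ixgbe_ring *ring,
                                             unsigned int packets,
                                             unsigned int bytes)
    {
            u64_stats_update_begin(&ring->syncp);
            ring->stats.packets += packets;
            ring->stats.bytes += bytes;
            u64_stats_update_end(&ring->syncp);
    }
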
@@ -6758,8 +7010,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 
        SET_NETDEV_DEV(netdev, &pdev->dev);
 
-       pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);
+       pci_set_drvdata(pdev, adapter);
 
        adapter->netdev = netdev;
        adapter->pdev = pdev;
@@ -6832,8 +7084,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                goto err_sw_init;
 
        /* Make it possible for the adapter to be woken up via WOL */
-       if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
+               break;
+       default:
+               break;
+       }
 
        /*
         * If there is a fan on this device and it has failed log the
@@ -6942,7 +7200,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        }
 
        /* power down the optics */
-       if (hw->phy.multispeed_fiber)
+       if (hw->phy.multispeed_fiber && hw->mac.ops.disable_tx_laser)
                hw->mac.ops.disable_tx_laser(hw);
 
        init_timer(&adapter->watchdog_timer);
@@ -6957,6 +7215,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                goto err_sw_init;
 
        switch (pdev->device) {
+       case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+               /* All except this subdevice support WOL */
+               if (pdev->subsystem_device ==
+                   IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
+                       adapter->wol = 0;
+                       break;
+               }
        case IXGBE_DEV_ID_82599_KX4:
                adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
                                IXGBE_WUFC_MC | IXGBE_WUFC_BC);
@@ -7082,8 +7347,8 @@ err_dma:
  **/
 static void __devexit ixgbe_remove(struct pci_dev *pdev)
 {
-       struct net_device *netdev = pci_get_drvdata(pdev);
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+       struct net_device *netdev = adapter->netdev;
 
        set_bit(__IXGBE_DOWN, &adapter->state);
        /* clear the module not found bit to make sure the worker won't
@@ -7153,8 +7418,8 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
 static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
 {
-       struct net_device *netdev = pci_get_drvdata(pdev);
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+       struct net_device *netdev = adapter->netdev;
 
        netif_device_detach(netdev);
 
@@ -7177,8 +7442,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
  */
 static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
 {
-       struct net_device *netdev = pci_get_drvdata(pdev);
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
        pci_ers_result_t result;
        int err;
 
@@ -7216,8 +7480,8 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
  */
 static void ixgbe_io_resume(struct pci_dev *pdev)
 {
-       struct net_device *netdev = pci_get_drvdata(pdev);
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+       struct net_device *netdev = adapter->netdev;
 
        if (netif_running(netdev)) {
                if (ixgbe_up(adapter)) {
@@ -7282,6 +7546,7 @@ static void __exit ixgbe_exit_module(void)
        dca_unregister_notify(&dca_notifier);
 #endif
        pci_unregister_driver(&ixgbe_driver);
+       rcu_barrier(); /* Wait for completion of call_rcu()'s */
 }
 
 #ifdef CONFIG_IXGBE_DCA
index 471f0f2cdb98976ac3d6785a134a9d8b5a2f3087..027c628c3aaed040dce66a720179ea86efc0a6bd 100644 (file)
@@ -319,8 +319,14 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
        u32 vflre = 0;
        s32 ret_val = IXGBE_ERR_MBX;
 
-       if (hw->mac.type == ixgbe_mac_82599EB)
+       switch (hw->mac.type) {
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
+               break;
+       default:
+               break;
+       }
 
        if (vflre & (1 << vf_shift)) {
                ret_val = 0;
@@ -439,22 +445,26 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
 {
        struct ixgbe_mbx_info *mbx = &hw->mbx;
 
-       if (hw->mac.type != ixgbe_mac_82599EB)
-               return;
-
-       mbx->timeout = 0;
-       mbx->usec_delay = 0;
-
-       mbx->size = IXGBE_VFMAILBOX_SIZE;
-
-       mbx->stats.msgs_tx = 0;
-       mbx->stats.msgs_rx = 0;
-       mbx->stats.reqs = 0;
-       mbx->stats.acks = 0;
-       mbx->stats.rsts = 0;
+       switch (hw->mac.type) {
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               mbx->timeout = 0;
+               mbx->usec_delay = 0;
+
+               mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+               mbx->stats.msgs_tx = 0;
+               mbx->stats.msgs_rx = 0;
+               mbx->stats.reqs = 0;
+               mbx->stats.acks = 0;
+               mbx->stats.rsts = 0;
+               break;
+       default:
+               break;
+       }
 }
 
-struct ixgbe_mbx_operations mbx_ops_82599 = {
+struct ixgbe_mbx_operations mbx_ops_generic = {
        .read                   = ixgbe_read_mbx_pf,
        .write                  = ixgbe_write_mbx_pf,
        .read_posted            = ixgbe_read_posted_mbx,
index 7e0d08ff5b53f53cad83da3db0ef96981cbc1eb6..3df9b15902186e18879fe51fccd5d327fa7e2c0f 100644 (file)
@@ -88,6 +88,6 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
 s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
 void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
 
-extern struct ixgbe_mbx_operations mbx_ops_82599;
+extern struct ixgbe_mbx_operations mbx_ops_generic;
 
 #endif /* _IXGBE_MBX_H_ */
index 6c0d42e33f21770092668b3c3405223c3e55d09b..c445fbce56ee7bed1543bafcf58349fb731e42eb 100644 (file)
@@ -115,6 +115,9 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
        case TN1010_PHY_ID:
                phy_type = ixgbe_phy_tn;
                break;
+       case AQ1202_PHY_ID:
+               phy_type = ixgbe_phy_aq;
+               break;
        case QT2022_PHY_ID:
                phy_type = ixgbe_phy_qt;
                break;
@@ -424,6 +427,39 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
        return 0;
 }
 
+/**
+ * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: boolean auto-negotiation value
+ *
+ * Determines the link capabilities by reading the PHY speed ability
+ * register (PMA/PMD) over MDIO.
+ */
+s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+                                               ixgbe_link_speed *speed,
+                                               bool *autoneg)
+{
+       s32 status = IXGBE_ERR_LINK_SETUP;
+       u16 speed_ability;
+
+       *speed = 0;
+       *autoneg = true;
+
+       status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
+                                     &speed_ability);
+
+       if (status == 0) {
+               if (speed_ability & MDIO_SPEED_10G)
+                       *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+               if (speed_ability & MDIO_PMA_SPEED_1000)
+                       *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+               if (speed_ability & MDIO_PMA_SPEED_100)
+                       *speed |= IXGBE_LINK_SPEED_100_FULL;
+       }
+
+       return status;
+}
+
 /**
  *  ixgbe_reset_phy_nl - Performs a PHY reset
  *  @hw: pointer to hardware structure
@@ -1377,6 +1413,22 @@ s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
        return status;
 }
 
+/**
+ *  ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
+ *  @hw: pointer to hardware structure
+ *  @firmware_version: pointer to the PHY Firmware Version
+**/
+s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
+                                           u16 *firmware_version)
+{
+       s32 status = 0;
+
+       status = hw->phy.ops.read_reg(hw, AQ_FW_REV, MDIO_MMD_VEND1,
+                                     firmware_version);
+
+       return status;
+}
+
 /**
  *  ixgbe_tn_check_overtemp - Checks if an overtemp occurred.
  *  @hw: pointer to hardware structure
index fb3898f12fc5519b1d01f2705323ef4271ef2730..e2c6b7eac641d069e45fb3fdc6ea4aad58d88e87 100644 (file)
@@ -96,6 +96,9 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
                                        ixgbe_link_speed speed,
                                        bool autoneg,
                                        bool autoneg_wait_to_complete);
+s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+                                               ixgbe_link_speed *speed,
+                                               bool *autoneg);
 
 /* PHY specific */
 s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
@@ -103,6 +106,8 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
                              bool *link_up);
 s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
                                        u16 *firmware_version);
+s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
+                                           u16 *firmware_version);
 
 s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
 s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
index 5428153af8f31a83ca56d2beb8dea42b121d6462..6e3e94b5a5f6273f704ed7d87695aa4436d34be1 100644 (file)
@@ -68,7 +68,7 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
         * addresses
         */
        for (i = 0; i < entries; i++) {
-               vfinfo->vf_mc_hashes[i] = hash_list[i];;
+               vfinfo->vf_mc_hashes[i] = hash_list[i];
        }
 
        for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
@@ -178,8 +178,7 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
 int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
 {
        unsigned char vf_mac_addr[6];
-       struct net_device *netdev = pci_get_drvdata(pdev);
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
        unsigned int vfn = (event_mask & 0x3f);
 
        bool enable = ((event_mask & 0x10000000U) != 0);
index d3cc6ce7c973a3c71855588029e57ab7dcd82ef6..42c607339a6210d6faa9cfed5e83a115cc87bbe0 100644 (file)
@@ -57,6 +57,8 @@
 #define IXGBE_DEV_ID_82599_SFP_EM        0x1507
 #define IXGBE_DEV_ID_82599_XAUI_LOM      0x10FC
 #define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
+#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ  0x000C
+#define IXGBE_DEV_ID_X540T               0x1528
 
 /* General Registers */
 #define IXGBE_CTRL      0x00000
 /* PHY IDs*/
 #define TN1010_PHY_ID    0x00A19410
 #define TNX_FW_REV       0xB
+#define AQ1202_PHY_ID    0x03A1B440
 #define QT2022_PHY_ID    0x0043A400
 #define ATH_PHY_ID       0x03429050
+#define AQ_FW_REV        0x20
 
 /* PHY Types */
 #define IXGBE_M88E1145_E_PHY_ID  0x01410CD0
 #define IXGBE_EEC_PRES      0x00000100 /* EEPROM Present */
 #define IXGBE_EEC_ARD       0x00000200 /* EEPROM Auto Read Done */
 #define IXGBE_EEC_FLUP      0x00800000 /* Flash update command */
+#define IXGBE_EEC_SEC1VAL   0x02000000 /* Sector 1 Valid */
 #define IXGBE_EEC_FLUDONE   0x04000000 /* Flash update done */
 /* EEPROM Addressing bits based on type (0-small, 1-large) */
 #define IXGBE_EEC_ADDR_SIZE 0x00000400
 #define IXGBE_EEPROM_SUM        0xBABA
 #define IXGBE_PCIE_ANALOG_PTR   0x03
 #define IXGBE_ATLAS0_CONFIG_PTR 0x04
+#define IXGBE_PHY_PTR           0x04
 #define IXGBE_ATLAS1_CONFIG_PTR 0x05
+#define IXGBE_OPTION_ROM_PTR    0x05
 #define IXGBE_PCIE_GENERAL_PTR  0x06
 #define IXGBE_PCIE_CONFIG0_PTR  0x07
 #define IXGBE_PCIE_CONFIG1_PTR  0x08
@@ -2113,6 +2120,14 @@ typedef u32 ixgbe_physical_layer;
 #define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
 #define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
 
+/* Flow Control Macros */
+#define PAUSE_RTT      8
+#define PAUSE_MTU(MTU) ((MTU + 1024 - 1) / 1024)
+
+#define FC_HIGH_WATER(MTU) ((((PAUSE_RTT + PAUSE_MTU(MTU)) * 144) + 99) / 100 +\
+                               PAUSE_MTU(MTU))
+#define FC_LOW_WATER(MTU)  (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT))
+
 /* Software ATR hash keys */
 #define IXGBE_ATR_BUCKET_HASH_KEY    0xE214AD3D
 #define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17
@@ -2164,6 +2179,7 @@ struct ixgbe_atr_input_masks {
 enum ixgbe_eeprom_type {
        ixgbe_eeprom_uninitialized = 0,
        ixgbe_eeprom_spi,
+       ixgbe_flash,
        ixgbe_eeprom_none /* No NVM support */
 };
 
@@ -2171,12 +2187,14 @@ enum ixgbe_mac_type {
        ixgbe_mac_unknown = 0,
        ixgbe_mac_82598EB,
        ixgbe_mac_82599EB,
+       ixgbe_mac_X540,
        ixgbe_num_macs
 };
 
 enum ixgbe_phy_type {
        ixgbe_phy_unknown = 0,
        ixgbe_phy_tn,
+       ixgbe_phy_aq,
        ixgbe_phy_cu_unknown,
        ixgbe_phy_qt,
        ixgbe_phy_xaui,
@@ -2405,6 +2423,7 @@ struct ixgbe_eeprom_operations {
        s32 (*write)(struct ixgbe_hw *, u16, u16);
        s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
        s32 (*update_checksum)(struct ixgbe_hw *);
+       u16 (*calc_checksum)(struct ixgbe_hw *);
 };
 
 struct ixgbe_mac_operations {
@@ -2574,6 +2593,7 @@ struct ixgbe_hw {
        u16                             subsystem_vendor_id;
        u8                              revision_id;
        bool                            adapter_stopped;
+       bool                            force_full_reset;
 };
 
 struct ixgbe_info {
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c
new file mode 100644 (file)
index 0000000..9649fa7
--- /dev/null
@@ -0,0 +1,722 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2010 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include "ixgbe.h"
+#include "ixgbe_phy.h"
+//#include "ixgbe_mbx.h"
+
+#define IXGBE_X540_MAX_TX_QUEUES 128
+#define IXGBE_X540_MAX_RX_QUEUES 128
+#define IXGBE_X540_RAR_ENTRIES   128
+#define IXGBE_X540_MC_TBL_SIZE   128
+#define IXGBE_X540_VFT_TBL_SIZE  128
+
+static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
+static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
+static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
+static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
+static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
+static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
+
+static enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
+{
+       return ixgbe_media_type_copper;
+}
+
+static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
+{
+       struct ixgbe_mac_info *mac = &hw->mac;
+
+       /* Call PHY identify routine to get the phy type */
+       ixgbe_identify_phy_generic(hw);
+
+       mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
+       mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
+       mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
+       mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES;
+       mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES;
+       mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
+
+       return 0;
+}
+
+/**
+ *  ixgbe_setup_mac_link_X540 - Set the auto advertised capabilities
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg: true if autonegotiation enabled
+ *  @autoneg_wait_to_complete: true when waiting for completion is needed
+ **/
+static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
+                                     ixgbe_link_speed speed, bool autoneg,
+                                     bool autoneg_wait_to_complete)
+{
+       return hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+                                           autoneg_wait_to_complete);
+}
+
+/**
+ *  ixgbe_reset_hw_X540 - Perform hardware reset
+ *  @hw: pointer to hardware structure
+ *
+ *  Resets the hardware by resetting the transmit and receive units, masking
+ *  and clearing all interrupts, performing a PHY reset, and performing a
+ *  link (MAC) reset.
+ **/
+static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
+{
+       ixgbe_link_speed link_speed;
+       s32 status = 0;
+       u32 ctrl;
+       u32 ctrl_ext;
+       u32 reset_bit;
+       u32 i;
+       u32 autoc;
+       u32 autoc2;
+       bool link_up = false;
+
+       /* Call adapter stop to disable tx/rx and clear interrupts */
+       hw->mac.ops.stop_adapter(hw);
+
+       /*
+        * Prevent the PCI-E bus from hanging by disabling PCI-E master
+        * access and verifying that no requests are pending before the reset
+        */
+       status = ixgbe_disable_pcie_master(hw);
+       if (status != 0) {
+               status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+               hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
+       }
+
+       /*
+        * Issue global reset to the MAC.  Needs to be SW reset if link is up.
+        * If link reset is used when link is up, it might reset the PHY when
+        * mng is using it.  If link is down or the flag to force full link
+        * reset is set, then perform link reset.
+        */
+       if (hw->force_full_reset) {
+               reset_bit = IXGBE_CTRL_LNK_RST;
+       } else {
+               hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+               if (!link_up)
+                       reset_bit = IXGBE_CTRL_LNK_RST;
+               else
+                       reset_bit = IXGBE_CTRL_RST;
+       }
+
+       ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+       IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
+       IXGBE_WRITE_FLUSH(hw);
+
+       /* Poll for reset bit to self-clear indicating reset is complete */
+       for (i = 0; i < 10; i++) {
+               udelay(1);
+               ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+               if (!(ctrl & IXGBE_CTRL_RST))
+                       break;
+       }
+       if (ctrl & IXGBE_CTRL_RST) {
+               status = IXGBE_ERR_RESET_FAILED;
+               hw_dbg(hw, "Reset polling failed to complete.\n");
+       }
+
+       /* Clear PF Reset Done bit so PF/VF Mail Ops can work */
+       ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+       ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
+       IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+
+       msleep(50);
+
+       /* Set the Rx packet buffer size. */
+       IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);
+
+       /* Store the permanent mac address */
+       hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+       /*
+        * Store the original AUTOC/AUTOC2 values if they have not been
+        * stored off yet.  Otherwise restore the stored original
+        * values since the reset operation sets back to defaults.
+        */
+       autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+       if (hw->mac.orig_link_settings_stored == false) {
+               hw->mac.orig_autoc = autoc;
+               hw->mac.orig_autoc2 = autoc2;
+               hw->mac.orig_link_settings_stored = true;
+       } else {
+               if (autoc != hw->mac.orig_autoc)
+                       IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
+                                       IXGBE_AUTOC_AN_RESTART));
+
+               if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
+                   (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
+                       autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
+                       autoc2 |= (hw->mac.orig_autoc2 &
+                                  IXGBE_AUTOC2_UPPER_MASK);
+                       IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
+               }
+       }
+
+       /*
+        * Store MAC address from RAR0, clear receive address registers, and
+        * clear the multicast table.  Also reset num_rar_entries to 128,
+        * since we modify this value when programming the SAN MAC address.
+        */
+       hw->mac.num_rar_entries = 128;
+       hw->mac.ops.init_rx_addrs(hw);
+
+       /* Store the permanent mac address */
+       hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+       /* Store the permanent SAN mac address */
+       hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
+
+       /* Add the SAN MAC address to the RAR only if it's a valid address */
+       if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
+               hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
+                                   hw->mac.san_addr, 0, IXGBE_RAH_AV);
+
+               /* Reserve the last RAR for the SAN MAC address */
+               hw->mac.num_rar_entries--;
+       }
+
+       /* Store the alternative WWNN/WWPN prefix */
+       hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
+                                  &hw->mac.wwpn_prefix);
+
+       return status;
+}
+
+/**
+ *  ixgbe_get_supported_physical_layer_X540 - Returns physical layer type
+ *  @hw: pointer to hardware structure
+ *
+ *  Determines physical layer capabilities of the current configuration.
+ **/
+static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
+{
+       u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+       u16 ext_ability = 0;
+
+       hw->phy.ops.identify(hw);
+
+       hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
+                            &ext_ability);
+       if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
+               physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+       if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
+               physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+       if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
+               physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+
+       return physical_layer;
+}
+
+/**
+ * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ **/
+static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
+{
+       struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+       u32 eec;
+       u16 eeprom_size;
+
+       if (eeprom->type == ixgbe_eeprom_uninitialized) {
+               eeprom->semaphore_delay = 10;
+               eeprom->type = ixgbe_flash;
+
+               eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+               eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+                                   IXGBE_EEC_SIZE_SHIFT);
+               eeprom->word_size = 1 << (eeprom_size +
+                                         IXGBE_EEPROM_WORD_SIZE_SHIFT);
+
+               hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
+                       eeprom->type, eeprom->word_size);
+       }
+
+       return 0;
+}
+
+/**
+ * ixgbe_read_eerd_X540 - Read EEPROM word using EERD
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ **/
+static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+       s32 status;
+
+       if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM))
+               status = ixgbe_read_eerd_generic(hw, offset, data);
+       else
+               status = IXGBE_ERR_SWFW_SYNC;
+
+       ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
+       return status;
+}
+
+/**
+ * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Write a 16 bit word to the EEPROM using the EEWR register.
+ **/
+static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+       u32 eewr;
+       s32 status;
+
+       hw->eeprom.ops.init_params(hw);
+
+       if (offset >= hw->eeprom.word_size) {
+               status = IXGBE_ERR_EEPROM;
+               goto out;
+       }
+
+       eewr = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) |
+              (data << IXGBE_EEPROM_RW_REG_DATA) |
+              IXGBE_EEPROM_RW_REG_START;
+
+       if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM)) {
+               status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
+               if (status != 0) {
+                       hw_dbg(hw, "Eeprom write EEWR timed out\n");
+                       goto out;
+               }
+
+               IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
+
+               status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
+               if (status != 0) {
+                       hw_dbg(hw, "Eeprom write EEWR timed out\n");
+                       goto out;
+               }
+       } else {
+               status = IXGBE_ERR_SWFW_SYNC;
+       }
+
+out:
+       ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
+       return status;
+}
+
+/**
+ * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ **/
+static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
+{
+       u16 i;
+       u16 j;
+       u16 checksum = 0;
+       u16 length = 0;
+       u16 pointer = 0;
+       u16 word = 0;
+
+       /* Include 0x0-0x3F in the checksum */
+       for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
+               if (hw->eeprom.ops.read(hw, i, &word) != 0) {
+                       hw_dbg(hw, "EEPROM read failed\n");
+                       break;
+               }
+               checksum += word;
+       }
+
+       /*
+        * Include all data from pointers 0x3, 0x6-0xE.  This excludes the
+        * FW, PHY module, and PCIe Expansion/Option ROM pointers.
+        */
+       for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
+               if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
+                       continue;
+
+               if (hw->eeprom.ops.read(hw, i, &pointer) != 0) {
+                       hw_dbg(hw, "EEPROM read failed\n");
+                       break;
+               }
+
+               /* Skip pointer section if the pointer is invalid. */
+               if (pointer == 0xFFFF || pointer == 0 ||
+                   pointer >= hw->eeprom.word_size)
+                       continue;
+
+               if (hw->eeprom.ops.read(hw, pointer, &length) != 0) {
+                       hw_dbg(hw, "EEPROM read failed\n");
+                       break;
+               }
+
+               /* Skip pointer section if length is invalid. */
+               if (length == 0xFFFF || length == 0 ||
+                   (pointer + length) >= hw->eeprom.word_size)
+                       continue;
+
+               for (j = pointer+1; j <= pointer+length; j++) {
+                       if (hw->eeprom.ops.read(hw, j, &word) != 0) {
+                               hw_dbg(hw, "EEPROM read failed\n");
+                               break;
+                       }
+                       checksum += word;
+               }
+       }
+
+       checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+
+       return checksum;
+}
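
The calculation above follows the usual Intel NVM convention: the stored checksum word is chosen so that the sum of all checksummed words plus the checksum itself equals IXGBE_EEPROM_SUM (0xBABA). A minimal sketch of the verification this implies; the generic validate routine wired into the ops table below does the real work, this only shows the idea:

    /* Illustrative only: verifying a checksum produced as
     * (IXGBE_EEPROM_SUM - sum_of_checksummed_words). */
    static bool example_checksum_ok(u16 sum_of_words, u16 stored_checksum)
    {
            /* arithmetic is mod 2^16, so equality holds iff the image is intact */
            return (u16)(sum_of_words + stored_checksum) == IXGBE_EEPROM_SUM;
    }
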
+
+/**
+ * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash
+ * @hw: pointer to hardware structure
+ *
+ * After writing the EEPROM to shadow RAM using the EEWR register, software
+ * calculates the checksum, updates the EEPROM, and instructs the hardware
+ * to update the flash.
+ **/
+static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
+{
+       s32 status;
+
+       status = ixgbe_update_eeprom_checksum_generic(hw);
+
+       if (status)
+               status = ixgbe_update_flash_X540(hw);
+
+       return status;
+}
+
+/**
+ * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device
+ * @hw: pointer to hardware structure
+ *
+ * Set FLUP (bit 23) of the EEC register to instruct the hardware to copy
+ * the EEPROM from shadow RAM to the flash device.
+ **/
+static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
+{
+       u32 flup;
+       s32 status = IXGBE_ERR_EEPROM;
+
+       status = ixgbe_poll_flash_update_done_X540(hw);
+       if (status == IXGBE_ERR_EEPROM) {
+               hw_dbg(hw, "Flash update time out\n");
+               goto out;
+       }
+
+       flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
+       IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+
+       status = ixgbe_poll_flash_update_done_X540(hw);
+       if (status)
+               hw_dbg(hw, "Flash update complete\n");
+       else
+               hw_dbg(hw, "Flash update time out\n");
+
+       if (hw->revision_id == 0) {
+               flup = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+               if (flup & IXGBE_EEC_SEC1VAL) {
+                       flup |= IXGBE_EEC_FLUP;
+                       IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+               }
+
+               status = ixgbe_poll_flash_update_done_X540(hw);
+               if (status)
+                       hw_dbg(hw, "Flash update complete\n");
+               else
+                       hw_dbg(hw, "Flash update time out\n");
+
+       }
+out:
+       return status;
+}
+
+/**
+ * ixgbe_poll_flash_update_done_X540 - Poll flash update status
+ * @hw: pointer to hardware structure
+ *
+ * Polls the FLUDONE (bit 26) of the EEC Register to determine when the
+ * flash update is done.
+ **/
+static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
+{
+       u32 i;
+       u32 reg;
+       s32 status = IXGBE_ERR_EEPROM;
+
+       for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
+               reg = IXGBE_READ_REG(hw, IXGBE_EEC);
+               if (reg & IXGBE_EEC_FLUDONE) {
+                       status = 0;
+                       break;
+               }
+               udelay(5);
+       }
+       return status;
+}
+
+/**
+ * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore through the SW_FW_SYNC register for
+ * the specified function (CSR, PHY0, PHY1, NVM, Flash)
+ **/
+static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
+{
+       u32 swfw_sync;
+       u32 swmask = mask;
+       u32 fwmask = mask << 5;
+       u32 hwmask = 0;
+       u32 timeout = 200;
+       u32 i;
+
+       if (swmask == IXGBE_GSSR_EEP_SM)
+               hwmask = IXGBE_GSSR_FLASH_SM;
+
+       for (i = 0; i < timeout; i++) {
+               /*
+                * SW NVM semaphore bit is used for access to all
+                * SW_FW_SYNC bits (not just NVM)
+                */
+               if (ixgbe_get_swfw_sync_semaphore(hw))
+                       return IXGBE_ERR_SWFW_SYNC;
+
+               swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+               if (!(swfw_sync & (fwmask | swmask | hwmask))) {
+                       swfw_sync |= swmask;
+                       IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+                       ixgbe_release_swfw_sync_semaphore(hw);
+                       break;
+               } else {
+                       /*
+                        * Firmware currently using resource (fwmask),
+                        * hardware currently using resource (hwmask),
+                        * or other software thread currently using
+                        * resource (swmask)
+                        */
+                       ixgbe_release_swfw_sync_semaphore(hw);
+                       msleep(5);
+               }
+       }
+
+       /*
+        * If the resource is not released by the FW/HW, the SW can assume that
+        * the FW/HW is malfunctioning. In that case the SW should set the
+        * SW bit(s) of the requested resource(s) while ignoring the
+        * corresponding FW/HW bits in the SW_FW_SYNC register.
+        */
+       if (i >= timeout) {
+               swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+               if (swfw_sync & (fwmask | hwmask)) {
+                       if (ixgbe_get_swfw_sync_semaphore(hw))
+                               return IXGBE_ERR_SWFW_SYNC;
+
+                       swfw_sync |= swmask;
+                       IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+                       ixgbe_release_swfw_sync_semaphore(hw);
+               }
+       }
+
+       msleep(5);
+       return 0;
+}
+
+/**
+ * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore through the SW_FW_SYNC register
+ * for the specified function (CSR, PHY0, PHY1, NVM, Flash)
+ **/
+static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
+{
+       u32 swfw_sync;
+       u32 swmask = mask;
+
+       ixgbe_get_swfw_sync_semaphore(hw);
+
+       swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+       swfw_sync &= ~swmask;
+       IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+
+       ixgbe_release_swfw_sync_semaphore(hw);
+       msleep(5);
+}
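
Every NVM or flash access on X540 is meant to be bracketed by the two helpers above, as the EERD/EEWR wrappers earlier in this file do. A minimal usage sketch that checks the 0-on-success return of the acquire helper; error handling is trimmed and the wrapper name is illustrative:

    /* Illustrative only: bracketing a shared EEPROM access with the X540
     * SW/FW synchronization helpers defined above. */
    static s32 example_locked_eerd_read(struct ixgbe_hw *hw, u16 offset, u16 *data)
    {
            s32 status;

            if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM))
                    return IXGBE_ERR_SWFW_SYNC;     /* acquire returns 0 on success */

            status = ixgbe_read_eerd_generic(hw, offset, data);
            ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
            return status;
    }
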
+
+/**
+ * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * Sets the hardware semaphores so SW/FW can gain control of shared resources
+ **/
+static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
+{
+       s32 status = IXGBE_ERR_EEPROM;
+       u32 timeout = 2000;
+       u32 i;
+       u32 swsm;
+
+       /* Get SMBI software semaphore between device drivers first */
+       for (i = 0; i < timeout; i++) {
+               /*
+                * If the SMBI bit is 0 when we read it, then the bit will be
+                * set and we have the semaphore
+                */
+               swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+               if (!(swsm & IXGBE_SWSM_SMBI)) {
+                       status = 0;
+                       break;
+               }
+               udelay(50);
+       }
+
+       /* Now get the semaphore between SW/FW through the REGSMP bit */
+       if (status) {
+               for (i = 0; i < timeout; i++) {
+                       swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+                       if (!(swsm & IXGBE_SWFW_REGSMP))
+                               break;
+
+                       udelay(50);
+               }
+       } else {
+               hw_dbg(hw, "Software semaphore SMBI between device drivers "
+                          "not granted.\n");
+       }
+
+       return status;
+}
+
+/**
+ * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * This function clears hardware semaphore bits.
+ **/
+static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
+{
+       u32 swsm;
+
+       /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
+
+       swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+       swsm &= ~IXGBE_SWSM_SMBI;
+       IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+
+       swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+       swsm &= ~IXGBE_SWFW_REGSMP;
+       IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);
+
+       IXGBE_WRITE_FLUSH(hw);
+}
+
+static struct ixgbe_mac_operations mac_ops_X540 = {
+       .init_hw                = &ixgbe_init_hw_generic,
+       .reset_hw               = &ixgbe_reset_hw_X540,
+       .start_hw               = &ixgbe_start_hw_generic,
+       .clear_hw_cntrs         = &ixgbe_clear_hw_cntrs_generic,
+       .get_media_type         = &ixgbe_get_media_type_X540,
+       .get_supported_physical_layer =
+                                  &ixgbe_get_supported_physical_layer_X540,
+       .enable_rx_dma          = &ixgbe_enable_rx_dma_generic,
+       .get_mac_addr           = &ixgbe_get_mac_addr_generic,
+       .get_san_mac_addr       = &ixgbe_get_san_mac_addr_generic,
+       .get_device_caps        = NULL,
+       .get_wwn_prefix         = &ixgbe_get_wwn_prefix_generic,
+       .stop_adapter           = &ixgbe_stop_adapter_generic,
+       .get_bus_info           = &ixgbe_get_bus_info_generic,
+       .set_lan_id             = &ixgbe_set_lan_id_multi_port_pcie,
+       .read_analog_reg8       = NULL,
+       .write_analog_reg8      = NULL,
+       .setup_link             = &ixgbe_setup_mac_link_X540,
+       .check_link             = &ixgbe_check_mac_link_generic,
+       .get_link_capabilities  = &ixgbe_get_copper_link_capabilities_generic,
+       .led_on                 = &ixgbe_led_on_generic,
+       .led_off                = &ixgbe_led_off_generic,
+       .blink_led_start        = &ixgbe_blink_led_start_generic,
+       .blink_led_stop         = &ixgbe_blink_led_stop_generic,
+       .set_rar                = &ixgbe_set_rar_generic,
+       .clear_rar              = &ixgbe_clear_rar_generic,
+       .set_vmdq               = &ixgbe_set_vmdq_generic,
+       .clear_vmdq             = &ixgbe_clear_vmdq_generic,
+       .init_rx_addrs          = &ixgbe_init_rx_addrs_generic,
+       .update_uc_addr_list    = &ixgbe_update_uc_addr_list_generic,
+       .update_mc_addr_list    = &ixgbe_update_mc_addr_list_generic,
+       .enable_mc              = &ixgbe_enable_mc_generic,
+       .disable_mc             = &ixgbe_disable_mc_generic,
+       .clear_vfta             = &ixgbe_clear_vfta_generic,
+       .set_vfta               = &ixgbe_set_vfta_generic,
+       .fc_enable              = &ixgbe_fc_enable_generic,
+       .init_uta_tables        = &ixgbe_init_uta_tables_generic,
+       .setup_sfp              = NULL,
+};
+
+static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
+       .init_params            = &ixgbe_init_eeprom_params_X540,
+       .read                   = &ixgbe_read_eerd_X540,
+       .write                  = &ixgbe_write_eewr_X540,
+       .calc_checksum          = &ixgbe_calc_eeprom_checksum_X540,
+       .validate_checksum      = &ixgbe_validate_eeprom_checksum_generic,
+       .update_checksum        = &ixgbe_update_eeprom_checksum_X540,
+};
+
+static struct ixgbe_phy_operations phy_ops_X540 = {
+       .identify               = &ixgbe_identify_phy_generic,
+       .identify_sfp           = &ixgbe_identify_sfp_module_generic,
+       .init                   = NULL,
+       .reset                  = &ixgbe_reset_phy_generic,
+       .read_reg               = &ixgbe_read_phy_reg_generic,
+       .write_reg              = &ixgbe_write_phy_reg_generic,
+       .setup_link             = &ixgbe_setup_phy_link_generic,
+       .setup_link_speed       = &ixgbe_setup_phy_link_speed_generic,
+       .read_i2c_byte          = &ixgbe_read_i2c_byte_generic,
+       .write_i2c_byte         = &ixgbe_write_i2c_byte_generic,
+       .read_i2c_eeprom        = &ixgbe_read_i2c_eeprom_generic,
+       .write_i2c_eeprom       = &ixgbe_write_i2c_eeprom_generic,
+       .check_overtemp         = &ixgbe_tn_check_overtemp,
+};
+
+struct ixgbe_info ixgbe_X540_info = {
+       .mac                    = ixgbe_mac_X540,
+       .get_invariants         = &ixgbe_get_invariants_X540,
+       .mac_ops                = &mac_ops_X540,
+       .eeprom_ops             = &eeprom_ops_X540,
+       .phy_ops                = &phy_ops_X540,
+       .mbx_ops                = &mbx_ops_generic,
+};
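
The ixgbe_X540_info entry above is consumed the same way as the other per-MAC info tables: at probe time the driver copies each ops table into the hw struct and from then on goes only through the function pointers. A hypothetical sketch of that binding step; the helper and the exact copy mechanism are illustrative, not the probe code itself:

    /* Hypothetical sketch: binding a per-MAC info table to the hw struct. */
    static void example_bind_ops(struct ixgbe_hw *hw, const struct ixgbe_info *ii)
    {
            hw->mac.type = ii->mac;
            memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
            memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
            memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
            memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
    }
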
index dd4e0d27e8ccf5418633bdb77b2ff0f1ca85b4d6..1f35d229e71ad4a3fca37105361a2c0adef10fd6 100644 (file)
@@ -1,7 +1,7 @@
 ################################################################################
 #
 # Intel 82599 Virtual Function driver
-# Copyright(c) 1999 - 2009 Intel Corporation.
+# Copyright(c) 1999 - 2010 Intel Corporation.
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,
index ca2c81f49a0567a133b34bd98ba08afb1495da7f..f8a807d606c7cd476e156feee06f19a2eac3e257 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index da4033c6efa20f677342a8f6aff4bbca224548cd..0cd6abcf93062a208b6a8c59e083b54f3c41e9ca 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index dc03c9652389692982dbcff1be6693c5d76e2f6b..5b8063cb4e6c44c213faac2c59aa54ba639a777c 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -51,9 +51,10 @@ char ixgbevf_driver_name[] = "ixgbevf";
 static const char ixgbevf_driver_string[] =
        "Intel(R) 82599 Virtual Function";
 
-#define DRV_VERSION "1.0.0-k0"
+#define DRV_VERSION "1.0.12-k0"
 const char ixgbevf_driver_version[] = DRV_VERSION;
-static char ixgbevf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
+static char ixgbevf_copyright[] =
+       "Copyright (c) 2009 - 2010 Intel Corporation.";
 
 static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
        [board_82599_vf] = &ixgbevf_vf_info,
@@ -3424,10 +3425,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
        if (hw->mac.ops.get_bus_info)
                hw->mac.ops.get_bus_info(hw);
 
-
-       netif_carrier_off(netdev);
-       netif_tx_stop_all_queues(netdev);
-
        strcpy(netdev->name, "eth%d");
 
        err = register_netdev(netdev);
@@ -3436,6 +3433,8 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
 
        adapter->netdev_registered = true;
 
+       netif_carrier_off(netdev);
+
        ixgbevf_init_last_counter_stats(adapter);
 
        /* print the MAC address */
index 84ac486f4a65f9f4abb65b5d94236c646b56bec0..7a88331257707ebae6eca3a8fb5ad8ddea936557 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index 8c063bebee7f13a5f1b6d28054c4d1fe5658fc3f..b2b5bf5daa3d202cc74d8961ca5af9f411604bc1 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index 12f75960aec1f12e9fa64bd494b7eb20db71d97c..fb80ca1bcc934fe324ebaa6f3adcf4a312a70ae7 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index bfe42c1fcfafa681c660d10e2b399e20d12f22ca..971019d819b4798407b4b61f90b5d2442eb3f0a7 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index 61f9dc831424143d02497b335387caba5bf65823..144c99d5363af1108e2d050e126e09493a3c3317 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index 51919fcd50c26e2c0c6b8c23ba393887eca50254..0fa4a9887ba2668e42cd328ad150148af0fd88fb 100644 (file)
@@ -1545,6 +1545,37 @@ static int ks8851_read_selftest(struct ks8851_net *ks)
 
 /* driver bus management functions */
 
+#ifdef CONFIG_PM
+static int ks8851_suspend(struct spi_device *spi, pm_message_t state)
+{
+       struct ks8851_net *ks = dev_get_drvdata(&spi->dev);
+       struct net_device *dev = ks->netdev;
+
+       if (netif_running(dev)) {
+               netif_device_detach(dev);
+               ks8851_net_stop(dev);
+       }
+
+       return 0;
+}
+
+static int ks8851_resume(struct spi_device *spi)
+{
+       struct ks8851_net *ks = dev_get_drvdata(&spi->dev);
+       struct net_device *dev = ks->netdev;
+
+       if (netif_running(dev)) {
+               ks8851_net_open(dev);
+               netif_device_attach(dev);
+       }
+
+       return 0;
+}
+#else
+#define ks8851_suspend NULL
+#define ks8851_resume NULL
+#endif
+
 static int __devinit ks8851_probe(struct spi_device *spi)
 {
        struct net_device *ndev;
@@ -1679,6 +1710,8 @@ static struct spi_driver ks8851_driver = {
        },
        .probe = ks8851_probe,
        .remove = __devexit_p(ks8851_remove),
+       .suspend = ks8851_suspend,
+       .resume = ks8851_resume,
 };
 
 static int __init ks8851_init(void)
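The new ks8851 handlers follow the usual netdev suspend/resume shape: detach before the hardware goes down, reattach only after it is running again. A minimal sketch of that ordering follows; the "example_*" names are hypothetical stand-ins (the real callbacks above also receive the spi_device and pm_message_t).

#include <linux/netdevice.h>

/* Hypothetical hardware bring-up/shutdown routines, standing in for
 * ks8851_net_open()/ks8851_net_stop() above. */
static int example_hw_open(struct net_device *dev) { return 0; }
static void example_hw_stop(struct net_device *dev) { }

static int example_suspend(struct net_device *dev)
{
	if (netif_running(dev)) {
		/* Mark the device absent (stopping its TX queues) before the
		 * hardware goes down, so nothing tries to transmit meanwhile. */
		netif_device_detach(dev);
		example_hw_stop(dev);
	}
	return 0;
}

static int example_resume(struct net_device *dev)
{
	if (netif_running(dev)) {
		example_hw_open(dev);
		/* Only once the hardware is up again is the device marked
		 * present and its queues woken. */
		netif_device_attach(dev);
	}
	return 0;
}
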
index f06296bfe293fd8ddcce2b4906c897d45472f92b..02336edce748db5b4e959d8b83e637d48c20aac4 100644 (file)
@@ -207,7 +207,7 @@ tx_full and tbusy flags.
 #define LANCE_BUS_IF 0x16
 #define LANCE_TOTAL_SIZE 0x18
 
-#define TX_TIMEOUT     20
+#define TX_TIMEOUT     (HZ/5)
 
 /* The LANCE Rx and Tx ring descriptors. */
 struct lance_rx_head {
index c27f4291b350422978ab9fbf55bd6318ec0953ae..9e042894479b6c5e74d9ef28c4540c2370bad0a9 100644 (file)
@@ -161,7 +161,7 @@ enum commands {
 #define         RX_SUSPEND     0x0030
 #define         RX_ABORT       0x0040
 
-#define TX_TIMEOUT     5
+#define TX_TIMEOUT     (HZ/20)
 
 
 struct i596_reg {
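Both TX_TIMEOUT hunks above express the watchdog interval in terms of HZ instead of a raw jiffy count, so the wall-clock timeout no longer depends on the kernel's tick rate (the removed literals presumably assumed HZ=100). A small illustration of the arithmetic, with hypothetical macro names:

#include <linux/jiffies.h>

/* HZ/5 is 20 jiffies at HZ=100 and 200 jiffies at HZ=1000 -- 200 ms
 * either way.  The removed literal 20 only meant 200 ms on a HZ=100
 * kernel; the HZ/20 case in lib82596 is the same idea for 50 ms. */
#define EXAMPLE_TX_TIMEOUT	(HZ / 5)

/* An equivalent, more explicit spelling: */
#define EXAMPLE_TX_TIMEOUT_MS	msecs_to_jiffies(200)
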
index e7030ceb178b6c3f109c5c355cbec7b55e0bae75..da74db4a03d46cccab76f6d51a698ae5a843539d 100644 (file)
@@ -203,7 +203,7 @@ static void __NS8390_init(struct net_device *dev, int startp);
 static int __ei_open(struct net_device *dev)
 {
        unsigned long flags;
-       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+       struct ei_device *ei_local = netdev_priv(dev);
 
        if (dev->watchdog_timeo <= 0)
                 dev->watchdog_timeo = TX_TIMEOUT;
@@ -231,7 +231,7 @@ static int __ei_open(struct net_device *dev)
  */
 static int __ei_close(struct net_device *dev)
 {
-       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+       struct ei_device *ei_local = netdev_priv(dev);
        unsigned long flags;
 
        /*
@@ -256,7 +256,7 @@ static int __ei_close(struct net_device *dev)
 static void __ei_tx_timeout(struct net_device *dev)
 {
        unsigned long e8390_base = dev->base_addr;
-       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+       struct ei_device *ei_local = netdev_priv(dev);
        int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
        unsigned long flags;
 
@@ -303,7 +303,7 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
                                   struct net_device *dev)
 {
        unsigned long e8390_base = dev->base_addr;
-       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+       struct ei_device *ei_local = netdev_priv(dev);
        int send_length = skb->len, output_page;
        unsigned long flags;
        char buf[ETH_ZLEN];
@@ -592,7 +592,7 @@ static void ei_tx_err(struct net_device *dev)
 static void ei_tx_intr(struct net_device *dev)
 {
        unsigned long e8390_base = dev->base_addr;
-       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+       struct ei_device *ei_local = netdev_priv(dev);
        int status = ei_inb(e8390_base + EN0_TSR);
 
        ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */
@@ -675,7 +675,7 @@ static void ei_tx_intr(struct net_device *dev)
 static void ei_receive(struct net_device *dev)
 {
        unsigned long e8390_base = dev->base_addr;
-       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+       struct ei_device *ei_local = netdev_priv(dev);
        unsigned char rxing_page, this_frame, next_frame;
        unsigned short current_offset;
        int rx_pkt_count = 0;
@@ -879,7 +879,7 @@ static void ei_rx_overrun(struct net_device *dev)
 static struct net_device_stats *__ei_get_stats(struct net_device *dev)
 {
        unsigned long ioaddr = dev->base_addr;
-       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+       struct ei_device *ei_local = netdev_priv(dev);
        unsigned long flags;
 
        /* If the card is stopped, just return the present stats. */
@@ -927,7 +927,7 @@ static void do_set_multicast_list(struct net_device *dev)
 {
        unsigned long e8390_base = dev->base_addr;
        int i;
-       struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);
+       struct ei_device *ei_local = netdev_priv(dev);
 
        if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI)))
        {
@@ -981,7 +981,7 @@ static void do_set_multicast_list(struct net_device *dev)
 static void __ei_set_multicast_list(struct net_device *dev)
 {
        unsigned long flags;
-       struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);
+       struct ei_device *ei_local = netdev_priv(dev);
 
        spin_lock_irqsave(&ei_local->page_lock, flags);
        do_set_multicast_list(dev);
@@ -998,7 +998,7 @@ static void __ei_set_multicast_list(struct net_device *dev)
 
 static void ethdev_setup(struct net_device *dev)
 {
-       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+       struct ei_device *ei_local = netdev_priv(dev);
        if (ei_debug > 1)
                printk(version);
 
@@ -1036,7 +1036,7 @@ static struct net_device *____alloc_ei_netdev(int size)
 static void __NS8390_init(struct net_device *dev, int startp)
 {
        unsigned long e8390_base = dev->base_addr;
-       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+       struct ei_device *ei_local = netdev_priv(dev);
        int i;
        int endcfg = ei_local->word16
            ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
@@ -1099,7 +1099,7 @@ static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
                                                                int start_page)
 {
        unsigned long e8390_base = dev->base_addr;
-       struct ei_device *ei_local __attribute((unused)) = (struct ei_device *) netdev_priv(dev);
+       struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev);
 
        ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);
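This file, like several similar hunks below (ne-h8300, axnet_cs, qla3xxx, qlge, pegasus, s2io), only drops a redundant cast: netdev_priv() returns void *, which converts implicitly to any object pointer type in C. A minimal illustration with a hypothetical private struct:

#include <linux/netdevice.h>

struct example_priv {
	int dummy;
};

static struct example_priv *example_get_priv(struct net_device *dev)
{
	/* netdev_priv() returns void *, so no cast is needed (or useful). */
	return netdev_priv(dev);
}
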
 
index 0fc9dc7f20db02da7ac784c193b940453b7eccd7..93f0ba25c80821a647a860c26c8ddefa6f57f733 100644 (file)
@@ -243,18 +243,22 @@ xmit_world:
 netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
                               struct net_device *dev)
 {
-       int i = skb_get_queue_mapping(skb);
-       struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
        unsigned int len = skb->len;
        int ret;
+       const struct macvlan_dev *vlan = netdev_priv(dev);
 
        ret = macvlan_queue_xmit(skb, dev);
        if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
-               txq->tx_packets++;
-               txq->tx_bytes += len;
-       } else
-               txq->tx_dropped++;
+               struct macvlan_pcpu_stats *pcpu_stats;
 
+               pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
+               u64_stats_update_begin(&pcpu_stats->syncp);
+               pcpu_stats->tx_packets++;
+               pcpu_stats->tx_bytes += len;
+               u64_stats_update_end(&pcpu_stats->syncp);
+       } else {
+               this_cpu_inc(vlan->pcpu_stats->tx_dropped);
+       }
        return ret;
 }
 EXPORT_SYMBOL_GPL(macvlan_start_xmit);
@@ -414,14 +418,15 @@ static int macvlan_init(struct net_device *dev)
        dev->state              = (dev->state & ~MACVLAN_STATE_MASK) |
                                  (lowerdev->state & MACVLAN_STATE_MASK);
        dev->features           = lowerdev->features & MACVLAN_FEATURES;
+       dev->features           |= NETIF_F_LLTX;
        dev->gso_max_size       = lowerdev->gso_max_size;
        dev->iflink             = lowerdev->ifindex;
        dev->hard_header_len    = lowerdev->hard_header_len;
 
        macvlan_set_lockdep_class(dev);
 
-       vlan->rx_stats = alloc_percpu(struct macvlan_rx_stats);
-       if (!vlan->rx_stats)
+       vlan->pcpu_stats = alloc_percpu(struct macvlan_pcpu_stats);
+       if (!vlan->pcpu_stats)
                return -ENOMEM;
 
        return 0;
@@ -431,7 +436,7 @@ static void macvlan_uninit(struct net_device *dev)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
 
-       free_percpu(vlan->rx_stats);
+       free_percpu(vlan->pcpu_stats);
 }
 
 static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
@@ -439,33 +444,38 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
 
-       dev_txq_stats_fold(dev, stats);
-
-       if (vlan->rx_stats) {
-               struct macvlan_rx_stats *p, accum = {0};
-               u64 rx_packets, rx_bytes, rx_multicast;
+       if (vlan->pcpu_stats) {
+               struct macvlan_pcpu_stats *p;
+               u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
+               u32 rx_errors = 0, tx_dropped = 0;
                unsigned int start;
                int i;
 
                for_each_possible_cpu(i) {
-                       p = per_cpu_ptr(vlan->rx_stats, i);
+                       p = per_cpu_ptr(vlan->pcpu_stats, i);
                        do {
                                start = u64_stats_fetch_begin_bh(&p->syncp);
                                rx_packets      = p->rx_packets;
                                rx_bytes        = p->rx_bytes;
                                rx_multicast    = p->rx_multicast;
+                               tx_packets      = p->tx_packets;
+                               tx_bytes        = p->tx_bytes;
                        } while (u64_stats_fetch_retry_bh(&p->syncp, start));
-                       accum.rx_packets        += rx_packets;
-                       accum.rx_bytes          += rx_bytes;
-                       accum.rx_multicast      += rx_multicast;
-                       /* rx_errors is an ulong, updated without syncp protection */
-                       accum.rx_errors         += p->rx_errors;
+
+                       stats->rx_packets       += rx_packets;
+                       stats->rx_bytes         += rx_bytes;
+                       stats->multicast        += rx_multicast;
+                       stats->tx_packets       += tx_packets;
+                       stats->tx_bytes         += tx_bytes;
+                       /* rx_errors & tx_dropped are u32, updated
+                        * without syncp protection.
+                        */
+                       rx_errors       += p->rx_errors;
+                       tx_dropped      += p->tx_dropped;
                }
-               stats->rx_packets = accum.rx_packets;
-               stats->rx_bytes   = accum.rx_bytes;
-               stats->rx_errors  = accum.rx_errors;
-               stats->rx_dropped = accum.rx_errors;
-               stats->multicast  = accum.rx_multicast;
+               stats->rx_errors        = rx_errors;
+               stats->rx_dropped       = rx_errors;
+               stats->tx_dropped       = tx_dropped;
        }
        return stats;
 }
@@ -601,25 +611,6 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
        return 0;
 }
 
-static int macvlan_get_tx_queues(struct net *net,
-                                struct nlattr *tb[],
-                                unsigned int *num_tx_queues,
-                                unsigned int *real_num_tx_queues)
-{
-       struct net_device *real_dev;
-
-       if (!tb[IFLA_LINK])
-               return -EINVAL;
-
-       real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
-       if (!real_dev)
-               return -ENODEV;
-
-       *num_tx_queues      = real_dev->num_tx_queues;
-       *real_num_tx_queues = real_dev->real_num_tx_queues;
-       return 0;
-}
-
 int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
                           struct nlattr *tb[], struct nlattr *data[],
                           int (*receive)(struct sk_buff *skb),
@@ -743,7 +734,6 @@ int macvlan_link_register(struct rtnl_link_ops *ops)
 {
        /* common fields */
        ops->priv_size          = sizeof(struct macvlan_dev);
-       ops->get_tx_queues      = macvlan_get_tx_queues;
        ops->validate           = macvlan_validate;
        ops->maxtype            = IFLA_MACVLAN_MAX;
        ops->policy             = macvlan_policy;
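The macvlan hunks above fold TX accounting into the same per-CPU, u64_stats_sync-protected counter block already used for RX, and drop the per-queue stats fold and the get_tx_queues hook that it needed. Below is a self-contained sketch of that pattern with hypothetical names; as in the driver, the 32-bit drop counter is updated and read without the syncp.

#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical per-CPU counter block mirroring the pattern above. */
struct example_pcpu_stats {
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
	u32			tx_dropped;	/* u32: bumped without the syncp */
};

/* Writer side: per-CPU, so no lock; 64-bit counters go under the syncp. */
static void example_count_tx(struct example_pcpu_stats __percpu *stats,
			     unsigned int len, bool dropped)
{
	struct example_pcpu_stats *p = this_cpu_ptr(stats);

	if (dropped) {
		p->tx_dropped++;
		return;
	}
	u64_stats_update_begin(&p->syncp);
	p->tx_packets++;
	p->tx_bytes += len;
	u64_stats_update_end(&p->syncp);
}

/* Reader side: retry the 64-bit snapshot if a writer ran concurrently. */
static void example_fold_tx(struct example_pcpu_stats __percpu *stats,
			    u64 *packets, u64 *bytes, u32 *drops)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct example_pcpu_stats *p = per_cpu_ptr(stats, cpu);
		unsigned int start;
		u64 tp, tb;

		do {
			start = u64_stats_fetch_begin_bh(&p->syncp);
			tp = p->tx_packets;
			tb = p->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&p->syncp, start));

		*packets += tp;
		*bytes += tb;
		*drops += p->tx_dropped;	/* u32, read without the syncp */
	}
}
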
index e0b0ef11f11038f64089c164967316affed23a92..30be8c634ebdd3722bd3c3e965348ccd638d12e2 100644 (file)
@@ -86,7 +86,7 @@ static u32 reg_offset[16];
 
 static int __init init_reg_offset(struct net_device *dev,unsigned long base_addr)
 {
-       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+       struct ei_device *ei_local = netdev_priv(dev);
        int i;
        unsigned char bus_width;
 
@@ -218,7 +218,7 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
        int start_page, stop_page;
        int reg0, ret;
        static unsigned version_printed;
-       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+       struct ei_device *ei_local = netdev_priv(dev);
        unsigned char bus_width;
 
        if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME))
@@ -371,7 +371,7 @@ static int ne_close(struct net_device *dev)
 static void ne_reset_8390(struct net_device *dev)
 {
        unsigned long reset_start_time = jiffies;
-       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+       struct ei_device *ei_local = netdev_priv(dev);
 
        if (ei_debug > 1)
                printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies);
@@ -397,7 +397,7 @@ static void ne_reset_8390(struct net_device *dev)
 
 static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
 {
-       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+       struct ei_device *ei_local = netdev_priv(dev);
        /* This *shouldn't* happen. If it does, it's the last thing you'll see */
 
        if (ei_status.dmaing)
@@ -437,7 +437,7 @@ static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, i
 
 static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
 {
-       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+       struct ei_device *ei_local = netdev_priv(dev);
 #ifdef NE_SANITY_CHECK
        int xfer_count = count;
 #endif
@@ -507,7 +507,7 @@ static void ne_block_input(struct net_device *dev, int count, struct sk_buff *sk
 static void ne_block_output(struct net_device *dev, int count,
                const unsigned char *buf, const int start_page)
 {
-       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+       struct ei_device *ei_local = netdev_priv(dev);
        unsigned long dma_start;
 #ifdef NE_SANITY_CHECK
        int retries = 0;
index 8a4d19e5de064bd3fbbb11a2d2e916a28cf593b0..1a0eb128e60773cb69bd1a5ba7548abd35368323 100644 (file)
@@ -875,7 +875,7 @@ static void do_set_multicast_list(struct net_device *dev);
 static int ax_open(struct net_device *dev)
 {
        unsigned long flags;
-       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+       struct ei_device *ei_local = netdev_priv(dev);
 
        /*
         *      Grab the page lock so we own the register set, then call
@@ -926,7 +926,7 @@ static int ax_close(struct net_device *dev)
 static void axnet_tx_timeout(struct net_device *dev)
 {
        long e8390_base = dev->base_addr;
-       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+       struct ei_device *ei_local = netdev_priv(dev);
        int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
        unsigned long flags;
 
@@ -973,7 +973,7 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
                                          struct net_device *dev)
 {
        long e8390_base = dev->base_addr;
-       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+       struct ei_device *ei_local = netdev_priv(dev);
        int length, send_length, output_page;
        unsigned long flags;
        u8 packet[ETH_ZLEN];
@@ -1270,7 +1270,7 @@ static void ei_tx_err(struct net_device *dev)
 static void ei_tx_intr(struct net_device *dev)
 {
        long e8390_base = dev->base_addr;
-       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+       struct ei_device *ei_local = netdev_priv(dev);
        int status = inb(e8390_base + EN0_TSR);
     
        /*
@@ -1354,7 +1354,7 @@ static void ei_tx_intr(struct net_device *dev)
 static void ei_receive(struct net_device *dev)
 {
        long e8390_base = dev->base_addr;
-       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+       struct ei_device *ei_local = netdev_priv(dev);
        unsigned char rxing_page, this_frame, next_frame;
        unsigned short current_offset;
        int rx_pkt_count = 0;
@@ -1539,7 +1539,7 @@ static void ei_rx_overrun(struct net_device *dev)
 static struct net_device_stats *get_stats(struct net_device *dev)
 {
        long ioaddr = dev->base_addr;
-       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+       struct ei_device *ei_local = netdev_priv(dev);
        unsigned long flags;
     
        /* If the card is stopped, just return the present stats. */
@@ -1588,7 +1588,7 @@ static void do_set_multicast_list(struct net_device *dev)
 {
        long e8390_base = dev->base_addr;
        int i;
-       struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);
+       struct ei_device *ei_local = netdev_priv(dev);
 
        if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) {
                memset(ei_local->mcfilter, 0, 8);
@@ -1646,7 +1646,7 @@ static void AX88190_init(struct net_device *dev, int startp)
 {
        axnet_dev_t *info = PRIV(dev);
        long e8390_base = dev->base_addr;
-       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+       struct ei_device *ei_local = netdev_priv(dev);
        int i;
        int endcfg = ei_local->word16 ? (0x48 | ENDCFG_WTS) : 0x48;
     
@@ -1712,7 +1712,7 @@ static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
                                                                int start_page)
 {
        long e8390_base = dev->base_addr;
-       struct ei_device *ei_local __attribute((unused)) = (struct ei_device *) netdev_priv(dev);
+       struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev);
     
        if (inb_p(e8390_base) & E8390_TRANS) 
        {
index 09cf56d0416a8c24c78780c7eb374fcebf01cffc..0c91598ae2806377aa901175afe75b2632ed9a3a 100644 (file)
@@ -1136,8 +1136,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
                   a four-byte PPP header on each packet */
                *skb_push(skb, 2) = 1;
                if (ppp->pass_filter &&
-                   sk_run_filter(skb, ppp->pass_filter,
-                                 ppp->pass_len) == 0) {
+                   sk_run_filter(skb, ppp->pass_filter) == 0) {
                        if (ppp->debug & 1)
                                printk(KERN_DEBUG "PPP: outbound frame not passed\n");
                        kfree_skb(skb);
@@ -1145,8 +1144,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
                }
                /* if this packet passes the active filter, record the time */
                if (!(ppp->active_filter &&
-                     sk_run_filter(skb, ppp->active_filter,
-                                   ppp->active_len) == 0))
+                     sk_run_filter(skb, ppp->active_filter) == 0))
                        ppp->last_xmit = jiffies;
                skb_pull(skb, 2);
 #else
@@ -1758,8 +1756,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
 
                        *skb_push(skb, 2) = 0;
                        if (ppp->pass_filter &&
-                           sk_run_filter(skb, ppp->pass_filter,
-                                         ppp->pass_len) == 0) {
+                           sk_run_filter(skb, ppp->pass_filter) == 0) {
                                if (ppp->debug & 1)
                                        printk(KERN_DEBUG "PPP: inbound frame "
                                               "not passed\n");
@@ -1767,8 +1764,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
                                return;
                        }
                        if (!(ppp->active_filter &&
-                             sk_run_filter(skb, ppp->active_filter,
-                                           ppp->active_len) == 0))
+                             sk_run_filter(skb, ppp->active_filter) == 0))
                                ppp->last_recv = jiffies;
                        __skb_pull(skb, 2);
                } else
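All four ppp_generic call sites change the same way: sk_run_filter() no longer takes an explicit program length. The length is not needed at run time because a program accepted by sk_chk_filter() must end in a BPF_RET instruction. A hedged sketch of a call site in the new form, with hypothetical names:

#include <linux/filter.h>
#include <linux/skbuff.h>

/* Hypothetical pass/drop helper in the style of the ppp_generic call
 * sites above.  "prog" is assumed to have been validated with
 * sk_chk_filter(), which guarantees the program terminates in BPF_RET,
 * so the runner no longer needs its length. */
static bool example_frame_passes(struct sk_buff *skb,
				 struct sock_filter *prog)
{
	if (!prog)
		return true;	/* no filter attached: let everything through */

	return sk_run_filter(skb, prog) != 0;
}
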
index 7496ed2c34aba61aa06f616774cd2b139c578342..1a3584edd79cb5b88633360a5866f19b413172e6 100644 (file)
@@ -2467,7 +2467,7 @@ map_error:
 static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
                               struct net_device *ndev)
 {
-       struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+       struct ql3_adapter *qdev = netdev_priv(ndev);
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        struct ql_tx_buf_cb *tx_cb;
@@ -3390,7 +3390,7 @@ static void ql_set_mac_info(struct ql3_adapter *qdev)
 
 static void ql_display_dev_info(struct net_device *ndev)
 {
-       struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+       struct ql3_adapter *qdev = netdev_priv(ndev);
        struct pci_dev *pdev = qdev->pdev;
 
        netdev_info(ndev,
@@ -3573,7 +3573,7 @@ static int ql3xxx_open(struct net_device *ndev)
 
 static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
 {
-       struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+       struct ql3_adapter *qdev = netdev_priv(ndev);
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        struct sockaddr *addr = p;
@@ -3608,7 +3608,7 @@ static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
 
 static void ql3xxx_tx_timeout(struct net_device *ndev)
 {
-       struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+       struct ql3_adapter *qdev = netdev_priv(ndev);
 
        netdev_err(ndev, "Resetting...\n");
        /*
index 8ecc170c9b74f569f2e79be28f70dfc96097d50d..56f54ffabb2fb27df75599ea03208510d4c81ac6 100644 (file)
@@ -51,8 +51,8 @@
 
 #define _QLCNIC_LINUX_MAJOR 5
 #define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 11
-#define QLCNIC_LINUX_VERSIONID  "5.0.11"
+#define _QLCNIC_LINUX_SUBVERSION 12
+#define QLCNIC_LINUX_VERSIONID  "5.0.12"
 #define QLCNIC_DRV_IDC_VER  0x01
 #define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
                 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -1126,8 +1126,7 @@ struct qlcnic_eswitch {
 /* Return codes for Error handling */
 #define QL_STATUS_INVALID_PARAM        -1
 
-#define MAX_BW                 100
-#define MIN_BW                 1
+#define MAX_BW                 100     /* % of link speed */
 #define MAX_VLAN_ID            4095
 #define MIN_VLAN_ID            2
 #define MAX_TX_QUEUES          1
@@ -1135,7 +1134,7 @@ struct qlcnic_eswitch {
 #define DEFAULT_MAC_LEARN      1
 
 #define IS_VALID_VLAN(vlan)    (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID)
-#define IS_VALID_BW(bw)                (bw >= MIN_BW && bw <= MAX_BW)
+#define IS_VALID_BW(bw)                (bw <= MAX_BW)
 #define IS_VALID_TX_QUEUES(que)        (que > 0 && que <= MAX_TX_QUEUES)
 #define IS_VALID_RX_QUEUES(que)        (que > 0 && que <= MAX_RX_QUEUES)
 
@@ -1377,6 +1376,8 @@ static const struct qlcnic_brdinfo qlcnic_boards[] = {
                "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
        {0x1077, 0x8020, 0x103c, 0x3733,
                "NC523SFP 10Gb 2-port Server Adapter"},
+       {0x1077, 0x8020, 0x103c, 0x3346,
+               "CN1000Q Dual Port Converged Network Adapter"},
        {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
 };
 
index 1cdc05dade6b0b5d31d9b0aa27fab5a7fffed894..3ad1f3eba289441e43d6db49721c8ac3a4e862c9 100644 (file)
@@ -480,6 +480,9 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter)
 {
        int err;
 
+       if (reset_devices)
+               pci_reset_function(adapter->pdev);
+
        err = qlcnic_fw_cmd_create_rx_ctx(adapter);
        if (err)
                return err;
index ec21d24015c485a7153e4262026f89d1c73af6c3..c38929636488af71fcf23ba072a13ecc31926634 100644 (file)
@@ -925,9 +925,10 @@ static int qlcnic_set_rx_csum(struct net_device *dev, u32 data)
 
                dev->features &= ~NETIF_F_LRO;
                qlcnic_send_lro_cleanup(adapter);
+               dev_info(&adapter->pdev->dev,
+                                       "disabling LRO as rx_csum is off\n");
        }
        adapter->rx_csum = !!data;
-       dev_info(&adapter->pdev->dev, "disabling LRO as rx_csum is off\n");
        return 0;
 }
 
index 22821398fc63765054eff056ab2529beedfb30fc..bdb8fe868539d68f8b1388887e560bf2626b7d42 100644 (file)
@@ -16,7 +16,7 @@
  */
 #define DRV_NAME       "qlge"
 #define DRV_STRING     "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION    "v1.00.00.25.00.00-01"
+#define DRV_VERSION    "v1.00.00.27.00.00-01"
 
 #define WQ_ADDR_ALIGN  0x3     /* 4 byte alignment */
 
@@ -2221,6 +2221,7 @@ int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
 int ql_unpause_mpi_risc(struct ql_adapter *qdev);
 int ql_pause_mpi_risc(struct ql_adapter *qdev);
 int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
+int ql_soft_reset_mpi_risc(struct ql_adapter *qdev);
 int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
                u32 ram_addr, int word_count);
 int ql_core_dump(struct ql_adapter *qdev,
@@ -2236,6 +2237,7 @@ int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control);
 int ql_mb_get_port_cfg(struct ql_adapter *qdev);
 int ql_mb_set_port_cfg(struct ql_adapter *qdev);
 int ql_wait_fifo_empty(struct ql_adapter *qdev);
+void ql_get_dump(struct ql_adapter *qdev, void *buff);
 void ql_gen_reg_dump(struct ql_adapter *qdev,
                        struct ql_reg_dump *mpi_coredump);
 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
index 4747492935ef8bf190cc575fe7146e3072417623..fca804f36d61a2e6f2d79140945adee3732a2ec1 100644 (file)
@@ -1317,9 +1317,28 @@ void ql_gen_reg_dump(struct ql_adapter *qdev,
        status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
        if (status)
                return;
+}
+
+void ql_get_dump(struct ql_adapter *qdev, void *buff)
+{
+       /*
+        * If force dump is set, the dump has already been taken and is
+        * stored in our internal buffer: just start the spool to dump it
+        * to the log file, and take only a snapshot of the general regs
+        * into the user's buffer.  Otherwise, when force is not set,
+        * take a complete dump straight into the user's buffer
+        * instead.
+        */
 
-       if (test_bit(QL_FRC_COREDUMP, &qdev->flags))
+       if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
+               if (!ql_core_dump(qdev, buff))
+                       ql_soft_reset_mpi_risc(qdev);
+               else
+                       netif_err(qdev, drv, qdev->ndev, "coredump failed!\n");
+       } else {
+               ql_gen_reg_dump(qdev, buff);
                ql_get_core_dump(qdev);
+       }
 }
 
 /* Coredump to messages log file using separate worker thread */
index 4892d64f4e054b2a630a13f2249f6ca1be7f83cb..8149cc9de4ca05a0bfaca39f396359eae1eac070 100644 (file)
@@ -375,7 +375,10 @@ static void ql_get_drvinfo(struct net_device *ndev,
        strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
        drvinfo->n_stats = 0;
        drvinfo->testinfo_len = 0;
-       drvinfo->regdump_len = 0;
+       if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+               drvinfo->regdump_len = sizeof(struct ql_mpi_coredump);
+       else
+               drvinfo->regdump_len = sizeof(struct ql_reg_dump);
        drvinfo->eedump_len = 0;
 }
 
@@ -547,7 +550,12 @@ static void ql_self_test(struct net_device *ndev,
 
 static int ql_get_regs_len(struct net_device *ndev)
 {
-       return sizeof(struct ql_reg_dump);
+       struct ql_adapter *qdev = netdev_priv(ndev);
+
+       if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+               return sizeof(struct ql_mpi_coredump);
+       else
+               return sizeof(struct ql_reg_dump);
 }
 
 static void ql_get_regs(struct net_device *ndev,
@@ -555,7 +563,12 @@ static void ql_get_regs(struct net_device *ndev,
 {
        struct ql_adapter *qdev = netdev_priv(ndev);
 
-       ql_gen_reg_dump(qdev, p);
+       ql_get_dump(qdev, p);
+       qdev->core_is_dumped = 0;
+       if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+               regs->len = sizeof(struct ql_mpi_coredump);
+       else
+               regs->len = sizeof(struct ql_reg_dump);
 }
 
 static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
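The two ethtool hunks above have to agree: the core sizes the buffer it passes to ->get_regs() from ->get_regs_len(), which is why both now key off the same QL_FRC_COREDUMP test (and why get_regs also sets regs->len to match). A sketch of how such a pair is wired into ethtool_ops, with hypothetical handlers and a placeholder size:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

/* Hypothetical handlers: whatever ->get_regs_len() reports is the size
 * of the buffer handed to ->get_regs(), so the two must stay in sync. */
static int example_get_regs_len(struct net_device *ndev)
{
	return 64 * sizeof(u32);		/* placeholder size */
}

static void example_get_regs(struct net_device *ndev,
			     struct ethtool_regs *regs, void *p)
{
	regs->len = example_get_regs_len(ndev);	/* keep both in sync */
	memset(p, 0, regs->len);		/* placeholder register dump */
}

static const struct ethtool_ops example_ethtool_ops = {
	.get_regs_len	= example_get_regs_len,
	.get_regs	= example_get_regs,
};
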
index c30e0fe55a314858828c2caa4d464b0c2301fe81..d9a76260880b6d8e8c21ba815ba42f7ed04fdece 100644 (file)
@@ -3844,7 +3844,7 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
 
 static void ql_display_dev_info(struct net_device *ndev)
 {
-       struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+       struct ql_adapter *qdev = netdev_priv(ndev);
 
        netif_info(qdev, probe, qdev->ndev,
                   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
@@ -4264,7 +4264,7 @@ static struct net_device_stats *qlge_get_stats(struct net_device
 
 static void qlge_set_multicast_list(struct net_device *ndev)
 {
-       struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+       struct ql_adapter *qdev = netdev_priv(ndev);
        struct netdev_hw_addr *ha;
        int i, status;
 
@@ -4354,7 +4354,7 @@ exit:
 
 static int qlge_set_mac_address(struct net_device *ndev, void *p)
 {
-       struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+       struct ql_adapter *qdev = netdev_priv(ndev);
        struct sockaddr *addr = p;
        int status;
 
@@ -4377,7 +4377,7 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
 
 static void qlge_tx_timeout(struct net_device *ndev)
 {
-       struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+       struct ql_adapter *qdev = netdev_priv(ndev);
        ql_queue_asic_error(qdev);
 }
 
index 0e7c7c7ee1647006baf561ffd90308290fbd2399..100a462cc9163e578ed48c5ce505c69a41cbbea6 100644 (file)
@@ -87,7 +87,7 @@ exit:
        return status;
 }
 
-static int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
+int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
 {
        int status;
        status = ql_write_mpi_reg(qdev, 0x00001010, 1);
index ecc25aab896af3ee9e55a9da4e90bccd32553d37..0f4219cb0be2e5ab88fba5c82ba7ebd822af781f 100644 (file)
@@ -8321,8 +8321,7 @@ mem_alloc_failed:
 
 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
 {
-       struct net_device *dev =
-               (struct net_device *)pci_get_drvdata(pdev);
+       struct net_device *dev = pci_get_drvdata(pdev);
        struct s2io_nic *sp;
 
        if (dev == NULL) {
index 50259dfec5836772570d96f8bd402cd7a482a706..b12660d7233839d4d37d547592883545e38a1b7b 100644 (file)
@@ -45,9 +45,9 @@ static void sh_eth_set_duplex(struct net_device *ndev)
        u32 ioaddr = ndev->base_addr;
 
        if (mdp->duplex) /* Full */
-               ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
+               writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
        else            /* Half */
-               ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
+               writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
 }
 
 static void sh_eth_set_rate(struct net_device *ndev)
@@ -57,10 +57,10 @@ static void sh_eth_set_rate(struct net_device *ndev)
 
        switch (mdp->speed) {
        case 10: /* 10BASE */
-               ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_RTM, ioaddr + ECMR);
+               writel(readl(ioaddr + ECMR) & ~ECMR_RTM, ioaddr + ECMR);
                break;
        case 100:/* 100BASE */
-               ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_RTM, ioaddr + ECMR);
+               writel(readl(ioaddr + ECMR) | ECMR_RTM, ioaddr + ECMR);
                break;
        default:
                break;
@@ -96,9 +96,9 @@ static void sh_eth_set_duplex(struct net_device *ndev)
        u32 ioaddr = ndev->base_addr;
 
        if (mdp->duplex) /* Full */
-               ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
+               writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
        else            /* Half */
-               ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
+               writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
 }
 
 static void sh_eth_set_rate(struct net_device *ndev)
@@ -108,10 +108,10 @@ static void sh_eth_set_rate(struct net_device *ndev)
 
        switch (mdp->speed) {
        case 10: /* 10BASE */
-               ctrl_outl(0, ioaddr + RTRATE);
+               writel(0, ioaddr + RTRATE);
                break;
        case 100:/* 100BASE */
-               ctrl_outl(1, ioaddr + RTRATE);
+               writel(1, ioaddr + RTRATE);
                break;
        default:
                break;
@@ -143,7 +143,7 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
 static void sh_eth_chip_reset(struct net_device *ndev)
 {
        /* reset device */
-       ctrl_outl(ARSTR_ARSTR, ARSTR);
+       writel(ARSTR_ARSTR, ARSTR);
        mdelay(1);
 }
 
@@ -152,10 +152,10 @@ static void sh_eth_reset(struct net_device *ndev)
        u32 ioaddr = ndev->base_addr;
        int cnt = 100;
 
-       ctrl_outl(EDSR_ENALL, ioaddr + EDSR);
-       ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
+       writel(EDSR_ENALL, ioaddr + EDSR);
+       writel(readl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
        while (cnt > 0) {
-               if (!(ctrl_inl(ioaddr + EDMR) & 0x3))
+               if (!(readl(ioaddr + EDMR) & 0x3))
                        break;
                mdelay(1);
                cnt--;
@@ -164,14 +164,14 @@ static void sh_eth_reset(struct net_device *ndev)
                printk(KERN_ERR "Device reset fail\n");
 
        /* Table Init */
-       ctrl_outl(0x0, ioaddr + TDLAR);
-       ctrl_outl(0x0, ioaddr + TDFAR);
-       ctrl_outl(0x0, ioaddr + TDFXR);
-       ctrl_outl(0x0, ioaddr + TDFFR);
-       ctrl_outl(0x0, ioaddr + RDLAR);
-       ctrl_outl(0x0, ioaddr + RDFAR);
-       ctrl_outl(0x0, ioaddr + RDFXR);
-       ctrl_outl(0x0, ioaddr + RDFFR);
+       writel(0x0, ioaddr + TDLAR);
+       writel(0x0, ioaddr + TDFAR);
+       writel(0x0, ioaddr + TDFXR);
+       writel(0x0, ioaddr + TDFFR);
+       writel(0x0, ioaddr + RDLAR);
+       writel(0x0, ioaddr + RDFAR);
+       writel(0x0, ioaddr + RDFXR);
+       writel(0x0, ioaddr + RDFFR);
 }
 
 static void sh_eth_set_duplex(struct net_device *ndev)
@@ -180,9 +180,9 @@ static void sh_eth_set_duplex(struct net_device *ndev)
        u32 ioaddr = ndev->base_addr;
 
        if (mdp->duplex) /* Full */
-               ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
+               writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
        else            /* Half */
-               ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
+               writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
 }
 
 static void sh_eth_set_rate(struct net_device *ndev)
@@ -192,13 +192,13 @@ static void sh_eth_set_rate(struct net_device *ndev)
 
        switch (mdp->speed) {
        case 10: /* 10BASE */
-               ctrl_outl(GECMR_10, ioaddr + GECMR);
+               writel(GECMR_10, ioaddr + GECMR);
                break;
        case 100:/* 100BASE */
-               ctrl_outl(GECMR_100, ioaddr + GECMR);
+               writel(GECMR_100, ioaddr + GECMR);
                break;
        case 1000: /* 1000BASE */
-               ctrl_outl(GECMR_1000, ioaddr + GECMR);
+               writel(GECMR_1000, ioaddr + GECMR);
                break;
        default:
                break;
@@ -283,9 +283,9 @@ static void sh_eth_reset(struct net_device *ndev)
 {
        u32 ioaddr = ndev->base_addr;
 
-       ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
+       writel(readl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
        mdelay(3);
-       ctrl_outl(ctrl_inl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
+       writel(readl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
 }
 #endif
 
@@ -336,10 +336,10 @@ static void update_mac_address(struct net_device *ndev)
 {
        u32 ioaddr = ndev->base_addr;
 
-       ctrl_outl((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
+       writel((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
                  (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]),
                  ioaddr + MAHR);
-       ctrl_outl((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]),
+       writel((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]),
                  ioaddr + MALR);
 }
 
@@ -358,12 +358,12 @@ static void read_mac_address(struct net_device *ndev, unsigned char *mac)
        if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
                memcpy(ndev->dev_addr, mac, 6);
        } else {
-               ndev->dev_addr[0] = (ctrl_inl(ioaddr + MAHR) >> 24);
-               ndev->dev_addr[1] = (ctrl_inl(ioaddr + MAHR) >> 16) & 0xFF;
-               ndev->dev_addr[2] = (ctrl_inl(ioaddr + MAHR) >> 8) & 0xFF;
-               ndev->dev_addr[3] = (ctrl_inl(ioaddr + MAHR) & 0xFF);
-               ndev->dev_addr[4] = (ctrl_inl(ioaddr + MALR) >> 8) & 0xFF;
-               ndev->dev_addr[5] = (ctrl_inl(ioaddr + MALR) & 0xFF);
+               ndev->dev_addr[0] = (readl(ioaddr + MAHR) >> 24);
+               ndev->dev_addr[1] = (readl(ioaddr + MAHR) >> 16) & 0xFF;
+               ndev->dev_addr[2] = (readl(ioaddr + MAHR) >> 8) & 0xFF;
+               ndev->dev_addr[3] = (readl(ioaddr + MAHR) & 0xFF);
+               ndev->dev_addr[4] = (readl(ioaddr + MALR) >> 8) & 0xFF;
+               ndev->dev_addr[5] = (readl(ioaddr + MALR) & 0xFF);
        }
 }
 
@@ -379,19 +379,19 @@ struct bb_info {
 /* PHY bit set */
 static void bb_set(u32 addr, u32 msk)
 {
-       ctrl_outl(ctrl_inl(addr) | msk, addr);
+       writel(readl(addr) | msk, addr);
 }
 
 /* PHY bit clear */
 static void bb_clr(u32 addr, u32 msk)
 {
-       ctrl_outl((ctrl_inl(addr) & ~msk), addr);
+       writel((readl(addr) & ~msk), addr);
 }
 
 /* PHY bit read */
 static int bb_read(u32 addr, u32 msk)
 {
-       return (ctrl_inl(addr) & msk) != 0;
+       return (readl(addr) & msk) != 0;
 }
 
 /* Data I/O pin control */
@@ -506,9 +506,9 @@ static void sh_eth_ring_format(struct net_device *ndev)
                rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
                /* Rx descriptor address set */
                if (i == 0) {
-                       ctrl_outl(mdp->rx_desc_dma, ioaddr + RDLAR);
+                       writel(mdp->rx_desc_dma, ioaddr + RDLAR);
 #if defined(CONFIG_CPU_SUBTYPE_SH7763)
-                       ctrl_outl(mdp->rx_desc_dma, ioaddr + RDFAR);
+                       writel(mdp->rx_desc_dma, ioaddr + RDFAR);
 #endif
                }
        }
@@ -528,9 +528,9 @@ static void sh_eth_ring_format(struct net_device *ndev)
                txdesc->buffer_length = 0;
                if (i == 0) {
                        /* Tx descriptor address set */
-                       ctrl_outl(mdp->tx_desc_dma, ioaddr + TDLAR);
+                       writel(mdp->tx_desc_dma, ioaddr + TDLAR);
 #if defined(CONFIG_CPU_SUBTYPE_SH7763)
-                       ctrl_outl(mdp->tx_desc_dma, ioaddr + TDFAR);
+                       writel(mdp->tx_desc_dma, ioaddr + TDFAR);
 #endif
                }
        }
@@ -623,71 +623,71 @@ static int sh_eth_dev_init(struct net_device *ndev)
        /* Descriptor format */
        sh_eth_ring_format(ndev);
        if (mdp->cd->rpadir)
-               ctrl_outl(mdp->cd->rpadir_value, ioaddr + RPADIR);
+               writel(mdp->cd->rpadir_value, ioaddr + RPADIR);
 
        /* all sh_eth int mask */
-       ctrl_outl(0, ioaddr + EESIPR);
+       writel(0, ioaddr + EESIPR);
 
 #if defined(__LITTLE_ENDIAN__)
        if (mdp->cd->hw_swap)
-               ctrl_outl(EDMR_EL, ioaddr + EDMR);
+               writel(EDMR_EL, ioaddr + EDMR);
        else
 #endif
-               ctrl_outl(0, ioaddr + EDMR);
+               writel(0, ioaddr + EDMR);
 
        /* FIFO size set */
-       ctrl_outl(mdp->cd->fdr_value, ioaddr + FDR);
-       ctrl_outl(0, ioaddr + TFTR);
+       writel(mdp->cd->fdr_value, ioaddr + FDR);
+       writel(0, ioaddr + TFTR);
 
        /* Frame recv control */
-       ctrl_outl(mdp->cd->rmcr_value, ioaddr + RMCR);
+       writel(mdp->cd->rmcr_value, ioaddr + RMCR);
 
        rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
        tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
-       ctrl_outl(rx_int_var | tx_int_var, ioaddr + TRSCER);
+       writel(rx_int_var | tx_int_var, ioaddr + TRSCER);
 
        if (mdp->cd->bculr)
-               ctrl_outl(0x800, ioaddr + BCULR);       /* Burst sycle set */
+               writel(0x800, ioaddr + BCULR);  /* Burst sycle set */
 
-       ctrl_outl(mdp->cd->fcftr_value, ioaddr + FCFTR);
+       writel(mdp->cd->fcftr_value, ioaddr + FCFTR);
 
        if (!mdp->cd->no_trimd)
-               ctrl_outl(0, ioaddr + TRIMD);
+               writel(0, ioaddr + TRIMD);
 
        /* Recv frame limit set register */
-       ctrl_outl(RFLR_VALUE, ioaddr + RFLR);
+       writel(RFLR_VALUE, ioaddr + RFLR);
 
-       ctrl_outl(ctrl_inl(ioaddr + EESR), ioaddr + EESR);
-       ctrl_outl(mdp->cd->eesipr_value, ioaddr + EESIPR);
+       writel(readl(ioaddr + EESR), ioaddr + EESR);
+       writel(mdp->cd->eesipr_value, ioaddr + EESIPR);
 
        /* PAUSE Prohibition */
-       val = (ctrl_inl(ioaddr + ECMR) & ECMR_DM) |
+       val = (readl(ioaddr + ECMR) & ECMR_DM) |
                ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
 
-       ctrl_outl(val, ioaddr + ECMR);
+       writel(val, ioaddr + ECMR);
 
        if (mdp->cd->set_rate)
                mdp->cd->set_rate(ndev);
 
        /* E-MAC Status Register clear */
-       ctrl_outl(mdp->cd->ecsr_value, ioaddr + ECSR);
+       writel(mdp->cd->ecsr_value, ioaddr + ECSR);
 
        /* E-MAC Interrupt Enable register */
-       ctrl_outl(mdp->cd->ecsipr_value, ioaddr + ECSIPR);
+       writel(mdp->cd->ecsipr_value, ioaddr + ECSIPR);
 
        /* Set MAC address */
        update_mac_address(ndev);
 
        /* mask reset */
        if (mdp->cd->apr)
-               ctrl_outl(APR_AP, ioaddr + APR);
+               writel(APR_AP, ioaddr + APR);
        if (mdp->cd->mpr)
-               ctrl_outl(MPR_MP, ioaddr + MPR);
+               writel(MPR_MP, ioaddr + MPR);
        if (mdp->cd->tpauser)
-               ctrl_outl(TPAUSER_UNLIMITED, ioaddr + TPAUSER);
+               writel(TPAUSER_UNLIMITED, ioaddr + TPAUSER);
 
        /* Setting the Rx mode will start the Rx process. */
-       ctrl_outl(EDRRR_R, ioaddr + EDRRR);
+       writel(EDRRR_R, ioaddr + EDRRR);
 
        netif_start_queue(ndev);
 
@@ -811,8 +811,8 @@ static int sh_eth_rx(struct net_device *ndev)
 
        /* Restart Rx engine if stopped. */
        /* If we don't need to check status, don't. -KDU */
-       if (!(ctrl_inl(ndev->base_addr + EDRRR) & EDRRR_R))
-               ctrl_outl(EDRRR_R, ndev->base_addr + EDRRR);
+       if (!(readl(ndev->base_addr + EDRRR) & EDRRR_R))
+               writel(EDRRR_R, ndev->base_addr + EDRRR);
 
        return 0;
 }
@@ -827,8 +827,8 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
        u32 mask;
 
        if (intr_status & EESR_ECI) {
-               felic_stat = ctrl_inl(ioaddr + ECSR);
-               ctrl_outl(felic_stat, ioaddr + ECSR);   /* clear int */
+               felic_stat = readl(ioaddr + ECSR);
+               writel(felic_stat, ioaddr + ECSR);      /* clear int */
                if (felic_stat & ECSR_ICD)
                        mdp->stats.tx_carrier_errors++;
                if (felic_stat & ECSR_LCHNG) {
@@ -839,25 +839,25 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
                                else
                                        link_stat = PHY_ST_LINK;
                        } else {
-                               link_stat = (ctrl_inl(ioaddr + PSR));
+                               link_stat = (readl(ioaddr + PSR));
                                if (mdp->ether_link_active_low)
                                        link_stat = ~link_stat;
                        }
                        if (!(link_stat & PHY_ST_LINK)) {
                                /* Link Down : disable tx and rx */
-                               ctrl_outl(ctrl_inl(ioaddr + ECMR) &
+                               writel(readl(ioaddr + ECMR) &
                                          ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
                        } else {
                                /* Link Up */
-                               ctrl_outl(ctrl_inl(ioaddr + EESIPR) &
+                               writel(readl(ioaddr + EESIPR) &
                                          ~DMAC_M_ECI, ioaddr + EESIPR);
                                /*clear int */
-                               ctrl_outl(ctrl_inl(ioaddr + ECSR),
+                               writel(readl(ioaddr + ECSR),
                                          ioaddr + ECSR);
-                               ctrl_outl(ctrl_inl(ioaddr + EESIPR) |
+                               writel(readl(ioaddr + EESIPR) |
                                          DMAC_M_ECI, ioaddr + EESIPR);
                                /* enable tx and rx */
-                               ctrl_outl(ctrl_inl(ioaddr + ECMR) |
+                               writel(readl(ioaddr + ECMR) |
                                          (ECMR_RE | ECMR_TE), ioaddr + ECMR);
                        }
                }
@@ -888,8 +888,8 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
                /* Receive Descriptor Empty int */
                mdp->stats.rx_over_errors++;
 
-               if (ctrl_inl(ioaddr + EDRRR) ^ EDRRR_R)
-                       ctrl_outl(EDRRR_R, ioaddr + EDRRR);
+               if (readl(ioaddr + EDRRR) ^ EDRRR_R)
+                       writel(EDRRR_R, ioaddr + EDRRR);
                dev_err(&ndev->dev, "Receive Descriptor Empty\n");
        }
        if (intr_status & EESR_RFE) {
@@ -903,7 +903,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
                mask &= ~EESR_ADE;
        if (intr_status & mask) {
                /* Tx error */
-               u32 edtrr = ctrl_inl(ndev->base_addr + EDTRR);
+               u32 edtrr = readl(ndev->base_addr + EDTRR);
                /* dmesg */
                dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
                                intr_status, mdp->cur_tx);
@@ -915,7 +915,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
                /* SH7712 BUG */
                if (edtrr ^ EDTRR_TRNS) {
                        /* tx dma start */
-                       ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
+                       writel(EDTRR_TRNS, ndev->base_addr + EDTRR);
                }
                /* wakeup */
                netif_wake_queue(ndev);
@@ -934,12 +934,12 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
        spin_lock(&mdp->lock);
 
        /* Get interrpt stat */
-       intr_status = ctrl_inl(ioaddr + EESR);
+       intr_status = readl(ioaddr + EESR);
        /* Clear interrupt */
        if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
                        EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
                        cd->tx_check | cd->eesr_err_check)) {
-               ctrl_outl(intr_status, ioaddr + EESR);
+               writel(intr_status, ioaddr + EESR);
                ret = IRQ_HANDLED;
        } else
                goto other_irq;
@@ -1000,7 +1000,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)
                                mdp->cd->set_rate(ndev);
                }
                if (mdp->link == PHY_DOWN) {
-                       ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_TXF)
+                       writel((readl(ioaddr + ECMR) & ~ECMR_TXF)
                                        | ECMR_DM, ioaddr + ECMR);
                        new_state = 1;
                        mdp->link = phydev->link;
@@ -1125,7 +1125,7 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
 
        /* worning message out. */
        printk(KERN_WARNING "%s: transmit timed out, status %8.8x,"
-              " resetting...\n", ndev->name, (int)ctrl_inl(ioaddr + EESR));
+              " resetting...\n", ndev->name, (int)readl(ioaddr + EESR));
 
        /* tx_errors count up */
        mdp->stats.tx_errors++;
@@ -1196,8 +1196,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
        mdp->cur_tx++;
 
-       if (!(ctrl_inl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
-               ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
+       if (!(readl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
+               writel(EDTRR_TRNS, ndev->base_addr + EDTRR);
 
        return NETDEV_TX_OK;
 }
@@ -1212,11 +1212,11 @@ static int sh_eth_close(struct net_device *ndev)
        netif_stop_queue(ndev);
 
        /* Disable interrupts by clearing the interrupt mask. */
-       ctrl_outl(0x0000, ioaddr + EESIPR);
+       writel(0x0000, ioaddr + EESIPR);
 
        /* Stop the chip's Tx and Rx processes. */
-       ctrl_outl(0, ioaddr + EDTRR);
-       ctrl_outl(0, ioaddr + EDRRR);
+       writel(0, ioaddr + EDTRR);
+       writel(0, ioaddr + EDRRR);
 
        /* PHY Disconnect */
        if (mdp->phydev) {
@@ -1251,20 +1251,20 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
 
        pm_runtime_get_sync(&mdp->pdev->dev);
 
-       mdp->stats.tx_dropped += ctrl_inl(ioaddr + TROCR);
-       ctrl_outl(0, ioaddr + TROCR);   /* (write clear) */
-       mdp->stats.collisions += ctrl_inl(ioaddr + CDCR);
-       ctrl_outl(0, ioaddr + CDCR);    /* (write clear) */
-       mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + LCCR);
-       ctrl_outl(0, ioaddr + LCCR);    /* (write clear) */
+       mdp->stats.tx_dropped += readl(ioaddr + TROCR);
+       writel(0, ioaddr + TROCR);      /* (write clear) */
+       mdp->stats.collisions += readl(ioaddr + CDCR);
+       writel(0, ioaddr + CDCR);       /* (write clear) */
+       mdp->stats.tx_carrier_errors += readl(ioaddr + LCCR);
+       writel(0, ioaddr + LCCR);       /* (write clear) */
 #if defined(CONFIG_CPU_SUBTYPE_SH7763)
-       mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CERCR);/* CERCR */
-       ctrl_outl(0, ioaddr + CERCR);   /* (write clear) */
-       mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CEECR);/* CEECR */
-       ctrl_outl(0, ioaddr + CEECR);   /* (write clear) */
+       mdp->stats.tx_carrier_errors += readl(ioaddr + CERCR);/* CERCR */
+       writel(0, ioaddr + CERCR);      /* (write clear) */
+       mdp->stats.tx_carrier_errors += readl(ioaddr + CEECR);/* CEECR */
+       writel(0, ioaddr + CEECR);      /* (write clear) */
 #else
-       mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CNDCR);
-       ctrl_outl(0, ioaddr + CNDCR);   /* (write clear) */
+       mdp->stats.tx_carrier_errors += readl(ioaddr + CNDCR);
+       writel(0, ioaddr + CNDCR);      /* (write clear) */
 #endif
        pm_runtime_put_sync(&mdp->pdev->dev);
 
@@ -1295,11 +1295,11 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
 
        if (ndev->flags & IFF_PROMISC) {
                /* Set promiscuous. */
-               ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM,
+               writel((readl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM,
                          ioaddr + ECMR);
        } else {
                /* Normal, unicast/broadcast-only mode. */
-               ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT,
+               writel((readl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT,
                          ioaddr + ECMR);
        }
 }
@@ -1307,30 +1307,30 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
 /* SuperH's TSU register init function */
 static void sh_eth_tsu_init(u32 ioaddr)
 {
-       ctrl_outl(0, ioaddr + TSU_FWEN0);       /* Disable forward(0->1) */
-       ctrl_outl(0, ioaddr + TSU_FWEN1);       /* Disable forward(1->0) */
-       ctrl_outl(0, ioaddr + TSU_FCM); /* forward fifo 3k-3k */
-       ctrl_outl(0xc, ioaddr + TSU_BSYSL0);
-       ctrl_outl(0xc, ioaddr + TSU_BSYSL1);
-       ctrl_outl(0, ioaddr + TSU_PRISL0);
-       ctrl_outl(0, ioaddr + TSU_PRISL1);
-       ctrl_outl(0, ioaddr + TSU_FWSL0);
-       ctrl_outl(0, ioaddr + TSU_FWSL1);
-       ctrl_outl(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC);
+       writel(0, ioaddr + TSU_FWEN0);  /* Disable forward(0->1) */
+       writel(0, ioaddr + TSU_FWEN1);  /* Disable forward(1->0) */
+       writel(0, ioaddr + TSU_FCM);    /* forward fifo 3k-3k */
+       writel(0xc, ioaddr + TSU_BSYSL0);
+       writel(0xc, ioaddr + TSU_BSYSL1);
+       writel(0, ioaddr + TSU_PRISL0);
+       writel(0, ioaddr + TSU_PRISL1);
+       writel(0, ioaddr + TSU_FWSL0);
+       writel(0, ioaddr + TSU_FWSL1);
+       writel(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC);
 #if defined(CONFIG_CPU_SUBTYPE_SH7763)
-       ctrl_outl(0, ioaddr + TSU_QTAG0);       /* Disable QTAG(0->1) */
-       ctrl_outl(0, ioaddr + TSU_QTAG1);       /* Disable QTAG(1->0) */
+       writel(0, ioaddr + TSU_QTAG0);  /* Disable QTAG(0->1) */
+       writel(0, ioaddr + TSU_QTAG1);  /* Disable QTAG(1->0) */
 #else
-       ctrl_outl(0, ioaddr + TSU_QTAGM0);      /* Disable QTAG(0->1) */
-       ctrl_outl(0, ioaddr + TSU_QTAGM1);      /* Disable QTAG(1->0) */
+       writel(0, ioaddr + TSU_QTAGM0); /* Disable QTAG(0->1) */
+       writel(0, ioaddr + TSU_QTAGM1); /* Disable QTAG(1->0) */
 #endif
-       ctrl_outl(0, ioaddr + TSU_FWSR);        /* all interrupt status clear */
-       ctrl_outl(0, ioaddr + TSU_FWINMK);      /* Disable all interrupt */
-       ctrl_outl(0, ioaddr + TSU_TEN); /* Disable all CAM entry */
-       ctrl_outl(0, ioaddr + TSU_POST1);       /* Disable CAM entry [ 0- 7] */
-       ctrl_outl(0, ioaddr + TSU_POST2);       /* Disable CAM entry [ 8-15] */
-       ctrl_outl(0, ioaddr + TSU_POST3);       /* Disable CAM entry [16-23] */
-       ctrl_outl(0, ioaddr + TSU_POST4);       /* Disable CAM entry [24-31] */
+       writel(0, ioaddr + TSU_FWSR);   /* all interrupt status clear */
+       writel(0, ioaddr + TSU_FWINMK); /* Disable all interrupt */
+       writel(0, ioaddr + TSU_TEN);    /* Disable all CAM entry */
+       writel(0, ioaddr + TSU_POST1);  /* Disable CAM entry [ 0- 7] */
+       writel(0, ioaddr + TSU_POST2);  /* Disable CAM entry [ 8-15] */
+       writel(0, ioaddr + TSU_POST3);  /* Disable CAM entry [16-23] */
+       writel(0, ioaddr + TSU_POST4);  /* Disable CAM entry [24-31] */
 }
 #endif /* SH_ETH_HAS_TSU */
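The sh_eth hunks above swap the SH-specific ctrl_inl()/ctrl_outl() accessors for the generic readl()/writel() MMIO helpers while keeping the same read-modify-write pattern on ECMR. A minimal sketch of that pattern, assuming a void __iomem * register base and that the ECMR/ECMR_PRM/ECMR_MCT definitions from the driver are in scope (the helper name is invented here):

	#include <linux/io.h>

	/* Toggle promiscuous mode with a readl()/writel() read-modify-write. */
	static void example_set_promisc(void __iomem *base, bool promisc)
	{
		u32 ecmr = readl(base + ECMR);		/* current ECMR contents */

		if (promisc)
			ecmr = (ecmr & ~ECMR_MCT) | ECMR_PRM;	/* promiscuous, filter off */
		else
			ecmr = (ecmr & ~ECMR_PRM) | ECMR_MCT;	/* unicast/broadcast only */

		writel(ecmr, base + ECMR);		/* write the updated value back */
	}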
 
index b154a94de03e61927cdc063c8671867108cf7b89..be8cc2a8e2137105ab85060472b748f4423f601d 100644 (file)
@@ -1745,7 +1745,6 @@ static int hso_serial_ioctl(struct tty_struct *tty, struct file *file,
                            unsigned int cmd, unsigned long arg)
 {
        struct hso_serial *serial =  get_serial_by_tty(tty);
-       void __user *uarg = (void __user *)arg;
        int ret = 0;
        D4("IOCTL cmd: %d, arg: %ld", cmd, arg);
 
index b2bcf99e6f087ab1dfca2aef092233eea9aa5fe6..7d42f9a2c06868027ac77c6265c76f9ce2288c27 100644 (file)
@@ -363,7 +363,7 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
 
        /* Paranoid */
        if (skb->len > IPHETH_BUF_SIZE) {
-               WARN(1, "%s: skb too large: %d bytes", __func__, skb->len);
+               WARN(1, "%s: skb too large: %d bytes\n", __func__, skb->len);
                dev->net->stats.tx_dropped++;
                dev_kfree_skb_irq(skb);
                return NETDEV_TX_OK;
index 6710f09346d6ba665d9003d98700b5f3ca2d5ac9..ef3667690b12b5f0ee83d45d36bce1d8fd584cf6 100644 (file)
@@ -359,7 +359,7 @@ fail:
 
 static int mdio_read(struct net_device *dev, int phy_id, int loc)
 {
-       pegasus_t *pegasus = (pegasus_t *) netdev_priv(dev);
+       pegasus_t *pegasus = netdev_priv(dev);
        u16 res;
 
        read_mii_word(pegasus, phy_id, loc, &res);
@@ -397,7 +397,7 @@ fail:
 
 static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
 {
-       pegasus_t *pegasus = (pegasus_t *) netdev_priv(dev);
+       pegasus_t *pegasus = netdev_priv(dev);
 
        write_mii_word(pegasus, phy_id, loc, val);
 }
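The two pegasus hunks above drop the explicit (pegasus_t *) cast: netdev_priv() returns void *, which converts implicitly to any object pointer type in C, so the cast is redundant. A small illustrative sketch of the idiom (the structure and helper below are invented for illustration):

	#include <linux/netdevice.h>

	struct example_priv {
		int speed;
	};

	static int example_get_speed(struct net_device *dev)
	{
		/* netdev_priv() returns void *; no cast is needed. */
		struct example_priv *priv = netdev_priv(dev);

		return priv->speed;
	}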
index 906a3ca3676b94c3aa6d8442bd7e56b9d0fa3eca..409c2e6053d03efe21e36ef14c4cae105b671ada 100644 (file)
 
 #include "vxge-traffic.h"
 #include "vxge-config.h"
-
-static enum vxge_hw_status
-__vxge_hw_fifo_create(
-       struct __vxge_hw_vpath_handle *vpath_handle,
-       struct vxge_hw_fifo_attr *attr);
-
-static enum vxge_hw_status
-__vxge_hw_fifo_abort(
-       struct __vxge_hw_fifo *fifoh);
-
-static enum vxge_hw_status
-__vxge_hw_fifo_reset(
-       struct __vxge_hw_fifo *ringh);
+#include "vxge-main.h"
 
 static enum vxge_hw_status
 __vxge_hw_fifo_delete(
@@ -71,53 +59,15 @@ __vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
                        u32 size,
                        struct vxge_hw_mempool_dma *dma_object);
 
-
-static struct __vxge_hw_channel*
-__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
-                       enum __vxge_hw_channel_type type, u32 length,
-                       u32 per_dtr_space, void *userdata);
-
 static void
 __vxge_hw_channel_free(
        struct __vxge_hw_channel *channel);
 
-static enum vxge_hw_status
-__vxge_hw_channel_initialize(
-       struct __vxge_hw_channel *channel);
-
-static enum vxge_hw_status
-__vxge_hw_channel_reset(
-       struct __vxge_hw_channel *channel);
-
 static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp);
 
-static enum vxge_hw_status
-__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
-
 static enum vxge_hw_status
 __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
 
-static void
-__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
-
-static void
-__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
-
-static enum vxge_hw_status
-__vxge_hw_vpath_card_info_get(
-       u32 vp_id,
-       struct vxge_hw_vpath_reg __iomem *vpath_reg,
-       struct vxge_hw_device_hw_info *hw_info);
-
-static enum vxge_hw_status
-__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
-
-static void
-__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
-
-static enum vxge_hw_status
-__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
-
 static enum vxge_hw_status
 __vxge_hw_device_register_poll(
        void __iomem    *reg,
@@ -138,9 +88,10 @@ __vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
 
 static struct vxge_hw_mempool*
 __vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size,
-                        u32 item_size, u32 private_size, u32 items_initial,
-                        u32 items_max, struct vxge_hw_mempool_cbs *mp_callback,
-                        void *userdata);
+                       u32 item_size, u32 private_size, u32 items_initial,
+                       u32 items_max, struct vxge_hw_mempool_cbs *mp_callback,
+                       void *userdata);
+
 static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool);
 
 static enum vxge_hw_status
@@ -153,52 +104,353 @@ vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vpath_handle);
 static enum vxge_hw_status
 __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
 
-static u64
-__vxge_hw_vpath_pci_func_mode_get(u32  vp_id,
-                                 struct vxge_hw_vpath_reg __iomem *vpath_reg);
-
-static u32
-__vxge_hw_vpath_func_id_get(u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
+static void
+__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id);
 
 static enum vxge_hw_status
-__vxge_hw_vpath_addr_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
-                        u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]);
+__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
+                                 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
 
 static enum vxge_hw_status
-__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
+__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
+                                 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
 
+static void
+vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
+{
+       u64 val64;
 
-static enum vxge_hw_status
-__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *devh, u32 vp_id);
+       val64 = readq(&vp_reg->rxmac_vcfg0);
+       val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
+       writeq(val64, &vp_reg->rxmac_vcfg0);
+       val64 = readq(&vp_reg->rxmac_vcfg0);
 
-static enum vxge_hw_status
-__vxge_hw_vpath_fw_ver_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
-                          struct vxge_hw_device_hw_info *hw_info);
+       return;
+}
 
-static enum vxge_hw_status
-__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *devh, u32 vp_id);
+/*
+ * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
+ */
+int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
+{
+       struct vxge_hw_vpath_reg __iomem *vp_reg;
+       struct __vxge_hw_virtualpath *vpath;
+       u64 val64, rxd_count, rxd_spat;
+       int count = 0, total_count = 0;
 
-static void
-__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id);
+       vpath = &hldev->virtual_paths[vp_id];
+       vp_reg = vpath->vp_reg;
 
-static enum vxge_hw_status
-__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
-                            u32 operation, u32 offset, u64 *stat);
+       vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);
 
-static enum vxge_hw_status
-__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
-                                 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
+       /* Check that the ring controller for this vpath has enough free RxDs
+        * to send frames to the host.  This is done by reading the
+        * PRC_RXD_DOORBELL_VPn register and comparing the read value to the
+        * RXD_SPAT value for the vpath.
+        */
+       val64 = readq(&vp_reg->prc_cfg6);
+       rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
+       /* Use a factor of 2 when comparing rxd_count against rxd_spat for some
+        * leg room.
+        */
+       rxd_spat *= 2;
+
+       do {
+               mdelay(1);
+
+               rxd_count = readq(&vp_reg->prc_rxd_doorbell);
+
+               /* Check that the ring controller for this vpath does
+                * not have any frame in its pipeline.
+                */
+               val64 = readq(&vp_reg->frm_in_progress_cnt);
+               if ((rxd_count <= rxd_spat) || (val64 > 0))
+                       count = 0;
+               else
+                       count++;
+               total_count++;
+       } while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
+                       (total_count < VXGE_HW_MAX_POLLING_COUNT));
+
+       if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
+               printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n",
+                       __func__);
+
+       return total_count;
+}
+
+/* vxge_hw_device_wait_receive_idle - This function waits until all frames
+ * stored in the frame buffer for each vpath assigned to the given
+ * function (hldev) have been sent to the host.
+ */
+void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
+{
+       int i, total_count = 0;
+
+       for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+               if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
+                       continue;
+
+               total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
+               if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
+                       break;
+       }
+}
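For orientation: vxge_hw_vpath_wait_receive_idle() above polls until the vpath's ring controller reports enough free RxDs and no frames in flight, and vxge_hw_device_wait_receive_idle() repeats that for every deployed vpath. A hedged sketch of how a caller might drain Rx before a reset, using only symbols introduced in this patch (the wrapper itself is hypothetical):

	/* Hypothetical caller: drain Rx on every deployed vpath before a reset. */
	static int example_drain_rx(struct __vxge_hw_device *hldev)
	{
		int i, polls = 0;

		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
			if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
				continue;	/* vpath not owned by this function */

			polls += vxge_hw_vpath_wait_receive_idle(hldev, i);
			if (polls >= VXGE_HW_MAX_POLLING_COUNT)
				return -EBUSY;	/* still receiving traffic */
		}

		return 0;
	}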
 
 static enum vxge_hw_status
-__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
-                                 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
+vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
+                    u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
+                    u64 *steer_ctrl)
+{
+       struct vxge_hw_vpath_reg __iomem *vp_reg;
+       enum vxge_hw_status status;
+       u64 val64;
+       u32 retry = 0, max_retry = 100;
+
+       vp_reg = vpath->vp_reg;
+
+       if (vpath->vp_open) {
+               max_retry = 3;
+               spin_lock(&vpath->lock);
+       }
+
+       writeq(*data0, &vp_reg->rts_access_steer_data0);
+       writeq(*data1, &vp_reg->rts_access_steer_data1);
+       wmb();
+
+       val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
+               VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
+               VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
+               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
+               *steer_ctrl;
+
+       status = __vxge_hw_pio_mem_write64(val64,
+                                          &vp_reg->rts_access_steer_ctrl,
+                                          VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
+                                          VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+
+       /* The __vxge_hw_device_register_poll can udelay for a significant
+        * amount of time, blocking other processes from the CPU.  If it delays
+        * for ~5 seconds, an NMI error can occur.  A way around this is to
+        * give up the processor via msleep, but this is not allowed while
+        * under lock.  So, only allow it to sleep for ~4 seconds if open.
+        * Otherwise, delay for 1 second and sleep for 10 ms until the firmware
+        * operation has completed or timed out.
+        */
+       while ((status != VXGE_HW_OK) && retry++ < max_retry) {
+               if (!vpath->vp_open)
+                       msleep(20);
+               status = __vxge_hw_device_register_poll(
+                                       &vp_reg->rts_access_steer_ctrl,
+                                       VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
+                                       VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+       }
+
+       if (status != VXGE_HW_OK)
+               goto out;
+
+       val64 = readq(&vp_reg->rts_access_steer_ctrl);
+       if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
+               *data0 = readq(&vp_reg->rts_access_steer_data0);
+               *data1 = readq(&vp_reg->rts_access_steer_data1);
+               *steer_ctrl = val64;
+       } else
+               status = VXGE_HW_FAIL;
+
+out:
+       if (vpath->vp_open)
+               spin_unlock(&vpath->lock);
+       return status;
+}
+
+enum vxge_hw_status
+vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
+                            u32 *minor, u32 *build)
+{
+       u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+       struct __vxge_hw_virtualpath *vpath;
+       enum vxge_hw_status status;
+
+       vpath = &hldev->virtual_paths[hldev->first_vp_id];
+
+       status = vxge_hw_vpath_fw_api(vpath,
+                                     VXGE_HW_FW_UPGRADE_ACTION,
+                                     VXGE_HW_FW_UPGRADE_MEMO,
+                                     VXGE_HW_FW_UPGRADE_OFFSET_READ,
+                                     &data0, &data1, &steer_ctrl);
+       if (status != VXGE_HW_OK)
+               return status;
+
+       *major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
+       *minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
+       *build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
+
+       return status;
+}
+
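As a quick illustration of the new firmware API wrapper above, a caller could read back the adapter's firmware version roughly as follows (a sketch; the helper name and log text are invented):

	/* Hypothetical: report the firmware version the adapter is running. */
	static void example_report_fw_version(struct __vxge_hw_device *hldev)
	{
		u32 major, minor, build;

		if (vxge_hw_upgrade_read_version(hldev, &major, &minor,
						 &build) == VXGE_HW_OK)
			printk(KERN_INFO "adapter firmware %u.%u.%u\n",
			       major, minor, build);
	}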
+enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev)
+{
+       u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+       struct __vxge_hw_virtualpath *vpath;
+       enum vxge_hw_status status;
+       u32 ret;
+
+       vpath = &hldev->virtual_paths[hldev->first_vp_id];
+
+       status = vxge_hw_vpath_fw_api(vpath,
+                                     VXGE_HW_FW_UPGRADE_ACTION,
+                                     VXGE_HW_FW_UPGRADE_MEMO,
+                                     VXGE_HW_FW_UPGRADE_OFFSET_COMMIT,
+                                     &data0, &data1, &steer_ctrl);
+       if (status != VXGE_HW_OK) {
+               vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__);
+               goto exit;
+       }
+
+       ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F;
+       if (ret != 1) {
+               vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d",
+                               __func__, ret);
+               status = VXGE_HW_FAIL;
+       }
+
+exit:
+       return status;
+}
+
+enum vxge_hw_status
+vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size)
+{
+       u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+       struct __vxge_hw_virtualpath *vpath;
+       enum vxge_hw_status status;
+       int ret_code, sec_code;
+
+       vpath = &hldev->virtual_paths[hldev->first_vp_id];
+
+       /* send upgrade start command */
+       status = vxge_hw_vpath_fw_api(vpath,
+                                     VXGE_HW_FW_UPGRADE_ACTION,
+                                     VXGE_HW_FW_UPGRADE_MEMO,
+                                     VXGE_HW_FW_UPGRADE_OFFSET_START,
+                                     &data0, &data1, &steer_ctrl);
+       if (status != VXGE_HW_OK) {
+               vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed",
+                               __func__);
+               return status;
+       }
+
+       /* Transfer fw image to adapter 16 bytes at a time */
+       for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) {
+               steer_ctrl = 0;
+
+               /* The next 128 bits of fwdata to be loaded onto the adapter */
+               data0 = *((u64 *)fwdata);
+               data1 = *((u64 *)fwdata + 1);
+
+               status = vxge_hw_vpath_fw_api(vpath,
+                                             VXGE_HW_FW_UPGRADE_ACTION,
+                                             VXGE_HW_FW_UPGRADE_MEMO,
+                                             VXGE_HW_FW_UPGRADE_OFFSET_SEND,
+                                             &data0, &data1, &steer_ctrl);
+               if (status != VXGE_HW_OK) {
+                       vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed",
+                                       __func__);
+                       goto out;
+               }
+
+               ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0);
+               switch (ret_code) {
+               case VXGE_HW_FW_UPGRADE_OK:
+                       /* All OK, send next 16 bytes. */
+                       break;
+               case VXGE_FW_UPGRADE_BYTES2SKIP:
+                       /* skip bytes in the stream */
+                       fwdata += (data0 >> 8) & 0xFFFFFFFF;
+                       break;
+               case VXGE_HW_FW_UPGRADE_DONE:
+                       goto out;
+               case VXGE_HW_FW_UPGRADE_ERR:
+                       sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0);
+                       switch (sec_code) {
+                       case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1:
+                       case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7:
+                               printk(KERN_ERR
+                                      "corrupted data from .ncf file\n");
+                               break;
+                       case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3:
+                       case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4:
+                       case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5:
+                       case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6:
+                       case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8:
+                               printk(KERN_ERR "invalid .ncf file\n");
+                               break;
+                       case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW:
+                               printk(KERN_ERR "buffer overflow\n");
+                               break;
+                       case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH:
+                               printk(KERN_ERR "failed to flash the image\n");
+                               break;
+                       case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN:
+                               printk(KERN_ERR
+                                      "generic error. Unknown error type\n");
+                               break;
+                       default:
+                               printk(KERN_ERR "Unknown error of type %d\n",
+                                      sec_code);
+                               break;
+                       }
+                       status = VXGE_HW_FAIL;
+                       goto out;
+               default:
+                       printk(KERN_ERR "Unknown FW error: %d\n", ret_code);
+                       status = VXGE_HW_FAIL;
+                       goto out;
+               }
+               /* point to next 16 bytes */
+               fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE;
+       }
+out:
+       return status;
+}
+
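Taken together, the intended flow is: obtain the .ncf image, stream it 16 bytes at a time through vxge_update_fw_image(), then commit it with vxge_hw_flash_fw(). A sketch of that flow using the standard firmware loader, with the file name assumed for illustration:

	#include <linux/firmware.h>

	/* Hypothetical upgrade path; "vxge_fw.ncf" is a made-up file name. */
	static int example_upgrade_fw(struct __vxge_hw_device *hldev,
				      struct device *dev)
	{
		const struct firmware *fw;
		int ret;

		ret = request_firmware(&fw, "vxge_fw.ncf", dev);
		if (ret)
			return ret;

		if (vxge_update_fw_image(hldev, fw->data, fw->size) == VXGE_HW_OK &&
		    vxge_hw_flash_fw(hldev) == VXGE_HW_OK)
			ret = 0;
		else
			ret = -EIO;		/* upgrade or commit failed */

		release_firmware(fw);
		return ret;
	}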
+enum vxge_hw_status
+vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
+                               struct eprom_image *img)
+{
+       u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+       struct __vxge_hw_virtualpath *vpath;
+       enum vxge_hw_status status;
+       int i;
+
+       vpath = &hldev->virtual_paths[hldev->first_vp_id];
+
+       for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
+               data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i);
+               data1 = steer_ctrl = 0;
+
+               status = vxge_hw_vpath_fw_api(vpath,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+                       VXGE_HW_FW_API_GET_EPROM_REV,
+                       0, &data0, &data1, &steer_ctrl);
+               if (status != VXGE_HW_OK)
+                       break;
+
+               img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0);
+               img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0);
+               img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0);
+               img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0);
+       }
+
+       return status;
+}
 
 /*
  * __vxge_hw_channel_allocate - Allocate memory for channel
  * This function allocates required memory for the channel and various arrays
  * in the channel
  */
-struct __vxge_hw_channel*
+static struct __vxge_hw_channel *
 __vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
                           enum __vxge_hw_channel_type type,
        u32 length, u32 per_dtr_space, void *userdata)
@@ -269,7 +521,7 @@ exit0:
  * This function deallocates memory from the channel and various arrays
  * in the channel
  */
-void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
+static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
 {
        kfree(channel->work_arr);
        kfree(channel->free_arr);
@@ -283,7 +535,7 @@ void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
  * This function initializes a channel by properly setting the
  * various references
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
 {
        u32 i;
@@ -318,7 +570,7 @@ __vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
  * __vxge_hw_channel_reset - Resets a channel
  * This function resets a channel by properly setting the various references
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
 {
        u32 i;
@@ -345,8 +597,7 @@ __vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
  * Initialize certain PCI/PCI-X configuration registers
  * with recommended values. Save config space for future hw resets.
  */
-void
-__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
+static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
 {
        u16 cmd = 0;
 
@@ -390,7 +641,7 @@ __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
        return ret;
 }
 
- /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
+/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
  * in progress
  * This routine checks the vpath reset in progress register is turned zero
  */
@@ -435,7 +686,7 @@ exit:
  * register location pointers in the device object. It waits until the ric is
  * completed initializing registers.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
 {
        u64 val64;
@@ -495,26 +746,6 @@ exit:
        return status;
 }
 
-/*
- * __vxge_hw_device_id_get
- * This routine returns sets the device id and revision numbers into the device
- * structure
- */
-void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
-{
-       u64 val64;
-
-       val64 = readq(&hldev->common_reg->titan_asic_id);
-       hldev->device_id =
-               (u16)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(val64);
-
-       hldev->major_revision =
-               (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(val64);
-
-       hldev->minor_revision =
-               (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);
-}
-
 /*
  * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
  * This routine returns the Access Rights of the driver
@@ -567,11 +798,26 @@ __vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
                return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
 }
 
+/*
+ * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
+ * Returns the function number of the vpath.
+ */
+static u32
+__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
+{
+       u64 val64;
+
+       val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
+
+       return
+        (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
+}
+
 /*
  * __vxge_hw_device_host_info_get
  * This routine returns the host type assignments
  */
-void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
+static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
 {
        u64 val64;
        u32 i;
@@ -584,16 +830,18 @@ void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
        hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
 
        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
                if (!(hldev->vpath_assignments & vxge_mBIT(i)))
                        continue;
 
                hldev->func_id =
-                       __vxge_hw_vpath_func_id_get(i, hldev->vpmgmt_reg[i]);
+                       __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);
 
                hldev->access_rights = __vxge_hw_device_access_rights_get(
                        hldev->host_type, hldev->func_id);
 
+               hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN;
+               hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i];
+
                hldev->first_vp_id = i;
                break;
        }
@@ -627,25 +875,216 @@ __vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
                return VXGE_HW_ERR_INVALID_PCI_INFO;
        }
 
-       return VXGE_HW_OK;
+       return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_device_initialize
+ * Initialize Titan-V hardware.
+ */
+static enum vxge_hw_status
+__vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
+{
+       enum vxge_hw_status status = VXGE_HW_OK;
+
+       if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
+                               hldev->func_id)) {
+               /* Validate the pci-e link width and speed */
+               status = __vxge_hw_verify_pci_e_info(hldev);
+               if (status != VXGE_HW_OK)
+                       goto exit;
+       }
+
+exit:
+       return status;
+}
+
+/*
+ * __vxge_hw_vpath_fw_ver_get - Get the fw version
+ * Returns FW Version
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
+                          struct vxge_hw_device_hw_info *hw_info)
+{
+       struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
+       struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
+       struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
+       struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
+       u64 data0, data1 = 0, steer_ctrl = 0;
+       enum vxge_hw_status status;
+
+       status = vxge_hw_vpath_fw_api(vpath,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+                       0, &data0, &data1, &steer_ctrl);
+       if (status != VXGE_HW_OK)
+               goto exit;
+
+       fw_date->day =
+           (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0);
+       fw_date->month =
+           (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0);
+       fw_date->year =
+           (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0);
+
+       snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
+                fw_date->month, fw_date->day, fw_date->year);
+
+       fw_version->major =
+           (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
+       fw_version->minor =
+           (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
+       fw_version->build =
+           (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
+
+       snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
+                fw_version->major, fw_version->minor, fw_version->build);
+
+       flash_date->day =
+           (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1);
+       flash_date->month =
+           (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1);
+       flash_date->year =
+           (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1);
+
+       snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
+                flash_date->month, flash_date->day, flash_date->year);
+
+       flash_version->major =
+           (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1);
+       flash_version->minor =
+           (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1);
+       flash_version->build =
+           (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1);
+
+       snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
+                flash_version->major, flash_version->minor,
+                flash_version->build);
+
+exit:
+       return status;
+}
+
+/*
+ * __vxge_hw_vpath_card_info_get - Get the serial numbers,
+ * part number and product description.
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath,
+                             struct vxge_hw_device_hw_info *hw_info)
+{
+       enum vxge_hw_status status;
+       u64 data0, data1 = 0, steer_ctrl = 0;
+       u8 *serial_number = hw_info->serial_number;
+       u8 *part_number = hw_info->part_number;
+       u8 *product_desc = hw_info->product_desc;
+       u32 i, j = 0;
+
+       data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER;
+
+       status = vxge_hw_vpath_fw_api(vpath,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+                       0, &data0, &data1, &steer_ctrl);
+       if (status != VXGE_HW_OK)
+               return status;
+
+       ((u64 *)serial_number)[0] = be64_to_cpu(data0);
+       ((u64 *)serial_number)[1] = be64_to_cpu(data1);
+
+       data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER;
+       data1 = steer_ctrl = 0;
+
+       status = vxge_hw_vpath_fw_api(vpath,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+                       0, &data0, &data1, &steer_ctrl);
+       if (status != VXGE_HW_OK)
+               return status;
+
+       ((u64 *)part_number)[0] = be64_to_cpu(data0);
+       ((u64 *)part_number)[1] = be64_to_cpu(data1);
+
+       for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
+            i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
+               data0 = i;
+               data1 = steer_ctrl = 0;
+
+               status = vxge_hw_vpath_fw_api(vpath,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+                       0, &data0, &data1, &steer_ctrl);
+               if (status != VXGE_HW_OK)
+                       return status;
+
+               ((u64 *)product_desc)[j++] = be64_to_cpu(data0);
+               ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
+       }
+
+       return status;
+}
+
+/*
+ * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
+ * Returns pci function mode
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath,
+                                 struct vxge_hw_device_hw_info *hw_info)
+{
+       u64 data0, data1 = 0, steer_ctrl = 0;
+       enum vxge_hw_status status;
+
+       data0 = 0;
+
+       status = vxge_hw_vpath_fw_api(vpath,
+                       VXGE_HW_FW_API_GET_FUNC_MODE,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+                       0, &data0, &data1, &steer_ctrl);
+       if (status != VXGE_HW_OK)
+               return status;
+
+       hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0);
+       return status;
 }
 
 /*
- * __vxge_hw_device_initialize
- * Initialize Titan-V hardware.
+ * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
+ *               from MAC address table.
  */
-enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
+static enum vxge_hw_status
+__vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath,
+                        u8 *macaddr, u8 *macaddr_mask)
 {
-       enum vxge_hw_status status = VXGE_HW_OK;
+       u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
+           data0 = 0, data1 = 0, steer_ctrl = 0;
+       enum vxge_hw_status status;
+       int i;
 
-       if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
-                               hldev->func_id)) {
-               /* Validate the pci-e link width and speed */
-               status = __vxge_hw_verify_pci_e_info(hldev);
+       do {
+               status = vxge_hw_vpath_fw_api(vpath, action,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
+                       0, &data0, &data1, &steer_ctrl);
                if (status != VXGE_HW_OK)
                        goto exit;
-       }
 
+               data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
+               data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
+                                                                       data1);
+
+               for (i = ETH_ALEN; i > 0; i--) {
+                       macaddr[i - 1] = (u8) (data0 & 0xFF);
+                       data0 >>= 8;
+
+                       macaddr_mask[i - 1] = (u8) (data1 & 0xFF);
+                       data1 >>= 8;
+               }
+
+               action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
+               data0 = 0, data1 = 0, steer_ctrl = 0;
+
+       } while (!is_valid_ether_addr(macaddr));
 exit:
        return status;
 }
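The rewritten __vxge_hw_vpath_addr_get() above unpacks the MAC address one octet at a time from the low 48 bits of the 64-bit steering data, most-significant byte first. A standalone sketch of just that unpacking step (the helper name is invented):

	#include <linux/if_ether.h>

	/* Expand a MAC packed into the low 48 bits of a u64, MSB first. */
	static void example_unpack_mac(u64 packed, u8 mac[ETH_ALEN])
	{
		int i;

		for (i = ETH_ALEN; i > 0; i--) {
			mac[i - 1] = (u8)(packed & 0xFF);	/* lowest byte -> last octet */
			packed >>= 8;
		}
	}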
@@ -665,9 +1104,9 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
        struct vxge_hw_toc_reg __iomem *toc;
        struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
        struct vxge_hw_common_reg __iomem *common_reg;
-       struct vxge_hw_vpath_reg __iomem *vpath_reg;
        struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
        enum vxge_hw_status status;
+       struct __vxge_hw_virtualpath vpath;
 
        memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));
 
@@ -702,7 +1141,7 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
                vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
                                (bar0 + val64);
 
-               hw_info->func_id = __vxge_hw_vpath_func_id_get(i, vpmgmt_reg);
+               hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
                if (__vxge_hw_device_access_rights_get(hw_info->host_type,
                        hw_info->func_id) &
                        VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {
@@ -718,16 +1157,19 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
 
                val64 = readq(&toc->toc_vpath_pointer[i]);
 
-               vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);
+               vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
+                              (bar0 + val64);
+               vpath.vp_open = 0;
 
-               hw_info->function_mode =
-                       __vxge_hw_vpath_pci_func_mode_get(i, vpath_reg);
+               status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
+               if (status != VXGE_HW_OK)
+                       goto exit;
 
-               status = __vxge_hw_vpath_fw_ver_get(i, vpath_reg, hw_info);
+               status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);
                if (status != VXGE_HW_OK)
                        goto exit;
 
-               status = __vxge_hw_vpath_card_info_get(i, vpath_reg, hw_info);
+               status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);
                if (status != VXGE_HW_OK)
                        goto exit;
 
@@ -735,14 +1177,15 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
        }
 
        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
                if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
                        continue;
 
                val64 = readq(&toc->toc_vpath_pointer[i]);
-               vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);
+               vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
+                              (bar0 + val64);
+               vpath.vp_open = 0;
 
-               status =  __vxge_hw_vpath_addr_get(i, vpath_reg,
+               status =  __vxge_hw_vpath_addr_get(&vpath,
                                hw_info->mac_addrs[i],
                                hw_info->mac_addr_masks[i]);
                if (status != VXGE_HW_OK)
@@ -806,7 +1249,6 @@ vxge_hw_device_initialize(
                vfree(hldev);
                goto exit;
        }
-       __vxge_hw_device_id_get(hldev);
 
        __vxge_hw_device_host_info_get(hldev);
 
@@ -814,7 +1256,6 @@ vxge_hw_device_initialize(
        nblocks++;
 
        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
                if (!(hldev->vpath_assignments & vxge_mBIT(i)))
                        continue;
 
@@ -839,7 +1280,6 @@ vxge_hw_device_initialize(
        }
 
        status = __vxge_hw_device_initialize(hldev);
-
        if (status != VXGE_HW_OK) {
                vxge_hw_device_terminate(hldev);
                goto exit;
@@ -876,7 +1316,6 @@ vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
        enum vxge_hw_status status = VXGE_HW_OK;
 
        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
                if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
                        (hldev->virtual_paths[i].vp_open ==
                                VXGE_HW_VP_NOT_OPEN))
@@ -1165,7 +1604,6 @@ exit:
  * It can be used to set or reset Pause frame generation or reception
  * support of the NIC.
  */
-
 enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
                                                 u32 port, u32 tx, u32 rx)
 {
@@ -1409,7 +1847,6 @@ exit:
 /*
  * __vxge_hw_ring_create - Create a Ring
  * This function creates Ring and initializes it.
- *
  */
 static enum vxge_hw_status
 __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
@@ -1845,7 +2282,7 @@ static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
  * __vxge_hw_device_fifo_config_check - Check fifo configuration.
  * Check the fifo configuration
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
 {
        if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
@@ -1893,7 +2330,7 @@ __vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
  * __vxge_hw_device_config_check - Check device configuration.
  * Check the device configuration
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
 {
        u32 i;
@@ -2453,7 +2890,7 @@ __vxge_hw_fifo_mempool_item_alloc(
  * __vxge_hw_fifo_create - Create a FIFO
  * This function creates FIFO and initializes it.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
                      struct vxge_hw_fifo_attr *attr)
 {
@@ -2516,454 +2953,164 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
         *
         * During "reserve" operations more memory can be allocated on demand
         * for example due to FIFO full condition.
-        *
-        * Pool of memory memblocks never shrinks except in __vxge_hw_fifo_close
-        * routine which will essentially stop the channel and free resources.
-        */
-
-       /* TxDL common private size == TxDL private  +  driver private */
-       fifo->priv_size =
-               sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
-       fifo->priv_size = ((fifo->priv_size  +  VXGE_CACHE_LINE_SIZE - 1) /
-                       VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
-
-       fifo->per_txdl_space = attr->per_txdl_space;
-
-       /* recompute txdl size to be cacheline aligned */
-       fifo->txdl_size = txdl_size;
-       fifo->txdl_per_memblock = txdl_per_memblock;
-
-       fifo->txdl_term = attr->txdl_term;
-       fifo->callback = attr->callback;
-
-       if (fifo->txdl_per_memblock == 0) {
-               __vxge_hw_fifo_delete(vp);
-               status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
-               goto exit;
-       }
-
-       fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
-
-       fifo->mempool =
-               __vxge_hw_mempool_create(vpath->hldev,
-                       fifo->config->memblock_size,
-                       fifo->txdl_size,
-                       fifo->priv_size,
-                       (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
-                       (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
-                       &fifo_mp_callback,
-                       fifo);
-
-       if (fifo->mempool == NULL) {
-               __vxge_hw_fifo_delete(vp);
-               status = VXGE_HW_ERR_OUT_OF_MEMORY;
-               goto exit;
-       }
-
-       status = __vxge_hw_channel_initialize(&fifo->channel);
-       if (status != VXGE_HW_OK) {
-               __vxge_hw_fifo_delete(vp);
-               goto exit;
-       }
-
-       vxge_assert(fifo->channel.reserve_ptr);
-exit:
-       return status;
-}
-
-/*
- * __vxge_hw_fifo_abort - Returns the TxD
- * This function terminates the TxDs of fifo
- */
-static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
-{
-       void *txdlh;
-
-       for (;;) {
-               vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
-
-               if (txdlh == NULL)
-                       break;
-
-               vxge_hw_channel_dtr_complete(&fifo->channel);
-
-               if (fifo->txdl_term) {
-                       fifo->txdl_term(txdlh,
-                       VXGE_HW_TXDL_STATE_POSTED,
-                       fifo->channel.userdata);
-               }
-
-               vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
-       }
-
-       return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_fifo_reset - Resets the fifo
- * This function resets the fifo during vpath reset operation
- */
-static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
-{
-       enum vxge_hw_status status = VXGE_HW_OK;
-
-       __vxge_hw_fifo_abort(fifo);
-       status = __vxge_hw_channel_reset(&fifo->channel);
-
-       return status;
-}
-
-/*
- * __vxge_hw_fifo_delete - Removes the FIFO
- * This function freeup the memory pool and removes the FIFO
- */
-enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
-{
-       struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
-
-       __vxge_hw_fifo_abort(fifo);
-
-       if (fifo->mempool)
-               __vxge_hw_mempool_destroy(fifo->mempool);
-
-       vp->vpath->fifoh = NULL;
-
-       __vxge_hw_channel_free(&fifo->channel);
-
-       return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_vpath_pci_read - Read the content of given address
- *                          in pci config space.
- * Read from the vpath pci config space.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
-                        u32 phy_func_0, u32 offset, u32 *val)
-{
-       u64 val64;
-       enum vxge_hw_status status = VXGE_HW_OK;
-       struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
-
-       val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);
-
-       if (phy_func_0)
-               val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;
-
-       writeq(val64, &vp_reg->pci_config_access_cfg1);
-       wmb();
-       writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
-                       &vp_reg->pci_config_access_cfg2);
-       wmb();
-
-       status = __vxge_hw_device_register_poll(
-                       &vp_reg->pci_config_access_cfg2,
-                       VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
-       if (status != VXGE_HW_OK)
-               goto exit;
-
-       val64 = readq(&vp_reg->pci_config_access_status);
-
-       if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
-               status = VXGE_HW_FAIL;
-               *val = 0;
-       } else
-               *val = (u32)vxge_bVALn(val64, 32, 32);
-exit:
-       return status;
-}
-
-/*
- * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
- * Returns the function number of the vpath.
- */
-static u32
-__vxge_hw_vpath_func_id_get(u32 vp_id,
-       struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
-{
-       u64 val64;
-
-       val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
-
-       return
-        (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
-}
-
-/*
- * __vxge_hw_read_rts_ds - Program RTS steering critieria
- */
-static inline void
-__vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
-                     u64 dta_struct_sel)
-{
-       writeq(0, &vpath_reg->rts_access_steer_ctrl);
-       wmb();
-       writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
-       writeq(0, &vpath_reg->rts_access_steer_data1);
-       wmb();
-}
-
-
-/*
- * __vxge_hw_vpath_card_info_get - Get the serial numbers,
- * part number and product description.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_card_info_get(
-       u32 vp_id,
-       struct vxge_hw_vpath_reg __iomem *vpath_reg,
-       struct vxge_hw_device_hw_info *hw_info)
-{
-       u32 i, j;
-       u64 val64;
-       u64 data1 = 0ULL;
-       u64 data2 = 0ULL;
-       enum vxge_hw_status status = VXGE_HW_OK;
-       u8 *serial_number = hw_info->serial_number;
-       u8 *part_number = hw_info->part_number;
-       u8 *product_desc = hw_info->product_desc;
-
-       __vxge_hw_read_rts_ds(vpath_reg,
-               VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER);
-
-       val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
-       status = __vxge_hw_pio_mem_write64(val64,
-                               &vpath_reg->rts_access_steer_ctrl,
-                               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
-                               VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
-       if (status != VXGE_HW_OK)
-               return status;
-
-       val64 = readq(&vpath_reg->rts_access_steer_ctrl);
-
-       if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
-               data1 = readq(&vpath_reg->rts_access_steer_data0);
-               ((u64 *)serial_number)[0] = be64_to_cpu(data1);
-
-               data2 = readq(&vpath_reg->rts_access_steer_data1);
-               ((u64 *)serial_number)[1] = be64_to_cpu(data2);
-               status = VXGE_HW_OK;
-       } else
-               *serial_number = 0;
-
-       __vxge_hw_read_rts_ds(vpath_reg,
-                       VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER);
-
-       val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
-       status = __vxge_hw_pio_mem_write64(val64,
-                               &vpath_reg->rts_access_steer_ctrl,
-                               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
-                               VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
-       if (status != VXGE_HW_OK)
-               return status;
-
-       val64 = readq(&vpath_reg->rts_access_steer_ctrl);
+        *
+        * Pool of memory memblocks never shrinks except in __vxge_hw_fifo_close
+        * routine which will essentially stop the channel and free resources.
+        */
 
-       if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
+       /* TxDL common private size == TxDL private  +  driver private */
+       fifo->priv_size =
+               sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
+       fifo->priv_size = ((fifo->priv_size  +  VXGE_CACHE_LINE_SIZE - 1) /
+                       VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
 
-               data1 = readq(&vpath_reg->rts_access_steer_data0);
-               ((u64 *)part_number)[0] = be64_to_cpu(data1);
+       fifo->per_txdl_space = attr->per_txdl_space;
 
-               data2 = readq(&vpath_reg->rts_access_steer_data1);
-               ((u64 *)part_number)[1] = be64_to_cpu(data2);
+       /* recompute txdl size to be cacheline aligned */
+       fifo->txdl_size = txdl_size;
+       fifo->txdl_per_memblock = txdl_per_memblock;
 
-               status = VXGE_HW_OK;
+       fifo->txdl_term = attr->txdl_term;
+       fifo->callback = attr->callback;
 
-       } else
-               *part_number = 0;
+       if (fifo->txdl_per_memblock == 0) {
+               __vxge_hw_fifo_delete(vp);
+               status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
+               goto exit;
+       }
 
-       j = 0;
+       fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
 
-       for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
-            i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
+       fifo->mempool =
+               __vxge_hw_mempool_create(vpath->hldev,
+                       fifo->config->memblock_size,
+                       fifo->txdl_size,
+                       fifo->priv_size,
+                       (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
+                       (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
+                       &fifo_mp_callback,
+                       fifo);
 
-               __vxge_hw_read_rts_ds(vpath_reg, i);
+       if (fifo->mempool == NULL) {
+               __vxge_hw_fifo_delete(vp);
+               status = VXGE_HW_ERR_OUT_OF_MEMORY;
+               goto exit;
+       }
 
-               val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
+       status = __vxge_hw_channel_initialize(&fifo->channel);
+       if (status != VXGE_HW_OK) {
+               __vxge_hw_fifo_delete(vp);
+               goto exit;
+       }
 
-               status = __vxge_hw_pio_mem_write64(val64,
-                               &vpath_reg->rts_access_steer_ctrl,
-                               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
-                               VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+       vxge_assert(fifo->channel.reserve_ptr);
+exit:
+       return status;
+}
 
-               if (status != VXGE_HW_OK)
-                       return status;
+/*
+ * __vxge_hw_fifo_abort - Returns the TxD
+ * This function terminates the TxDs of the fifo
+ */
+static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
+{
+       void *txdlh;
 
-               val64 = readq(&vpath_reg->rts_access_steer_ctrl);
+       for (;;) {
+               vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
 
-               if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
+               if (txdlh == NULL)
+                       break;
 
-                       data1 = readq(&vpath_reg->rts_access_steer_data0);
-                       ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
+               vxge_hw_channel_dtr_complete(&fifo->channel);
 
-                       data2 = readq(&vpath_reg->rts_access_steer_data1);
-                       ((u64 *)product_desc)[j++] = be64_to_cpu(data2);
+               if (fifo->txdl_term) {
+                       fifo->txdl_term(txdlh,
+                       VXGE_HW_TXDL_STATE_POSTED,
+                       fifo->channel.userdata);
+               }
 
-                       status = VXGE_HW_OK;
-               } else
-                       *product_desc = 0;
+               vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
        }
 
-       return status;
+       return VXGE_HW_OK;
 }
 
 /*
- * __vxge_hw_vpath_fw_ver_get - Get the fw version
- * Returns FW Version
+ * __vxge_hw_fifo_reset - Resets the fifo
+ * This function resets the fifo during vpath reset operation
  */
-static enum vxge_hw_status
-__vxge_hw_vpath_fw_ver_get(
-       u32 vp_id,
-       struct vxge_hw_vpath_reg __iomem *vpath_reg,
-       struct vxge_hw_device_hw_info *hw_info)
+static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
 {
-       u64 val64;
-       u64 data1 = 0ULL;
-       u64 data2 = 0ULL;
-       struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
-       struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
-       struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
-       struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
        enum vxge_hw_status status = VXGE_HW_OK;
 
-       val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
+       __vxge_hw_fifo_abort(fifo);
+       status = __vxge_hw_channel_reset(&fifo->channel);
 
-       status = __vxge_hw_pio_mem_write64(val64,
-                               &vpath_reg->rts_access_steer_ctrl,
-                               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
-                               VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+       return status;
+}
 
-       if (status != VXGE_HW_OK)
-               goto exit;
+/*
+ * __vxge_hw_fifo_delete - Removes the FIFO
+ * This function frees up the memory pool and removes the FIFO
+ */
+static enum vxge_hw_status
+__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
+{
+       struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
 
-       val64 = readq(&vpath_reg->rts_access_steer_ctrl);
+       __vxge_hw_fifo_abort(fifo);
 
-       if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
+       if (fifo->mempool)
+               __vxge_hw_mempool_destroy(fifo->mempool);
 
-               data1 = readq(&vpath_reg->rts_access_steer_data0);
-               data2 = readq(&vpath_reg->rts_access_steer_data1);
-
-               fw_date->day =
-                       (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(
-                                               data1);
-               fw_date->month =
-                       (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(
-                                               data1);
-               fw_date->year =
-                       (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(
-                                               data1);
-
-               snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
-                       fw_date->month, fw_date->day, fw_date->year);
-
-               fw_version->major =
-                   (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1);
-               fw_version->minor =
-                   (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1);
-               fw_version->build =
-                   (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1);
-
-               snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
-                   fw_version->major, fw_version->minor, fw_version->build);
-
-               flash_date->day =
-                 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2);
-               flash_date->month =
-                (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2);
-               flash_date->year =
-                (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2);
-
-               snprintf(flash_date->date, VXGE_HW_FW_STRLEN,
-                       "%2.2d/%2.2d/%4.4d",
-                       flash_date->month, flash_date->day, flash_date->year);
-
-               flash_version->major =
-                (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2);
-               flash_version->minor =
-                (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2);
-               flash_version->build =
-                (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2);
-
-               snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
-                       flash_version->major, flash_version->minor,
-                       flash_version->build);
+       vp->vpath->fifoh = NULL;
 
-               status = VXGE_HW_OK;
+       __vxge_hw_channel_free(&fifo->channel);
 
-       } else
-               status = VXGE_HW_FAIL;
-exit:
-       return status;
+       return VXGE_HW_OK;
 }
 
 /*
- * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
- * Returns pci function mode
+ * __vxge_hw_vpath_pci_read - Read the content of a given address
+ *                          in pci config space.
+ * Read from the vpath pci config space.
  */
-static u64
-__vxge_hw_vpath_pci_func_mode_get(
-       u32  vp_id,
-       struct vxge_hw_vpath_reg __iomem *vpath_reg)
+static enum vxge_hw_status
+__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
+                        u32 phy_func_0, u32 offset, u32 *val)
 {
        u64 val64;
-       u64 data1 = 0ULL;
        enum vxge_hw_status status = VXGE_HW_OK;
+       struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
 
-       __vxge_hw_read_rts_ds(vpath_reg,
-               VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE);
+       val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);
 
-       val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
+       if (phy_func_0)
+               val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;
 
-       status = __vxge_hw_pio_mem_write64(val64,
-                               &vpath_reg->rts_access_steer_ctrl,
-                               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
-                               VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+       writeq(val64, &vp_reg->pci_config_access_cfg1);
+       wmb();
+       writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
+                       &vp_reg->pci_config_access_cfg2);
+       wmb();
+
+       status = __vxge_hw_device_register_poll(
+                       &vp_reg->pci_config_access_cfg2,
+                       VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
 
        if (status != VXGE_HW_OK)
                goto exit;
 
-       val64 = readq(&vpath_reg->rts_access_steer_ctrl);
+       val64 = readq(&vp_reg->pci_config_access_status);
 
-       if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
-               data1 = readq(&vpath_reg->rts_access_steer_data0);
-               status = VXGE_HW_OK;
-       } else {
-               data1 = 0;
+       if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
                status = VXGE_HW_FAIL;
-       }
+               *val = 0;
+       } else
+               *val = (u32)vxge_bVALn(val64, 32, 32);
 exit:
-       return data1;
+       return status;
 }
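
The new helper follows a write-then-poll sequence: program the target offset into pci_config_access_cfg1, strobe cfg2, poll for completion, then read the status/data register. For reference only, a caller might look like the minimal sketch below; example_read_ids() is hypothetical and simply illustrates the calling convention, with PCI_VENDOR_ID (offset 0) standing in for any 32-bit-aligned config-space offset.

/* Hypothetical caller, for illustration only; not part of this patch.
 * Reads the first 32-bit word of function 0's PCI config space (vendor
 * and device ID) through the vpath window set up by the helper above.
 */
static enum vxge_hw_status example_read_ids(struct __vxge_hw_virtualpath *vpath)
{
	u32 ids = 0;
	enum vxge_hw_status status;

	/* phy_func_0 != 0 selects physical function 0's config space */
	status = __vxge_hw_vpath_pci_read(vpath, 1, PCI_VENDOR_ID, &ids);
	if (status != VXGE_HW_OK)
		return status;

	pr_info("vxge: vendor 0x%04x device 0x%04x\n",
		ids & 0xffff, ids >> 16);
	return VXGE_HW_OK;
}
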
 
 /**
@@ -2974,37 +3121,24 @@ exit:
  * Flicker the link LED.
  */
 enum vxge_hw_status
-vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev,
-                              u64 on_off)
+vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off)
 {
-       u64 val64;
-       enum vxge_hw_status status = VXGE_HW_OK;
-       struct vxge_hw_vpath_reg __iomem *vp_reg;
+       struct __vxge_hw_virtualpath *vpath;
+       u64 data0, data1 = 0, steer_ctrl = 0;
+       enum vxge_hw_status status;
 
        if (hldev == NULL) {
                status = VXGE_HW_ERR_INVALID_DEVICE;
                goto exit;
        }
 
-       vp_reg = hldev->vpath_reg[hldev->first_vp_id];
-
-       writeq(0, &vp_reg->rts_access_steer_ctrl);
-       wmb();
-       writeq(on_off, &vp_reg->rts_access_steer_data0);
-       writeq(0, &vp_reg->rts_access_steer_data1);
-       wmb();
-
-       val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
+       vpath = &hldev->virtual_paths[hldev->first_vp_id];
 
-       status = __vxge_hw_pio_mem_write64(val64,
-                               &vp_reg->rts_access_steer_ctrl,
-                               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
-                               VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+       data0 = on_off;
+       status = vxge_hw_vpath_fw_api(vpath,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+                       0, &data0, &data1, &steer_ctrl);
 exit:
        return status;
 }
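
With the steering-register boilerplate folded into vxge_hw_vpath_fw_api(), LED control reduces to a single firmware call. Purely for illustration, a caller such as the ethtool phys_id handler further down could blink the LED along these lines; example_blink_led() is hypothetical, and VXGE_FLICKER_ON/VXGE_FLICKER_OFF are assumed to be the on/off arguments the driver already passes to this helper.

/* Hypothetical usage sketch, not part of this patch.
 * Requires <linux/delay.h> for msleep().
 */
static void example_blink_led(struct __vxge_hw_device *hldev)
{
	vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON);
	msleep(2000);		/* keep the LED flickering for ~2 seconds */
	vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_OFF);
}
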
@@ -3013,63 +3147,38 @@ exit:
  * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
  */
 enum vxge_hw_status
-__vxge_hw_vpath_rts_table_get(
-       struct __vxge_hw_vpath_handle *vp,
-       u32 action, u32 rts_table, u32 offset, u64 *data1, u64 *data2)
+__vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
+                             u32 action, u32 rts_table, u32 offset,
+                             u64 *data0, u64 *data1)
 {
-       u64 val64;
-       struct __vxge_hw_virtualpath *vpath;
-       struct vxge_hw_vpath_reg __iomem *vp_reg;
-
-       enum vxge_hw_status status = VXGE_HW_OK;
+       enum vxge_hw_status status;
+       u64 steer_ctrl = 0;
 
        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }
 
-       vpath = vp->vpath;
-       vp_reg = vpath->vp_reg;
-
-       val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
-
        if ((rts_table ==
-               VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
+            VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
            (rts_table ==
-               VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
+            VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
            (rts_table ==
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
+            VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
            (rts_table ==
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
-               val64 = val64 | VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
+            VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
+               steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
        }
 
-       status = __vxge_hw_pio_mem_write64(val64,
-                               &vp_reg->rts_access_steer_ctrl,
-                               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
-                               vpath->hldev->config.device_poll_millis);
-
+       status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
+                                     data0, data1, &steer_ctrl);
        if (status != VXGE_HW_OK)
                goto exit;
 
-       val64 = readq(&vp_reg->rts_access_steer_ctrl);
-
-       if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
-
-               *data1 = readq(&vp_reg->rts_access_steer_data0);
-
-               if ((rts_table ==
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
-               (rts_table ==
-               VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
-                       *data2 = readq(&vp_reg->rts_access_steer_data1);
-               }
-               status = VXGE_HW_OK;
-       } else
-               status = VXGE_HW_FAIL;
+       if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
+           (rts_table !=
+            VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
+               *data1 = 0;
 exit:
        return status;
 }
@@ -3078,107 +3187,27 @@ exit:
  * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
  */
 enum vxge_hw_status
-__vxge_hw_vpath_rts_table_set(
-       struct __vxge_hw_vpath_handle *vp, u32 action, u32 rts_table,
-       u32 offset, u64 data1, u64 data2)
+__vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action,
+                             u32 rts_table, u32 offset, u64 steer_data0,
+                             u64 steer_data1)
 {
-       u64 val64;
-       struct __vxge_hw_virtualpath *vpath;
-       enum vxge_hw_status status = VXGE_HW_OK;
-       struct vxge_hw_vpath_reg __iomem *vp_reg;
+       u64 data0, data1 = 0, steer_ctrl = 0;
+       enum vxge_hw_status status;
 
        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }
 
-       vpath = vp->vpath;
-       vp_reg = vpath->vp_reg;
-
-       writeq(data1, &vp_reg->rts_access_steer_data0);
-       wmb();
+       data0 = steer_data0;
 
        if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
            (rts_table ==
-               VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
-               writeq(data2, &vp_reg->rts_access_steer_data1);
-               wmb();
-       }
-
-       val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
-
-       status = __vxge_hw_pio_mem_write64(val64,
-                               &vp_reg->rts_access_steer_ctrl,
-                               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
-                               vpath->hldev->config.device_poll_millis);
-
-       if (status != VXGE_HW_OK)
-               goto exit;
-
-       val64 = readq(&vp_reg->rts_access_steer_ctrl);
-
-       if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS)
-               status = VXGE_HW_OK;
-       else
-               status = VXGE_HW_FAIL;
-exit:
-       return status;
-}
-
-/*
- * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
- *               from MAC address table.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_addr_get(
-       u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
-       u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
-{
-       u32 i;
-       u64 val64;
-       u64 data1 = 0ULL;
-       u64 data2 = 0ULL;
-       enum vxge_hw_status status = VXGE_HW_OK;
-
-       val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
-               VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
-       status = __vxge_hw_pio_mem_write64(val64,
-                               &vpath_reg->rts_access_steer_ctrl,
-                               VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
-                               VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
-       if (status != VXGE_HW_OK)
-               goto exit;
-
-       val64 = readq(&vpath_reg->rts_access_steer_ctrl);
-
-       if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
-
-               data1 = readq(&vpath_reg->rts_access_steer_data0);
-               data2 = readq(&vpath_reg->rts_access_steer_data1);
-
-               data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
-               data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
-                                                       data2);
-
-               for (i = ETH_ALEN; i > 0; i--) {
-                       macaddr[i-1] = (u8)(data1 & 0xFF);
-                       data1 >>= 8;
+            VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
+               data1 = steer_data1;
 
-                       macaddr_mask[i-1] = (u8)(data2 & 0xFF);
-                       data2 >>= 8;
-               }
-               status = VXGE_HW_OK;
-       } else
-               status = VXGE_HW_FAIL;
+       status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
+                                     &data0, &data1, &steer_ctrl);
 exit:
        return status;
 }
@@ -3204,6 +3233,8 @@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
                     VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
                     VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
                        0, &data0, &data1);
+       if (status != VXGE_HW_OK)
+               goto exit;
 
        data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
                        VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
@@ -4117,6 +4148,7 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
 
        vpath = &hldev->virtual_paths[vp_id];
 
+       spin_lock_init(&hldev->virtual_paths[vp_id].lock);
        vpath->vp_id = vp_id;
        vpath->vp_open = VXGE_HW_VP_OPEN;
        vpath->hldev = hldev;
@@ -4127,14 +4159,12 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
        __vxge_hw_vpath_reset(hldev, vp_id);
 
        status = __vxge_hw_vpath_reset_check(vpath);
-
        if (status != VXGE_HW_OK) {
                memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
                goto exit;
        }
 
        status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
-
        if (status != VXGE_HW_OK) {
                memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
                goto exit;
@@ -4148,7 +4178,6 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
                hldev->tim_int_mask1, vp_id);
 
        status = __vxge_hw_vpath_initialize(hldev, vp_id);
-
        if (status != VXGE_HW_OK)
                __vxge_hw_vp_terminate(hldev, vp_id);
 exit:
@@ -4335,16 +4364,18 @@ vpath_open_exit1:
 void
 vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
 {
-       struct __vxge_hw_virtualpath *vpath = NULL;
+       struct __vxge_hw_virtualpath *vpath = vp->vpath;
+       struct __vxge_hw_ring *ring = vpath->ringh;
+       struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
        u64 new_count, val64, val164;
-       struct __vxge_hw_ring *ring;
 
-       vpath = vp->vpath;
-       ring = vpath->ringh;
+       if (vdev->titan1) {
+               new_count = readq(&vpath->vp_reg->rxdmem_size);
+               new_count &= 0x1fff;
+       } else
+               new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8;
 
-       new_count = readq(&vpath->vp_reg->rxdmem_size);
-       new_count &= 0x1fff;
-       val164 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));
+       val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count);
 
        writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
                &vpath->vp_reg->prc_rxd_doorbell);
@@ -4414,7 +4445,9 @@ enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
 
        __vxge_hw_vp_terminate(devh, vp_id);
 
+       spin_lock(&vpath->lock);
        vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
+       spin_unlock(&vpath->lock);
 
 vpath_close_exit:
        return status;
@@ -4810,7 +4843,7 @@ static void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
  * __vxge_hw_blockpool_create - Create block pool
  */
 
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
                           struct __vxge_hw_blockpool *blockpool,
                           u32 pool_size,
@@ -4910,7 +4943,7 @@ blockpool_create_exit:
  * __vxge_hw_blockpool_destroy - Deallocates the block pool
  */
 
-void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
+static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
 {
 
        struct __vxge_hw_device *hldev;
@@ -5076,7 +5109,7 @@ exit:
  * Allocates a block of memory of given size, either from block pool
  * or by calling vxge_os_dma_malloc()
  */
-void *
+static void *
 __vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
                                struct vxge_hw_mempool_dma *dma_object)
 {
@@ -5140,7 +5173,7 @@ exit:
  * __vxge_hw_blockpool_free - Frees the memory allcoated with
                                __vxge_hw_blockpool_malloc
  */
-void
+static void
 __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
                        void *memblock, u32 size,
                        struct vxge_hw_mempool_dma *dma_object)
@@ -5192,7 +5225,7 @@ __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
  * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
  * This function allocates a block from block pool or from the system
  */
-struct __vxge_hw_blockpool_entry *
+static struct __vxge_hw_blockpool_entry *
 __vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
 {
        struct __vxge_hw_blockpool_entry *entry = NULL;
@@ -5227,7 +5260,7 @@ __vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
  *
  * This function frees a block from block pool
  */
-void
+static void
 __vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
                        struct __vxge_hw_blockpool_entry *entry)
 {
index 5c00861b6c2c08351f1f52023f4037dfaad45236..5b2c8313426d3c7d7d051f4f51f020746d2af936 100644 (file)
 #define VXGE_CACHE_LINE_SIZE 128
 #endif
 
-#define vxge_os_vaprintf(level, mask, fmt, ...) { \
-       char buff[255]; \
-               snprintf(buff, 255, fmt, __VA_ARGS__); \
-               printk(buff); \
-               printk("\n"); \
-}
-
 #ifndef VXGE_ALIGN
 #define VXGE_ALIGN(adrs, size) \
        (((size) - (((u64)adrs) & ((size)-1))) & ((size)-1))
 #define VXGE_HW_MAX_MTU                                9600
 #define VXGE_HW_DEFAULT_MTU                    1500
 
-#ifdef VXGE_DEBUG_ASSERT
+#define VXGE_HW_MAX_ROM_IMAGES                 8
+
+struct eprom_image {
+       u8 is_valid:1;
+       u8 index;
+       u8 type;
+       u16 version;
+};
 
+#ifdef VXGE_DEBUG_ASSERT
 /**
  * vxge_assert
  * @test: C-condition to check
  * compilation
  * time.
  */
-#define vxge_assert(test) { \
-       if (!(test)) \
-               vxge_os_bug("bad cond: "#test" at %s:%d\n", \
-                               __FILE__, __LINE__); }
+#define vxge_assert(test) BUG_ON(!(test))
 #else
 #define vxge_assert(test)
 #endif /* end of VXGE_DEBUG_ASSERT */
 
 /**
- * enum enum vxge_debug_level
+ * enum vxge_debug_level
  * @VXGE_NONE: debug disabled
  * @VXGE_ERR: all errors going to be logged out
  * @VXGE_TRACE: all errors plus all kind of verbose tracing print outs
@@ -158,6 +156,47 @@ enum vxge_hw_device_link_state {
        VXGE_HW_LINK_UP
 };
 
+/**
+ * enum vxge_hw_fw_upgrade_code - FW upgrade return codes.
+ * @VXGE_HW_FW_UPGRADE_OK: All OK send next 16 bytes
+ * @VXGE_HW_FW_UPGRADE_DONE:  upload completed
+ * @VXGE_HW_FW_UPGRADE_ERR:  upload error
+ * @VXGE_FW_UPGRADE_BYTES2SKIP:  skip bytes in the stream
+ *
+ */
+enum vxge_hw_fw_upgrade_code {
+       VXGE_HW_FW_UPGRADE_OK           = 0,
+       VXGE_HW_FW_UPGRADE_DONE         = 1,
+       VXGE_HW_FW_UPGRADE_ERR          = 2,
+       VXGE_FW_UPGRADE_BYTES2SKIP      = 3
+};
+
+/**
+ * enum vxge_hw_fw_upgrade_err_code - FW upgrade error codes.
+ * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1: corrupt data
+ * @VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW: buffer overflow
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7: corrupt data
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN: generic error of unknown type
+ * @VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH: failed to flash the image (image check failed)
+ */
+enum vxge_hw_fw_upgrade_err_code {
+       VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1           = 1,
+       VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW          = 2,
+       VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3           = 3,
+       VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4           = 4,
+       VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5           = 5,
+       VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6           = 6,
+       VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7           = 7,
+       VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8           = 8,
+       VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN    = 9,
+       VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH          = 10
+};
+
 /**
  * struct vxge_hw_device_date - Date Format
  * @day: Day
@@ -465,7 +504,6 @@ struct vxge_hw_device_config {
  * See also: vxge_hw_driver_initialize().
  */
 struct vxge_hw_uld_cbs {
-
        void (*link_up)(struct __vxge_hw_device *devh);
        void (*link_down)(struct __vxge_hw_device *devh);
        void (*crit_err)(struct __vxge_hw_device *devh,
@@ -652,6 +690,7 @@ struct __vxge_hw_virtualpath {
        struct vxge_hw_vpath_stats_hw_info      *hw_stats;
        struct vxge_hw_vpath_stats_hw_info      *hw_stats_sav;
        struct vxge_hw_vpath_stats_sw_info      *sw_stats;
+       spinlock_t lock;
 };
 
 /*
@@ -674,9 +713,6 @@ struct __vxge_hw_vpath_handle{
 /**
  * struct __vxge_hw_device  - Hal device object
  * @magic: Magic Number
- * @device_id: PCI Device Id of the adapter
- * @major_revision: PCI Device major revision
- * @minor_revision: PCI Device minor revision
  * @bar0: BAR0 virtual address.
  * @pdev: Physical device handle
 * @config: Configuration passed by the LL driver at initialization
@@ -688,9 +724,6 @@ struct __vxge_hw_device {
        u32                             magic;
 #define VXGE_HW_DEVICE_MAGIC           0x12345678
 #define VXGE_HW_DEVICE_DEAD            0xDEADDEAD
-       u16                             device_id;
-       u8                              major_revision;
-       u8                              minor_revision;
        void __iomem                    *bar0;
        struct pci_dev                  *pdev;
        struct net_device               *ndev;
@@ -731,6 +764,7 @@ struct __vxge_hw_device {
        u32                             debug_level;
        u32                             level_err;
        u32                             level_trace;
+       u16 eprom_versions[VXGE_HW_MAX_ROM_IMAGES];
 };
 
 #define VXGE_HW_INFO_LEN       64
@@ -1413,12 +1447,12 @@ enum vxge_hw_rth_algoritms {
  * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get().
  */
 struct vxge_hw_rth_hash_types {
-       u8 hash_type_tcpipv4_en;
-       u8 hash_type_ipv4_en;
-       u8 hash_type_tcpipv6_en;
-       u8 hash_type_ipv6_en;
-       u8 hash_type_tcpipv6ex_en;
-       u8 hash_type_ipv6ex_en;
+       u8 hash_type_tcpipv4_en:1,
+          hash_type_ipv4_en:1,
+          hash_type_tcpipv6_en:1,
+          hash_type_ipv6_en:1,
+          hash_type_tcpipv6ex_en:1,
+          hash_type_ipv6ex_en:1;
 };
 
 void vxge_hw_device_debug_set(
@@ -2000,7 +2034,7 @@ enum vxge_hw_status
 vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
 
 /**
- * vxge_debug
+ * vxge_debug_ll
  * @level: level of debug verbosity.
  * @mask: mask for the debug
  * @buf: Circular buffer for tracing
@@ -2012,26 +2046,13 @@ vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
  * may be compiled out if DEBUG macro was never defined.
  * See also: enum vxge_debug_level{}.
  */
-
-#define vxge_trace_aux(level, mask, fmt, ...) \
-{\
-               vxge_os_vaprintf(level, mask, fmt, __VA_ARGS__);\
-}
-
-#define vxge_debug(module, level, mask, fmt, ...) { \
-if ((level >= VXGE_TRACE && ((module & VXGE_DEBUG_TRACE_MASK) == module)) || \
-       (level >= VXGE_ERR && ((module & VXGE_DEBUG_ERR_MASK) == module))) {\
-       if ((mask & VXGE_DEBUG_MASK) == mask)\
-               vxge_trace_aux(level, mask, fmt, __VA_ARGS__); \
-} \
-}
-
 #if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK)
-#define vxge_debug_ll(level, mask, fmt, ...) \
-{\
-       vxge_debug(VXGE_COMPONENT_LL, level, mask, fmt, __VA_ARGS__);\
-}
-
+#define vxge_debug_ll(level, mask, fmt, ...) do {                             \
+       if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) ||  \
+           (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\
+               if ((mask & VXGE_DEBUG_MASK) == mask)                          \
+                       printk(fmt "\n", __VA_ARGS__);                         \
+} while (0)
 #else
 #define vxge_debug_ll(level, mask, fmt, ...)
 #endif
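
The rewritten macro folds the old vxge_debug()/vxge_os_vaprintf() indirection into a single printk() guarded by compile-time level and mask checks, so calls that do not match the configured error/trace masks compile away entirely. A hedged usage sketch follows; the wrapper function and vp_id argument are made up for illustration, while VXGE_TRACE and VXGE_DEBUG_INIT are levels/masks the driver already defines.

/* Illustration only; not part of this patch. */
static void example_log_open(u32 vp_id)
{
	/* Emits "...: vpath N opened" when VXGE_TRACE logging for this
	 * component is compiled in and VXGE_DEBUG_INIT is in the mask.
	 */
	vxge_debug_ll(VXGE_TRACE, VXGE_DEBUG_INIT,
		      "%s: vpath %u opened", __func__, vp_id);
}
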
@@ -2051,4 +2072,26 @@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
 
 enum vxge_hw_status
 __vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);
+
+#define VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT 5
+#define VXGE_HW_MAX_POLLING_COUNT 100
+
+void
+vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev);
+
+enum vxge_hw_status
+vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
+                            u32 *minor, u32 *build);
+
+enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev);
+
+enum vxge_hw_status
+vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *filebuf,
+                    int size);
+
+enum vxge_hw_status
+vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
+                               struct eprom_image *eprom_image_data);
+
+int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id);
 #endif
index b67746eef923e2cdeb9886130796e375d3b34818..bc9bd10357060e429146f613554923e7e3bda01c 100644 (file)
@@ -11,7 +11,7 @@
  *                 Virtualized Server Adapter.
  * Copyright(c) 2002-2010 Exar Corp.
  ******************************************************************************/
-#include<linux/ethtool.h>
+#include <linux/ethtool.h>
 #include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/etherdevice.h>
@@ -29,7 +29,6 @@
  * Return value:
  * 0 on success.
  */
-
 static int vxge_ethtool_sset(struct net_device *dev, struct ethtool_cmd *info)
 {
        /* We currently only support 10Gb/FULL */
@@ -79,10 +78,9 @@ static int vxge_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
  * Returns driver specific information like name, version, etc. to ethtool.
  */
 static void vxge_ethtool_gdrvinfo(struct net_device *dev,
-                       struct ethtool_drvinfo *info)
+                                 struct ethtool_drvinfo *info)
 {
-       struct vxgedev *vdev;
-       vdev = (struct vxgedev *)netdev_priv(dev);
+       struct vxgedev *vdev = netdev_priv(dev);
        strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(VXGE_DRIVER_NAME));
        strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION));
        strlcpy(info->fw_version, vdev->fw_version, VXGE_HW_FW_STRLEN);
@@ -104,15 +102,14 @@ static void vxge_ethtool_gdrvinfo(struct net_device *dev,
  * buffer area.
  */
 static void vxge_ethtool_gregs(struct net_device *dev,
-                       struct ethtool_regs *regs, void *space)
+                              struct ethtool_regs *regs, void *space)
 {
        int index, offset;
        enum vxge_hw_status status;
        u64 reg;
-       u64 *reg_space = (u64 *) space;
-       struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
-       struct __vxge_hw_device  *hldev = (struct __vxge_hw_device *)
-                                       pci_get_drvdata(vdev->pdev);
+       u64 *reg_space = (u64 *)space;
+       struct vxgedev *vdev = netdev_priv(dev);
+       struct __vxge_hw_device *hldev = vdev->devh;
 
        regs->len = sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
        regs->version = vdev->pdev->subsystem_device;
@@ -147,9 +144,8 @@ static void vxge_ethtool_gregs(struct net_device *dev,
  */
 static int vxge_ethtool_idnic(struct net_device *dev, u32 data)
 {
-       struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
-       struct __vxge_hw_device  *hldev = (struct __vxge_hw_device  *)
-                       pci_get_drvdata(vdev->pdev);
+       struct vxgedev *vdev = netdev_priv(dev);
+       struct __vxge_hw_device *hldev = vdev->devh;
 
        vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON);
        msleep_interruptible(data ? (data * HZ) : VXGE_MAX_FLICKER_TIME);
@@ -168,11 +164,10 @@ static int vxge_ethtool_idnic(struct net_device *dev, u32 data)
  *  void
  */
 static void vxge_ethtool_getpause_data(struct net_device *dev,
-                                       struct ethtool_pauseparam *ep)
+                                      struct ethtool_pauseparam *ep)
 {
-       struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
-       struct __vxge_hw_device  *hldev = (struct __vxge_hw_device  *)
-                       pci_get_drvdata(vdev->pdev);
+       struct vxgedev *vdev = netdev_priv(dev);
+       struct __vxge_hw_device *hldev = vdev->devh;
 
        vxge_hw_device_getpause_data(hldev, 0, &ep->tx_pause, &ep->rx_pause);
 }
@@ -188,11 +183,10 @@ static void vxge_ethtool_getpause_data(struct net_device *dev,
  * int, returns 0 on Success
  */
 static int vxge_ethtool_setpause_data(struct net_device *dev,
-                                       struct ethtool_pauseparam *ep)
+                                     struct ethtool_pauseparam *ep)
 {
-       struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
-       struct __vxge_hw_device  *hldev = (struct __vxge_hw_device  *)
-                       pci_get_drvdata(vdev->pdev);
+       struct vxgedev *vdev = netdev_priv(dev);
+       struct __vxge_hw_device *hldev = vdev->devh;
 
        vxge_hw_device_setpause_data(hldev, 0, ep->tx_pause, ep->rx_pause);
 
@@ -209,9 +203,8 @@ static void vxge_get_ethtool_stats(struct net_device *dev,
        enum vxge_hw_status status;
        enum vxge_hw_status swstatus;
        struct vxge_vpath *vpath = NULL;
-
-       struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
-       struct __vxge_hw_device  *hldev = vdev->devh;
+       struct vxgedev *vdev = netdev_priv(dev);
+       struct __vxge_hw_device *hldev = vdev->devh;
        struct vxge_hw_xmac_stats *xmac_stats;
        struct vxge_hw_device_stats_sw_info *sw_stats;
        struct vxge_hw_device_stats_hw_info *hw_stats;
@@ -574,12 +567,12 @@ static void vxge_get_ethtool_stats(struct net_device *dev,
        kfree(hw_stats);
 }
 
-static void vxge_ethtool_get_strings(struct net_device *dev,
-                             u32 stringset, u8 *data)
+static void vxge_ethtool_get_strings(struct net_device *dev, u32 stringset,
+                                    u8 *data)
 {
        int stat_size = 0;
        int i, j;
-       struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+       struct vxgedev *vdev = netdev_priv(dev);
        switch (stringset) {
        case ETH_SS_STATS:
                vxge_add_string("VPATH STATISTICS%s\t\t\t",
@@ -1066,21 +1059,21 @@ static void vxge_ethtool_get_strings(struct net_device *dev,
 
 static int vxge_ethtool_get_regs_len(struct net_device *dev)
 {
-       struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+       struct vxgedev *vdev = netdev_priv(dev);
 
        return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
 }
 
 static u32 vxge_get_rx_csum(struct net_device *dev)
 {
-       struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+       struct vxgedev *vdev = netdev_priv(dev);
 
        return vdev->rx_csum;
 }
 
 static int vxge_set_rx_csum(struct net_device *dev, u32 data)
 {
-       struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+       struct vxgedev *vdev = netdev_priv(dev);
 
        if (data)
                vdev->rx_csum = 1;
@@ -1102,7 +1095,7 @@ static int vxge_ethtool_op_set_tso(struct net_device *dev, u32 data)
 
 static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
 {
-       struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+       struct vxgedev *vdev = netdev_priv(dev);
 
        switch (sset) {
        case ETH_SS_STATS:
@@ -1119,6 +1112,59 @@ static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
        }
 }
 
+static int vxge_set_flags(struct net_device *dev, u32 data)
+{
+       struct vxgedev *vdev = netdev_priv(dev);
+       enum vxge_hw_status status;
+
+       if (data & ~ETH_FLAG_RXHASH)
+               return -EOPNOTSUPP;
+
+       if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en)
+               return 0;
+
+       if (netif_running(dev) || (vdev->config.rth_steering == NO_STEERING))
+               return -EINVAL;
+
+       vdev->devh->config.rth_en = !!(data & ETH_FLAG_RXHASH);
+
+       /* Enabling RTH requires some of the logic in vxge_device_register and a
+        * vpath reset.  Due to these restrictions, only allow modification
+        * while the interface is down.
+        */
+       status = vxge_reset_all_vpaths(vdev);
+       if (status != VXGE_HW_OK) {
+               vdev->devh->config.rth_en = !vdev->devh->config.rth_en;
+               return -EFAULT;
+       }
+
+       if (vdev->devh->config.rth_en)
+               dev->features |= NETIF_F_RXHASH;
+       else
+               dev->features &= ~NETIF_F_RXHASH;
+
+       return 0;
+}
+
+static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms)
+{
+       struct vxgedev *vdev = netdev_priv(dev);
+
+       if (vdev->max_vpath_supported != VXGE_HW_MAX_VIRTUAL_PATHS) {
+               printk(KERN_INFO "Single Function Mode is required to flash the"
+                      " firmware\n");
+               return -EINVAL;
+       }
+
+       if (netif_running(dev)) {
+               printk(KERN_INFO "Interface %s must be down to flash the "
+                      "firmware\n", dev->name);
+               return -EBUSY;
+       }
+
+       return vxge_fw_upgrade(vdev, parms->data, 1);
+}
+
 static const struct ethtool_ops vxge_ethtool_ops = {
        .get_settings           = vxge_ethtool_gset,
        .set_settings           = vxge_ethtool_sset,
@@ -1140,6 +1186,8 @@ static const struct ethtool_ops vxge_ethtool_ops = {
        .phys_id                = vxge_ethtool_idnic,
        .get_sset_count         = vxge_ethtool_get_sset_count,
        .get_ethtool_stats      = vxge_get_ethtool_stats,
+       .set_flags              = vxge_set_flags,
+       .flash_device           = vxge_fw_flash,
 };
 
 void vxge_initialize_ethtool_ops(struct net_device *ndev)
index 813829f3d0242be97ff22f97bf1724b7db3c8734..5cba4a684f08f6c108e269097fce6101474841c8 100644 (file)
@@ -50,6 +50,8 @@
 #include <net/ip.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/firmware.h>
+#include <linux/net_tstamp.h>
 #include "vxge-main.h"
 #include "vxge-reg.h"
 
@@ -90,7 +92,6 @@ static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac);
 static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
 static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
 static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
-static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
 
 static inline int is_vxge_card_up(struct vxgedev *vdev)
 {
@@ -152,7 +153,7 @@ static void
 vxge_callback_link_up(struct __vxge_hw_device *hldev)
 {
        struct net_device *dev = hldev->ndev;
-       struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+       struct vxgedev *vdev = netdev_priv(dev);
 
        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
                vdev->ndev->name, __func__, __LINE__);
@@ -176,7 +177,7 @@ static void
 vxge_callback_link_down(struct __vxge_hw_device *hldev)
 {
        struct net_device *dev = hldev->ndev;
-       struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+       struct vxgedev *vdev = netdev_priv(dev);
 
        vxge_debug_entryexit(VXGE_TRACE,
                "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
@@ -369,7 +370,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
                 u8 t_code, void *userdata)
 {
        struct vxge_ring *ring = (struct vxge_ring *)userdata;
-       struct  net_device *dev = ring->ndev;
+       struct net_device *dev = ring->ndev;
        unsigned int dma_sizes;
        void *first_dtr = NULL;
        int dtr_cnt = 0;
@@ -513,6 +514,23 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
                else
                        skb_checksum_none_assert(skb);
 
+
+               if (ring->rx_hwts) {
+                       struct skb_shared_hwtstamps *skb_hwts;
+                       u32 ns = *(u32 *)(skb->head + pkt_length);
+
+                       skb_hwts = skb_hwtstamps(skb);
+                       skb_hwts->hwtstamp = ns_to_ktime(ns);
+                       skb_hwts->syststamp.tv64 = 0;
+               }
+
+               /* rth_hash_type and rth_it_hit are non-zero regardless of
+                * whether rss is enabled.  Only the rth_value is zero/non-zero
+                * if rss is disabled/enabled, so key off of that.
+                */
+               if (ext_info.rth_value)
+                       skb->rxhash = ext_info.rth_value;
+
                vxge_rx_complete(ring, skb, ext_info.vlan,
                        pkt_length, &ext_info);
 
@@ -670,7 +688,7 @@ static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
        struct vxge_vpath *vpath = NULL;
        struct __vxge_hw_device *hldev;
 
-       hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+       hldev = pci_get_drvdata(vdev->pdev);
 
        mac_address = (u8 *)&mac_addr;
        memcpy(mac_address, mac_header, ETH_ALEN);
@@ -769,7 +787,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_OK;
        }
 
-       vdev = (struct vxgedev *)netdev_priv(dev);
+       vdev = netdev_priv(dev);
 
        if (unlikely(!is_vxge_card_up(vdev))) {
                vxge_debug_tx(VXGE_ERR,
@@ -1034,7 +1052,7 @@ static void vxge_set_multicast(struct net_device *dev)
        vxge_debug_entryexit(VXGE_TRACE,
                "%s:%d", __func__, __LINE__);
 
-       vdev = (struct vxgedev *)netdev_priv(dev);
+       vdev = netdev_priv(dev);
        hldev = (struct __vxge_hw_device  *)vdev->devh;
 
        if (unlikely(!is_vxge_card_up(vdev)))
@@ -1094,7 +1112,7 @@ static void vxge_set_multicast(struct net_device *dev)
                /* Delete previous MC's */
                for (i = 0; i < mcast_cnt; i++) {
                        list_for_each_safe(entry, next, list_head) {
-                               mac_entry = (struct vxge_mac_addrs *) entry;
+                               mac_entry = (struct vxge_mac_addrs *)entry;
                                /* Copy the mac address to delete */
                                mac_address = (u8 *)&mac_entry->macaddr;
                                memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
@@ -1137,7 +1155,7 @@ _set_all_mcast:
                /* Delete previous MC's */
                for (i = 0; i < mcast_cnt; i++) {
                        list_for_each_safe(entry, next, list_head) {
-                               mac_entry = (struct vxge_mac_addrs *) entry;
+                               mac_entry = (struct vxge_mac_addrs *)entry;
                                /* Copy the mac address to delete */
                                mac_address = (u8 *)&mac_entry->macaddr;
                                memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
@@ -1184,14 +1202,14 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
 {
        struct sockaddr *addr = p;
        struct vxgedev *vdev;
-       struct __vxge_hw_device  *hldev;
+       struct __vxge_hw_device *hldev;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct macInfo mac_info_new, mac_info_old;
        int vpath_idx = 0;
 
        vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
 
-       vdev = (struct vxgedev *)netdev_priv(dev);
+       vdev = netdev_priv(dev);
        hldev = vdev->devh;
 
        if (!is_valid_ether_addr(addr->sa_data))
@@ -1292,8 +1310,13 @@ static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
 static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
 {
        struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
+       struct __vxge_hw_device *hldev;
        int msix_id;
 
+       hldev = pci_get_drvdata(vdev->pdev);
+
+       vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id);
+
        vxge_hw_vpath_intr_disable(vpath->handle);
 
        if (vdev->config.intr_type == INTA)
@@ -1423,6 +1446,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
        }
 
        if (event == VXGE_LL_FULL_RESET) {
+               vxge_hw_device_wait_receive_idle(vdev->devh);
                vxge_hw_device_intr_disable(vdev->devh);
 
                switch (vdev->cric_err_event) {
@@ -1608,8 +1632,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
        int budget_org = budget;
        struct vxge_ring *ring;
 
-       struct __vxge_hw_device  *hldev = (struct __vxge_hw_device *)
-               pci_get_drvdata(vdev->pdev);
+       struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev);
 
        for (i = 0; i < vdev->no_of_vpath; i++) {
                ring = &vdev->vpaths[i].ring;
@@ -1645,11 +1668,11 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
  */
 static void vxge_netpoll(struct net_device *dev)
 {
-       struct __vxge_hw_device  *hldev;
+       struct __vxge_hw_device *hldev;
        struct vxgedev *vdev;
 
-       vdev = (struct vxgedev *)netdev_priv(dev);
-       hldev = (struct __vxge_hw_device  *)pci_get_drvdata(vdev->pdev);
+       vdev = netdev_priv(dev);
+       hldev = pci_get_drvdata(vdev->pdev);
 
        vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
 
@@ -1689,15 +1712,6 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
                mtable[index] = index % vdev->no_of_vpath;
        }
 
-       /* Fill RTH hash types */
-       hash_types.hash_type_tcpipv4_en   = vdev->config.rth_hash_type_tcpipv4;
-       hash_types.hash_type_ipv4_en      = vdev->config.rth_hash_type_ipv4;
-       hash_types.hash_type_tcpipv6_en   = vdev->config.rth_hash_type_tcpipv6;
-       hash_types.hash_type_ipv6_en      = vdev->config.rth_hash_type_ipv6;
-       hash_types.hash_type_tcpipv6ex_en =
-                                       vdev->config.rth_hash_type_tcpipv6ex;
-       hash_types.hash_type_ipv6ex_en    = vdev->config.rth_hash_type_ipv6ex;
-
        /* set indirection table, bucket-to-vpath mapping */
        status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
                                                vdev->no_of_vpath,
@@ -1710,12 +1724,21 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
                return status;
        }
 
+       /* Fill RTH hash types */
+       hash_types.hash_type_tcpipv4_en   = vdev->config.rth_hash_type_tcpipv4;
+       hash_types.hash_type_ipv4_en      = vdev->config.rth_hash_type_ipv4;
+       hash_types.hash_type_tcpipv6_en   = vdev->config.rth_hash_type_tcpipv6;
+       hash_types.hash_type_ipv6_en      = vdev->config.rth_hash_type_ipv6;
+       hash_types.hash_type_tcpipv6ex_en =
+                                       vdev->config.rth_hash_type_tcpipv6ex;
+       hash_types.hash_type_ipv6ex_en    = vdev->config.rth_hash_type_ipv6ex;
+
        /*
-       * Because the itable_set() method uses the active_table field
-       * for the target virtual path the RTH config should be updated
-       * for all VPATHs. The h/w only uses the lowest numbered VPATH
-       * when steering frames.
-       */
+        * Because the itable_set() method uses the active_table field
+        * for the target virtual path the RTH config should be updated
+        * for all VPATHs. The h/w only uses the lowest numbered VPATH
+        * when steering frames.
+        */
         for (index = 0; index < vdev->no_of_vpath; index++) {
                status = vxge_hw_vpath_rts_rth_set(
                                vdev->vpaths[index].handle,
@@ -1797,7 +1820,7 @@ static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
 {
        struct list_head *entry, *next;
        u64 del_mac = 0;
-       u8 *mac_address = (u8 *) (&del_mac);
+       u8 *mac_address = (u8 *)(&del_mac);
 
        /* Copy the mac address to delete from the list */
        memcpy(mac_address, mac->macaddr, ETH_ALEN);
@@ -1928,7 +1951,7 @@ static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
 }
 
 /* reset vpaths */
-static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
+enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
 {
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_vpath *vpath;
@@ -1988,8 +2011,23 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
 
        for (i = 0; i < vdev->no_of_vpath; i++) {
                vpath = &vdev->vpaths[i];
-
                vxge_assert(vpath->is_configured);
+
+               if (!vdev->titan1) {
+                       struct vxge_hw_vp_config *vcfg;
+                       vcfg = &vdev->devh->config.vp_config[vpath->device_id];
+
+                       vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A;
+                       vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B;
+                       vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C;
+                       vcfg->tti.uec_a = TTI_T1A_TX_UFC_A;
+                       vcfg->tti.uec_b = TTI_T1A_TX_UFC_B;
+                       vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu);
+                       vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu);
+                       vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL;
+                       vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL;
+               }
+
                attr.vp_id = vpath->device_id;
                attr.fifo_attr.callback = vxge_xmit_compl;
                attr.fifo_attr.txdl_term = vxge_tx_term;
@@ -2024,6 +2062,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
                                vdev->config.fifo_indicate_max_pkts;
                        vpath->ring.rx_vector_no = 0;
                        vpath->ring.rx_csum = vdev->rx_csum;
+                       vpath->ring.rx_hwts = vdev->rx_hwts;
                        vpath->is_open = 1;
                        vdev->vp_handles[i] = vpath->handle;
                        vpath->ring.gro_enable = vdev->config.gro_enable;
@@ -2062,18 +2101,18 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
        struct __vxge_hw_device *hldev;
        u64 reason;
        enum vxge_hw_status status;
-       struct vxgedev *vdev = (struct vxgedev *) dev_id;;
+       struct vxgedev *vdev = (struct vxgedev *)dev_id;
 
        vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
 
        dev = vdev->ndev;
-       hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
+       hldev = pci_get_drvdata(vdev->pdev);
 
        if (pci_channel_offline(vdev->pdev))
                return IRQ_NONE;
 
        if (unlikely(!is_vxge_card_up(vdev)))
-               return IRQ_NONE;
+               return IRQ_HANDLED;
 
        status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode,
                        &reason);
@@ -2301,8 +2340,8 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
 
 static void vxge_rem_isr(struct vxgedev *vdev)
 {
-       struct __vxge_hw_device  *hldev;
-       hldev = (struct __vxge_hw_device  *) pci_get_drvdata(vdev->pdev);
+       struct __vxge_hw_device *hldev;
+       hldev = pci_get_drvdata(vdev->pdev);
 
 #ifdef CONFIG_PCI_MSI
        if (vdev->config.intr_type == MSI_X) {
@@ -2542,8 +2581,8 @@ vxge_open(struct net_device *dev)
        vxge_debug_entryexit(VXGE_TRACE,
                "%s: %s:%d", dev->name, __func__, __LINE__);
 
-       vdev = (struct vxgedev *)netdev_priv(dev);
-       hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+       vdev = netdev_priv(dev);
+       hldev = pci_get_drvdata(vdev->pdev);
        function_mode = vdev->config.device_hw_info.function_mode;
 
        /* make sure you have link off by default every time Nic is
@@ -2598,6 +2637,8 @@ vxge_open(struct net_device *dev)
                        goto out2;
                }
        }
+       printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
+              hldev->config.rth_en ? "enabled" : "disabled");
 
        for (i = 0; i < vdev->no_of_vpath; i++) {
                vpath = &vdev->vpaths[i];
@@ -2683,9 +2724,10 @@ vxge_open(struct net_device *dev)
                vxge_os_timer(vdev->vp_reset_timer,
                        vxge_poll_vp_reset, vdev, (HZ/2));
 
-       if (vdev->vp_lockup_timer.function == NULL)
-               vxge_os_timer(vdev->vp_lockup_timer,
-                       vxge_poll_vp_lockup, vdev, (HZ/2));
+       /* There is no need to check for RxD leak and RxD lookup on Titan1A */
+       if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
+               vxge_os_timer(vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
+                             HZ / 2);
 
        set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
 
@@ -2767,8 +2809,8 @@ static int do_vxge_close(struct net_device *dev, int do_io)
        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
                dev->name, __func__, __LINE__);
 
-       vdev = (struct vxgedev *)netdev_priv(dev);
-       hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+       vdev = netdev_priv(dev);
+       hldev = pci_get_drvdata(vdev->pdev);
 
        if (unlikely(!is_vxge_card_up(vdev)))
                return 0;
@@ -2778,7 +2820,6 @@ static int do_vxge_close(struct net_device *dev, int do_io)
        while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
                msleep(50);
 
-       clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
        if (do_io) {
                /* Put the vpath back in normal mode */
                vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
@@ -2818,10 +2859,17 @@ static int do_vxge_close(struct net_device *dev, int do_io)
 
                smp_wmb();
        }
-       del_timer_sync(&vdev->vp_lockup_timer);
+
+       if (vdev->titan1)
+               del_timer_sync(&vdev->vp_lockup_timer);
 
        del_timer_sync(&vdev->vp_reset_timer);
 
+       if (do_io)
+               vxge_hw_device_wait_receive_idle(hldev);
+
+       clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
+
        /* Disable napi */
        if (vdev->config.intr_type != MSI_X)
                napi_disable(&vdev->napi);
@@ -2838,8 +2886,6 @@ static int do_vxge_close(struct net_device *dev, int do_io)
        if (do_io)
                vxge_hw_device_intr_disable(vdev->devh);
 
-       mdelay(1000);
-
        vxge_rem_isr(vdev);
 
        vxge_napi_del_all(vdev);
@@ -2954,6 +3000,101 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
        return net_stats;
 }
 
+static enum vxge_hw_status vxge_timestamp_config(struct vxgedev *vdev,
+                                                int enable)
+{
+       enum vxge_hw_status status;
+       u64 val64;
+
+       /* Timestamp is passed to the driver via the FCS, therefore we
+        * must disable the FCS stripping by the adapter.  Since this is
+        * required for the driver to load (due to a hardware bug),
+        * there is no need to do anything special here.
+        */
+       if (enable)
+               val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
+                       VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
+                       VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);
+       else
+               val64 = 0;
+
+       status = vxge_hw_mgmt_reg_write(vdev->devh,
+                                       vxge_hw_mgmt_reg_type_mrpcim,
+                                       0,
+                                       offsetof(struct vxge_hw_mrpcim_reg,
+                                                xmac_timestamp),
+                                       val64);
+       vxge_hw_device_flush_io(vdev->devh);
+       return status;
+}
+
+static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
+{
+       struct hwtstamp_config config;
+       enum vxge_hw_status status;
+       int i;
+
+       if (copy_from_user(&config, data, sizeof(config)))
+               return -EFAULT;
+
+       /* reserved for future extensions */
+       if (config.flags)
+               return -EINVAL;
+
+       /* Transmit HW Timestamp not supported */
+       switch (config.tx_type) {
+       case HWTSTAMP_TX_OFF:
+               break;
+       case HWTSTAMP_TX_ON:
+       default:
+               return -ERANGE;
+       }
+
+       switch (config.rx_filter) {
+       case HWTSTAMP_FILTER_NONE:
+               status = vxge_timestamp_config(vdev, 0);
+               if (status != VXGE_HW_OK)
+                       return -EFAULT;
+
+               vdev->rx_hwts = 0;
+               config.rx_filter = HWTSTAMP_FILTER_NONE;
+               break;
+
+       case HWTSTAMP_FILTER_ALL:
+       case HWTSTAMP_FILTER_SOME:
+       case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+       case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+       case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+               status = vxge_timestamp_config(vdev, 1);
+               if (status != VXGE_HW_OK)
+                       return -EFAULT;
+
+               vdev->rx_hwts = 1;
+               config.rx_filter = HWTSTAMP_FILTER_ALL;
+               break;
+
+       default:
+                return -ERANGE;
+       }
+
+       for (i = 0; i < vdev->no_of_vpath; i++)
+               vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts;
+
+       if (copy_to_user(data, &config, sizeof(config)))
+               return -EFAULT;
+
+       return 0;
+}
+
 /**
  * vxge_ioctl
  * @dev: Device pointer.
@@ -2966,7 +3107,20 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
  */
 static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
-       return -EOPNOTSUPP;
+       struct vxgedev *vdev = netdev_priv(dev);
+       int ret;
+
+       switch (cmd) {
+       case SIOCSHWTSTAMP:
+               ret = vxge_hwtstamp_ioctl(vdev, rq->ifr_data);
+               if (ret)
+                       return ret;
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
 }
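
For context, the new SIOCSHWTSTAMP path is driven from userspace with the standard hwtstamp_config handshake; the handler above rejects any TX timestamping request and coerces every PTP RX filter to HWTSTAMP_FILTER_ALL. A minimal userspace sketch under those assumptions (the function name is illustrative, not from the patch):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Illustrative only: ask a vxge interface to timestamp all received frames. */
static int request_rx_timestamps(const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_OFF,     /* TX stamping is rejected */
		.rx_filter = HWTSTAMP_FILTER_ALL, /* stamp every received frame */
	};
	struct ifreq ifr;
	int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	ret = ioctl(fd, SIOCSHWTSTAMP, &ifr);
	if (ret < 0)
		perror("SIOCSHWTSTAMP");

	close(fd);
	return ret;
}
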
 
 /**
@@ -2984,7 +3138,7 @@ vxge_tx_watchdog(struct net_device *dev)
 
        vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
 
-       vdev = (struct vxgedev *)netdev_priv(dev);
+       vdev = netdev_priv(dev);
 
        vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;
 
@@ -3012,7 +3166,7 @@ vxge_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
 
        vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
 
-       vdev = (struct vxgedev *)netdev_priv(dev);
+       vdev = netdev_priv(dev);
 
        vpath = &vdev->vpaths[0];
        if ((NULL == grp) && (vpath->is_open)) {
@@ -3061,7 +3215,7 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
        struct vxge_vpath *vpath;
        int vp_id;
 
-       vdev = (struct vxgedev *)netdev_priv(dev);
+       vdev = netdev_priv(dev);
 
        /* Add these vlan to the vid table */
        for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
@@ -3088,7 +3242,7 @@ vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 
        vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
 
-       vdev = (struct vxgedev *)netdev_priv(dev);
+       vdev = netdev_priv(dev);
 
        vlan_group_set_device(vdev->vlgrp, vid, NULL);
 
@@ -3125,6 +3279,19 @@ static const struct net_device_ops vxge_netdev_ops = {
 #endif
 };
 
+static int __devinit vxge_device_revision(struct vxgedev *vdev)
+{
+       int ret;
+       u8 revision;
+
+       ret = pci_read_config_byte(vdev->pdev, PCI_REVISION_ID, &revision);
+       if (ret)
+               return -EIO;
+
+       vdev->titan1 = (revision == VXGE_HW_TITAN1_PCI_REVISION);
+       return 0;
+}
+
 static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
                                          struct vxge_config *config,
                                          int high_dma, int no_of_vpath,
@@ -3163,6 +3330,11 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
        vdev->pdev = hldev->pdev;
        memcpy(&vdev->config, config, sizeof(struct vxge_config));
        vdev->rx_csum = 1;      /* Enable Rx CSUM by default. */
+       vdev->rx_hwts = 0;
+
+       ret = vxge_device_revision(vdev);
+       if (ret < 0)
+               goto _out1;
 
        SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
 
@@ -3178,6 +3350,11 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
 
        vxge_initialize_ethtool_ops(ndev);
 
+       if (vdev->config.rth_steering != NO_STEERING) {
+               ndev->features |= NETIF_F_RXHASH;
+               hldev->config.rth_en = VXGE_HW_RTH_ENABLE;
+       }
+
        /* Allocate memory for vpath */
        vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
                                no_of_vpath, GFP_KERNEL);
@@ -3227,6 +3404,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
                "%s: Ethernet device registered",
                ndev->name);
 
+       hldev->ndev = ndev;
        *vdev_out = vdev;
 
        /* Resetting the Device stats */
@@ -3261,36 +3439,29 @@ _out0:
  *
  * This function will unregister and free network device
  */
-static void
-vxge_device_unregister(struct __vxge_hw_device *hldev)
+static void vxge_device_unregister(struct __vxge_hw_device *hldev)
 {
        struct vxgedev *vdev;
        struct net_device *dev;
        char buf[IFNAMSIZ];
-#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
-       (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
-       u32 level_trace;
-#endif
 
        dev = hldev->ndev;
        vdev = netdev_priv(dev);
-#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
-       (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
-       level_trace = vdev->level_trace;
-#endif
-       vxge_debug_entryexit(level_trace,
-               "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
 
-       memcpy(buf, vdev->ndev->name, IFNAMSIZ);
+       vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name,
+                            __func__, __LINE__);
+
+       memcpy(buf, dev->name, IFNAMSIZ);
 
        /* in 2.6 will call stop() if device is up */
        unregister_netdev(dev);
 
        flush_scheduled_work();
 
-       vxge_debug_init(level_trace, "%s: ethernet device unregistered", buf);
-       vxge_debug_entryexit(level_trace,
-               "%s: %s:%d  Exiting...", buf, __func__, __LINE__);
+       vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
+                       buf);
+       vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d  Exiting...", buf,
+                            __func__, __LINE__);
 }
 
 /*
@@ -3304,7 +3475,7 @@ vxge_callback_crit_err(struct __vxge_hw_device *hldev,
                        enum vxge_hw_event type, u64 vp_id)
 {
        struct net_device *dev = hldev->ndev;
-       struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+       struct vxgedev *vdev = netdev_priv(dev);
        struct vxge_vpath *vpath = NULL;
        int vpath_idx;
 
@@ -3813,8 +3984,7 @@ static int vxge_pm_resume(struct pci_dev *pdev)
 static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
 {
-       struct __vxge_hw_device  *hldev =
-               (struct __vxge_hw_device  *) pci_get_drvdata(pdev);
+       struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
        struct net_device *netdev = hldev->ndev;
 
        netif_device_detach(netdev);
@@ -3843,8 +4013,7 @@ static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
  */
 static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
 {
-       struct __vxge_hw_device  *hldev =
-               (struct __vxge_hw_device  *) pci_get_drvdata(pdev);
+       struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
        struct net_device *netdev = hldev->ndev;
 
        struct vxgedev *vdev = netdev_priv(netdev);
@@ -3869,8 +4038,7 @@ static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
  */
 static void vxge_io_resume(struct pci_dev *pdev)
 {
-       struct __vxge_hw_device  *hldev =
-               (struct __vxge_hw_device  *) pci_get_drvdata(pdev);
+       struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
        struct net_device *netdev = hldev->ndev;
 
        if (netif_running(netdev)) {
@@ -3914,6 +4082,142 @@ static inline u32 vxge_get_num_vfs(u64 function_mode)
        return num_functions;
 }
 
+int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
+{
+       struct __vxge_hw_device *hldev = vdev->devh;
+       u32 maj, min, bld, cmaj, cmin, cbld;
+       enum vxge_hw_status status;
+       const struct firmware *fw;
+       int ret;
+
+       ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
+       if (ret) {
+               vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found",
+                               VXGE_DRIVER_NAME, fw_name);
+               goto out;
+       }
+
+       /* Load the new firmware onto the adapter */
+       status = vxge_update_fw_image(hldev, fw->data, fw->size);
+       if (status != VXGE_HW_OK) {
+               vxge_debug_init(VXGE_ERR,
+                               "%s: FW image download to adapter failed '%s'.",
+                               VXGE_DRIVER_NAME, fw_name);
+               ret = -EIO;
+               goto out;
+       }
+
+       /* Read the version of the new firmware */
+       status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld);
+       if (status != VXGE_HW_OK) {
+               vxge_debug_init(VXGE_ERR,
+                               "%s: Upgrade read version failed '%s'.",
+                               VXGE_DRIVER_NAME, fw_name);
+               ret = -EIO;
+               goto out;
+       }
+
+       cmaj = vdev->config.device_hw_info.fw_version.major;
+       cmin = vdev->config.device_hw_info.fw_version.minor;
+       cbld = vdev->config.device_hw_info.fw_version.build;
+       /* It's possible the version in /lib/firmware is not the latest version.
+        * If so, we could get into a loop of trying to upgrade to the latest
+        * and flashing the older version.
+        */
+       if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) &&
+           !override) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n",
+              maj, min, bld);
+
+       /* Flash the adapter with the new firmware */
+       status = vxge_hw_flash_fw(hldev);
+       if (status != VXGE_HW_OK) {
+               vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.",
+                               VXGE_DRIVER_NAME, fw_name);
+               ret = -EIO;
+               goto out;
+       }
+
+       printk(KERN_NOTICE "Upgrade of firmware successful!  Adapter must be "
+              "hard reset before using, thus requiring a system reboot or a "
+              "hotplug event.\n");
+
+out:
+       return ret;
+}
+
+static int vxge_probe_fw_update(struct vxgedev *vdev)
+{
+       u32 maj, min, bld;
+       int ret, gpxe = 0;
+       char *fw_name;
+
+       maj = vdev->config.device_hw_info.fw_version.major;
+       min = vdev->config.device_hw_info.fw_version.minor;
+       bld = vdev->config.device_hw_info.fw_version.build;
+
+       if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER)
+               return 0;
+
+       /* Ignore the build number when determining if the current firmware is
+        * "too new" to load the driver
+        */
+       if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) {
+               vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known "
+                               "version, unable to load driver\n",
+                               VXGE_DRIVER_NAME);
+               return -EINVAL;
+       }
+
+       /* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to
+        * work with this driver.
+        */
+       if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) {
+               vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be "
+                               "upgraded\n", VXGE_DRIVER_NAME, maj, min, bld);
+               return -EINVAL;
+       }
+
+       /* If file not specified, determine gPXE or not */
+       if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) {
+               int i;
+               for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++)
+                       if (vdev->devh->eprom_versions[i]) {
+                               gpxe = 1;
+                               break;
+                       }
+       }
+       if (gpxe)
+               fw_name = "vxge/X3fw-pxe.ncf";
+       else
+               fw_name = "vxge/X3fw.ncf";
+
+       ret = vxge_fw_upgrade(vdev, fw_name, 0);
+       /* -EINVAL and -ENOENT are not fatal errors for flashing firmware on
+        * probe, so ignore them
+        */
+       if (ret != -EINVAL && ret != -ENOENT)
+               return -EIO;
+       else
+               ret = 0;
+
+       if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
+           VXGE_FW_VER(maj, min, 0)) {
+               vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
+                               " be used with this driver.\n"
+                               "Please get the latest version from "
+                               "ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE",
+                               VXGE_DRIVER_NAME, maj, min, bld);
+               return -EINVAL;
+       }
+
+       return ret;
+}
+
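For reference, vxge_fw_upgrade() above relies on the standard firmware loader, which resolves the "vxge/X3fw.ncf" / "vxge/X3fw-pxe.ncf" names against the system firmware path (conventionally /lib/firmware). A generic sketch of the request/release pattern, not verbatim from this patch (do_something_with() and dev are placeholders):

const struct firmware *fw;
int ret;

ret = request_firmware(&fw, "vxge/X3fw.ncf", dev);   /* dev: e.g. &pdev->dev */
if (ret)
	return ret;                /* e.g. -ENOENT if the blob is not installed */

do_something_with(fw->data, fw->size);   /* valid until release_firmware() */

release_firmware(fw);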
 /**
  * vxge_probe
  * @pdev : structure containing the PCI related information of the device.
@@ -3928,7 +4232,7 @@ static inline u32 vxge_get_num_vfs(u64 function_mode)
 static int __devinit
 vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 {
-       struct __vxge_hw_device  *hldev;
+       struct __vxge_hw_device *hldev;
        enum vxge_hw_status status;
        int ret;
        int high_dma = 0;
@@ -4072,16 +4376,6 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
                goto _exit3;
        }
 
-       if (ll_config->device_hw_info.fw_version.major !=
-               VXGE_DRIVER_FW_VERSION_MAJOR) {
-               vxge_debug_init(VXGE_ERR,
-                       "%s: Incorrect firmware version."
-                       "Please upgrade the firmware to version 1.x.x",
-                       VXGE_DRIVER_NAME);
-               ret = -EINVAL;
-               goto _exit3;
-       }
-
        vpath_mask = ll_config->device_hw_info.vpath_mask;
        if (vpath_mask == 0) {
                vxge_debug_ll_config(VXGE_TRACE,
@@ -4145,11 +4439,37 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
                        goto _exit3;
        }
 
+       if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
+                       ll_config->device_hw_info.fw_version.minor,
+                       ll_config->device_hw_info.fw_version.build) >=
+           VXGE_EPROM_FW_VER) {
+               struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES];
+
+               status = vxge_hw_vpath_eprom_img_ver_get(hldev, img);
+               if (status != VXGE_HW_OK) {
+                       vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed",
+                                       VXGE_DRIVER_NAME);
+                       /* This is a non-fatal error, continue */
+               }
+
+               for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
+                       hldev->eprom_versions[i] = img[i].version;
+                       if (!img[i].is_valid)
+                               break;
+                       vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
+                                       "%d.%d.%d.%d\n", VXGE_DRIVER_NAME, i,
+                                       VXGE_EPROM_IMG_MAJOR(img[i].version),
+                                       VXGE_EPROM_IMG_MINOR(img[i].version),
+                                       VXGE_EPROM_IMG_FIX(img[i].version),
+                                       VXGE_EPROM_IMG_BUILD(img[i].version));
+               }
+       }
+
        /* if FCS stripping is not disabled in MAC fail driver load */
-       if (vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask) != VXGE_HW_OK) {
-               vxge_debug_init(VXGE_ERR,
-                       "%s: FCS stripping is not disabled in MAC"
-                       " failing driver load", VXGE_DRIVER_NAME);
+       status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask);
+       if (status != VXGE_HW_OK) {
+               vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC"
+                               " failing driver load", VXGE_DRIVER_NAME);
                ret = -EINVAL;
                goto _exit4;
        }
@@ -4163,28 +4483,32 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
        ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
        ll_config->addr_learn_en = addr_learn_en;
        ll_config->rth_algorithm = RTH_ALG_JENKINS;
-       ll_config->rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4;
-       ll_config->rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE;
-       ll_config->rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
-       ll_config->rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
-       ll_config->rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
-       ll_config->rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
+       ll_config->rth_hash_type_tcpipv4 = 1;
+       ll_config->rth_hash_type_ipv4 = 0;
+       ll_config->rth_hash_type_tcpipv6 = 0;
+       ll_config->rth_hash_type_ipv6 = 0;
+       ll_config->rth_hash_type_tcpipv6ex = 0;
+       ll_config->rth_hash_type_ipv6ex = 0;
        ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
        ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
        ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
 
-       if (vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
-               &vdev)) {
+       ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
+                                  &vdev);
+       if (ret) {
                ret = -EINVAL;
                goto _exit4;
        }
 
+       ret = vxge_probe_fw_update(vdev);
+       if (ret)
+               goto _exit5;
+
        vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
        VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
                vxge_hw_device_trace_level_get(hldev));
 
        /* set private HW device info */
-       hldev->ndev = vdev->ndev;
        vdev->mtu = VXGE_HW_DEFAULT_MTU;
        vdev->bar0 = attr.bar0;
        vdev->max_vpath_supported = max_vpath_supported;
@@ -4286,7 +4610,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
                                "%s: mac_addr_list : memory allocation failed",
                                vdev->ndev->name);
                        ret = -EPERM;
-                       goto _exit5;
+                       goto _exit6;
                }
                macaddr = (u8 *)&entry->macaddr;
                memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
@@ -4326,10 +4650,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
        kfree(ll_config);
        return 0;
 
-_exit5:
+_exit6:
        for (i = 0; i < vdev->no_of_vpath; i++)
                vxge_free_mac_add_list(&vdev->vpaths[i]);
-
+_exit5:
        vxge_device_unregister(hldev);
 _exit4:
        pci_disable_sriov(pdev);
@@ -4354,34 +4678,25 @@ _exit0:
  * Description: This function is called by the Pci subsystem to release a
  * PCI device and free up all resource held up by the device.
  */
-static void __devexit
-vxge_remove(struct pci_dev *pdev)
+static void __devexit vxge_remove(struct pci_dev *pdev)
 {
-       struct __vxge_hw_device  *hldev;
+       struct __vxge_hw_device *hldev;
        struct vxgedev *vdev = NULL;
        struct net_device *dev;
        int i = 0;
-#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
-       (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
-       u32 level_trace;
-#endif
 
-       hldev = (struct __vxge_hw_device  *) pci_get_drvdata(pdev);
+       hldev = pci_get_drvdata(pdev);
 
        if (hldev == NULL)
                return;
+
        dev = hldev->ndev;
        vdev = netdev_priv(dev);
 
-#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
-       (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
-       level_trace = vdev->level_trace;
-#endif
-       vxge_debug_entryexit(level_trace,
-               "%s:%d", __func__, __LINE__);
+       vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
 
-       vxge_debug_init(level_trace,
-               "%s : removing PCI device...", __func__);
+       vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
+                       __func__);
        vxge_device_unregister(hldev);
 
        for (i = 0; i < vdev->no_of_vpath; i++) {
@@ -4399,16 +4714,16 @@ vxge_remove(struct pci_dev *pdev)
        /* we are safe to free it now */
        free_netdev(dev);
 
-       vxge_debug_init(level_trace,
-               "%s:%d  Device unregistered", __func__, __LINE__);
+       vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
+                       __func__, __LINE__);
 
        vxge_hw_device_terminate(hldev);
 
        pci_disable_device(pdev);
        pci_release_regions(pdev);
        pci_set_drvdata(pdev, NULL);
-       vxge_debug_entryexit(level_trace,
-               "%s:%d  Exiting...", __func__, __LINE__);
+       vxge_debug_entryexit(vdev->level_trace, "%s:%d  Exiting...", __func__,
+                            __LINE__);
 }
 
 static struct pci_error_handlers vxge_err_handler = {
index de64536cb7d0d1c32943b6d7b64762e306f8bcb8..953cb0ded3e1897058b0e99724f9026e45d161da 100644 (file)
@@ -29,6 +29,9 @@
 
 #define PCI_DEVICE_ID_TITAN_WIN                0x5733
 #define PCI_DEVICE_ID_TITAN_UNI                0x5833
+#define VXGE_HW_TITAN1_PCI_REVISION    1
+#define VXGE_HW_TITAN1A_PCI_REVISION   2
+
 #define        VXGE_USE_DEFAULT                0xffffffff
 #define VXGE_HW_VPATH_MSIX_ACTIVE      4
 #define VXGE_ALARM_MSIX_ID             2
 
 #define VXGE_TTI_BTIMER_VAL 250000
 
-#define VXGE_TTI_LTIMER_VAL 1000
-#define VXGE_TTI_RTIMER_VAL 0
-#define VXGE_RTI_BTIMER_VAL 250
-#define VXGE_RTI_LTIMER_VAL 100
-#define VXGE_RTI_RTIMER_VAL 0
+#define VXGE_TTI_LTIMER_VAL    1000
+#define VXGE_T1A_TTI_LTIMER_VAL        80
+#define VXGE_TTI_RTIMER_VAL    0
+#define VXGE_T1A_TTI_RTIMER_VAL        400
+#define VXGE_RTI_BTIMER_VAL    250
+#define VXGE_RTI_LTIMER_VAL    100
+#define VXGE_RTI_RTIMER_VAL    0
 #define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
 #define VXGE_ISR_POLLING_CNT   8
 #define VXGE_MAX_CONFIG_DEV    0xFF
 #define TTI_TX_UFC_B   40
 #define TTI_TX_UFC_C   60
 #define TTI_TX_UFC_D   100
+#define TTI_T1A_TX_UFC_A       30
+#define TTI_T1A_TX_UFC_B       80
+/* Slope - (max_mtu - min_mtu)/(max_mtu_ufc - min_mtu_ufc) */
+/* Slope - 93 */
+/* 60 - 9k Mtu, 140 - 1.5k mtu */
+#define TTI_T1A_TX_UFC_C(mtu)  (60 + ((VXGE_HW_MAX_MTU - mtu) / 93))
+
+/* Slope - 37 */
+/* 100 - 9k Mtu, 300 - 1.5k mtu */
+#define TTI_T1A_TX_UFC_D(mtu)  (100 + ((VXGE_HW_MAX_MTU - mtu) / 37))
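Taking the "9k"/"1.5k" endpoints in the comments as roughly 9000 and 1500 bytes (an assumption; the exact VXGE_HW_MAX_MTU value is defined elsewhere), the slopes follow directly: (9000 - 1500) / (140 - 60) ≈ 93 for UFC_C and (9000 - 1500) / (300 - 100) ≈ 37 for UFC_D, so each macro linearly raises the utilization-based frame count as the configured MTU shrinks from the maximum toward 1500 bytes.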
+
+
+#define RTI_RX_URANGE_A                5
+#define RTI_RX_URANGE_B                15
+#define RTI_RX_URANGE_C                40
+#define RTI_T1A_RX_URANGE_A    1
+#define RTI_T1A_RX_URANGE_B    20
+#define RTI_T1A_RX_URANGE_C    50
+#define RTI_RX_UFC_A           1
+#define RTI_RX_UFC_B           5
+#define RTI_RX_UFC_C           10
+#define RTI_RX_UFC_D           15
+#define RTI_T1A_RX_UFC_B       20
+#define RTI_T1A_RX_UFC_C       50
+#define RTI_T1A_RX_UFC_D       60
 
-#define RTI_RX_URANGE_A        5
-#define RTI_RX_URANGE_B        15
-#define RTI_RX_URANGE_C        40
-#define RTI_RX_UFC_A   1
-#define RTI_RX_UFC_B   5
-#define RTI_RX_UFC_C   10
-#define RTI_RX_UFC_D   15
 
 /* Milli secs timer period */
 #define VXGE_TIMER_DELAY               10000
@@ -145,15 +168,15 @@ struct vxge_config {
 
        int             addr_learn_en;
 
-       int             rth_steering;
-       int             rth_algorithm;
-       int             rth_hash_type_tcpipv4;
-       int             rth_hash_type_ipv4;
-       int             rth_hash_type_tcpipv6;
-       int             rth_hash_type_ipv6;
-       int             rth_hash_type_tcpipv6ex;
-       int             rth_hash_type_ipv6ex;
-       int             rth_bkt_sz;
+       u32             rth_steering:2,
+                       rth_algorithm:2,
+                       rth_hash_type_tcpipv4:1,
+                       rth_hash_type_ipv4:1,
+                       rth_hash_type_tcpipv6:1,
+                       rth_hash_type_ipv6:1,
+                       rth_hash_type_tcpipv6ex:1,
+                       rth_hash_type_ipv6ex:1,
+                       rth_bkt_sz:8;
        int             rth_jhash_golden_ratio;
        int             tx_steering_type;
        int     fifo_indicate_max_pkts;
@@ -248,8 +271,9 @@ struct vxge_ring {
         */
        int driver_id;
 
-        /* copy of the flag indicating whether rx_csum is to be used */
-       u32 rx_csum;
+       /* copy of the flag indicating whether rx_csum is to be used */
+       u32 rx_csum:1,
+           rx_hwts:1;
 
        int pkts_processed;
        int budget;
@@ -327,7 +351,9 @@ struct vxgedev {
        u16             all_multi_flg;
 
         /* A flag indicating whether rx_csum is to be used or not. */
-       u32     rx_csum;
+       u32     rx_csum:1,
+               rx_hwts:1,
+               titan1:1;
 
        struct vxge_msix_entry *vxge_entries;
        struct msix_entry *entries;
@@ -387,8 +413,6 @@ struct vxge_tx_priv {
        static int p = val; \
        module_param(p, int, 0)
 
-#define vxge_os_bug(fmt...)            { printk(fmt); BUG(); }
-
 #define vxge_os_timer(timer, handle, arg, exp) do { \
                init_timer(&timer); \
                timer.function = handle; \
@@ -397,6 +421,11 @@ struct vxge_tx_priv {
        } while (0);
 
 extern void vxge_initialize_ethtool_ops(struct net_device *ndev);
+
+enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
+
+int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
+
 /**
  * #define VXGE_DEBUG_INIT: debug for initialization functions
  * #define VXGE_DEBUG_TX        : debug transmit related functions
index 3dd5c9615ef9a05e77bf023fef62379778cf3761..3e658b175947809e194e661fca36e9716304f193 100644 (file)
 #define VXGE_HW_TITAN_VPMGMT_REG_SPACES                        17
 #define VXGE_HW_TITAN_VPATH_REG_SPACES                 17
 
+#define VXGE_HW_FW_API_GET_EPROM_REV                   31
+
+#define VXGE_EPROM_IMG_MAJOR(val)              (u32) vxge_bVALn(val, 48, 4)
+#define VXGE_EPROM_IMG_MINOR(val)              (u32) vxge_bVALn(val, 52, 4)
+#define VXGE_EPROM_IMG_FIX(val)                        (u32) vxge_bVALn(val, 56, 4)
+#define VXGE_EPROM_IMG_BUILD(val)              (u32) vxge_bVALn(val, 60, 4)
+
+#define VXGE_HW_GET_EPROM_IMAGE_INDEX(val)             vxge_bVALn(val, 16, 8)
+#define VXGE_HW_GET_EPROM_IMAGE_VALID(val)             vxge_bVALn(val, 31, 1)
+#define VXGE_HW_GET_EPROM_IMAGE_TYPE(val)              vxge_bVALn(val, 40, 8)
+#define VXGE_HW_GET_EPROM_IMAGE_REV(val)               vxge_bVALn(val, 48, 16)
+#define VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(val)  vxge_vBIT(val, 16, 8)
+
+#define VXGE_HW_FW_API_GET_FUNC_MODE                   29
+#define VXGE_HW_GET_FUNC_MODE_VAL(val)                 (val & 0xFF)
+
+#define VXGE_HW_FW_UPGRADE_MEMO                                13
+#define VXGE_HW_FW_UPGRADE_ACTION                      16
+#define VXGE_HW_FW_UPGRADE_OFFSET_START                        2
+#define VXGE_HW_FW_UPGRADE_OFFSET_SEND                 3
+#define VXGE_HW_FW_UPGRADE_OFFSET_COMMIT               4
+#define VXGE_HW_FW_UPGRADE_OFFSET_READ                 5
+
+#define VXGE_HW_FW_UPGRADE_BLK_SIZE                    16
+#define VXGE_HW_UPGRADE_GET_RET_ERR_CODE(val)          (val & 0xff)
+#define VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(val)          ((val >> 8) & 0xff)
+
 #define VXGE_HW_ASIC_MODE_RESERVED                             0
 #define VXGE_HW_ASIC_MODE_NO_IOV                               1
 #define VXGE_HW_ASIC_MODE_SR_IOV                               2
 #define        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_ETYPE             2
 #define        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_PN                3
 #define        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG       5
-#define        VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT  6
+#define        VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT          6
 #define        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG     7
 #define        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK          8
 #define        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY           9
 #define        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_QOS               10
 #define        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DS                11
-#define        VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12
+#define        VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT         12
 #define        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO           13
 
 #define        VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(bits) \
 #define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(bits) \
                                                        vxge_bVALn(bits, 48, 16)
 #define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_BUILD vxge_vBIT(val, 48, 16)
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(bits) vxge_bVALn(bits, 0, 8)
 
 #define        VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_GET_PPIF_SRPCIM_TO_VPATH_ALARM(bits)\
                                                        vxge_bVALn(bits, 0, 18)
@@ -3998,6 +4026,7 @@ struct vxge_hw_vpath_reg {
 #define        VXGE_HW_PRC_CFG6_L4_CPC_TRSFR_CODE_EN   vxge_mBIT(9)
 #define VXGE_HW_PRC_CFG6_RXD_CRXDT(val) vxge_vBIT(val, 23, 9)
 #define VXGE_HW_PRC_CFG6_RXD_SPAT(val) vxge_vBIT(val, 36, 9)
+#define VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val)     vxge_bVALn(val, 36, 9)
 /*0x00a78*/    u64     prc_cfg7;
 #define VXGE_HW_PRC_CFG7_SCATTER_MODE(val) vxge_vBIT(val, 6, 2)
 #define        VXGE_HW_PRC_CFG7_SMART_SCAT_EN  vxge_mBIT(11)
index 9890d4d596d0d29a5b8083656e416b1c30ca2b74..1fceee87622878f5e8e2b10d28a9901942bb83be 100644 (file)
@@ -1904,34 +1904,6 @@ enum vxge_hw_ring_tcode {
        VXGE_HW_RING_T_CODE_MULTI_ERR                   = 0xF
 };
 
-/**
- * enum enum vxge_hw_ring_hash_type - RTH hash types
- * @VXGE_HW_RING_HASH_TYPE_NONE: No Hash
- * @VXGE_HW_RING_HASH_TYPE_TCP_IPV4: TCP IPv4
- * @VXGE_HW_RING_HASH_TYPE_UDP_IPV4: UDP IPv4
- * @VXGE_HW_RING_HASH_TYPE_IPV4: IPv4
- * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6: TCP IPv6
- * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6: UDP IPv6
- * @VXGE_HW_RING_HASH_TYPE_IPV6: IPv6
- * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX: TCP IPv6 extension
- * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX: UDP IPv6 extension
- * @VXGE_HW_RING_HASH_TYPE_IPV6_EX: IPv6 extension
- *
- * RTH hash types
- */
-enum vxge_hw_ring_hash_type {
-       VXGE_HW_RING_HASH_TYPE_NONE                     = 0x0,
-       VXGE_HW_RING_HASH_TYPE_TCP_IPV4         = 0x1,
-       VXGE_HW_RING_HASH_TYPE_UDP_IPV4         = 0x2,
-       VXGE_HW_RING_HASH_TYPE_IPV4                     = 0x3,
-       VXGE_HW_RING_HASH_TYPE_TCP_IPV6         = 0x4,
-       VXGE_HW_RING_HASH_TYPE_UDP_IPV6         = 0x5,
-       VXGE_HW_RING_HASH_TYPE_IPV6                     = 0x6,
-       VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX      = 0x7,
-       VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX      = 0x8,
-       VXGE_HW_RING_HASH_TYPE_IPV6_EX          = 0x9
-};
-
 enum vxge_hw_status vxge_hw_ring_rxd_reserve(
        struct __vxge_hw_ring *ring_handle,
        void **rxdh);
index 53fefe13736875650952d7f748c79edbc1acb8c7..f05bb2f55e73034309655df55df44ec9adc60d09 100644 (file)
 
 #define VXGE_VERSION_MAJOR     "2"
 #define VXGE_VERSION_MINOR     "0"
-#define VXGE_VERSION_FIX       "9"
-#define VXGE_VERSION_BUILD     "20840"
+#define VXGE_VERSION_FIX       "10"
+#define VXGE_VERSION_BUILD     "21808"
 #define VXGE_VERSION_FOR       "k"
+
+#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))
+
+#define VXGE_DEAD_FW_VER_MAJOR 1
+#define VXGE_DEAD_FW_VER_MINOR 4
+#define VXGE_DEAD_FW_VER_BUILD 4
+
+#define VXGE_FW_DEAD_VER VXGE_FW_VER(VXGE_DEAD_FW_VER_MAJOR, \
+                                    VXGE_DEAD_FW_VER_MINOR, \
+                                    VXGE_DEAD_FW_VER_BUILD)
+
+#define VXGE_EPROM_FW_VER_MAJOR        1
+#define VXGE_EPROM_FW_VER_MINOR        6
+#define VXGE_EPROM_FW_VER_BUILD        1
+
+#define VXGE_EPROM_FW_VER VXGE_FW_VER(VXGE_EPROM_FW_VER_MAJOR, \
+                                     VXGE_EPROM_FW_VER_MINOR, \
+                                     VXGE_EPROM_FW_VER_BUILD)
+
+#define VXGE_CERT_FW_VER_MAJOR 1
+#define VXGE_CERT_FW_VER_MINOR 8
+#define VXGE_CERT_FW_VER_BUILD 1
+
+#define VXGE_CERT_FW_VER VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, \
+                                    VXGE_CERT_FW_VER_MINOR, \
+                                    VXGE_CERT_FW_VER_BUILD)
+
 #endif
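VXGE_FW_VER packs the three components into one comparable integer, with the build number in the low byte; that is what lets vxge_probe_fw_update() compare versions with ordinary integer operators and drop the build by passing 0. A quick worked example:

/* VXGE_FW_VER(1, 8, 1) = (1 << 16) + (8 << 8) + 1 = 0x10801 (67585)
 * VXGE_FW_VER(1, 6, 1) = 0x10601, so 1.8.1 > 1.6.1 as plain integers.
 * VXGE_FW_VER(maj, min, 0) zeroes the build byte, as used when deciding
 * whether the running firmware is "too new" for the driver. */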
index 8251946842e6186bddcd1cac303dbaea74664410..b9f93fbd9728d40dba9073f4e8936a1f1c4a6b6c 100644 (file)
@@ -566,8 +566,8 @@ static void ath_do_set_opmode(struct ath5k_softc *sc)
                  sc->opmode, ath_opmode_to_string(sc->opmode));
 }
 
-void ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
-                                       struct ieee80211_vif *vif)
+static void ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
+                                              struct ieee80211_vif *vif)
 {
        struct ath_common *common = ath5k_hw_common(sc->ah);
        struct ath_vif_iter_data iter_data;
@@ -3206,14 +3206,32 @@ static int ath5k_get_survey(struct ieee80211_hw *hw, int idx,
 {
        struct ath5k_softc *sc = hw->priv;
        struct ieee80211_conf *conf = &hw->conf;
+       struct ath_common *common = ath5k_hw_common(sc->ah);
+       struct ath_cycle_counters *cc = &common->cc_survey;
+       unsigned int div = common->clockrate * 1000;
 
-        if (idx != 0)
+       if (idx != 0)
                return -ENOENT;
 
        survey->channel = conf->channel;
        survey->filled = SURVEY_INFO_NOISE_DBM;
        survey->noise = sc->ah->ah_noise_floor;
 
+       spin_lock_bh(&common->cc_lock);
+       ath_hw_cycle_counters_update(common);
+       if (cc->cycles > 0) {
+               survey->filled |= SURVEY_INFO_CHANNEL_TIME |
+                       SURVEY_INFO_CHANNEL_TIME_BUSY |
+                       SURVEY_INFO_CHANNEL_TIME_RX |
+                       SURVEY_INFO_CHANNEL_TIME_TX;
+               survey->channel_time += cc->cycles / div;
+               survey->channel_time_busy += cc->rx_busy / div;
+               survey->channel_time_rx += cc->rx_frame / div;
+               survey->channel_time_tx += cc->tx_frame / div;
+       }
+       memset(cc, 0, sizeof(*cc));
+       spin_unlock_bh(&common->cc_lock);
+
        return 0;
 }
 
index acda56ee521bdf0d2d42102fa8e14188e480fb05..54dcf77e96466ead8a8abde7ab3b0bbc0e083acf 100644 (file)
@@ -554,63 +554,63 @@ static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf,
 
        len += snprintf(buf+len, sizeof(buf)-len,
                        "RX\n---------------------\n");
-       len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%d\t(%d%%)\n",
+       len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
                        st->rxerr_crc,
                        st->rx_all_count > 0 ?
                                st->rxerr_crc*100/st->rx_all_count : 0);
-       len += snprintf(buf+len, sizeof(buf)-len, "PHY\t%d\t(%d%%)\n",
+       len += snprintf(buf+len, sizeof(buf)-len, "PHY\t%u\t(%u%%)\n",
                        st->rxerr_phy,
                        st->rx_all_count > 0 ?
                                st->rxerr_phy*100/st->rx_all_count : 0);
        for (i = 0; i < 32; i++) {
                if (st->rxerr_phy_code[i])
                        len += snprintf(buf+len, sizeof(buf)-len,
-                               " phy_err[%d]\t%d\n",
+                               " phy_err[%u]\t%u\n",
                                i, st->rxerr_phy_code[i]);
        }
 
-       len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%d\t(%d%%)\n",
+       len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%u\t(%u%%)\n",
                        st->rxerr_fifo,
                        st->rx_all_count > 0 ?
                                st->rxerr_fifo*100/st->rx_all_count : 0);
-       len += snprintf(buf+len, sizeof(buf)-len, "decrypt\t%d\t(%d%%)\n",
+       len += snprintf(buf+len, sizeof(buf)-len, "decrypt\t%u\t(%u%%)\n",
                        st->rxerr_decrypt,
                        st->rx_all_count > 0 ?
                                st->rxerr_decrypt*100/st->rx_all_count : 0);
-       len += snprintf(buf+len, sizeof(buf)-len, "MIC\t%d\t(%d%%)\n",
+       len += snprintf(buf+len, sizeof(buf)-len, "MIC\t%u\t(%u%%)\n",
                        st->rxerr_mic,
                        st->rx_all_count > 0 ?
                                st->rxerr_mic*100/st->rx_all_count : 0);
-       len += snprintf(buf+len, sizeof(buf)-len, "process\t%d\t(%d%%)\n",
+       len += snprintf(buf+len, sizeof(buf)-len, "process\t%u\t(%u%%)\n",
                        st->rxerr_proc,
                        st->rx_all_count > 0 ?
                                st->rxerr_proc*100/st->rx_all_count : 0);
-       len += snprintf(buf+len, sizeof(buf)-len, "jumbo\t%d\t(%d%%)\n",
+       len += snprintf(buf+len, sizeof(buf)-len, "jumbo\t%u\t(%u%%)\n",
                        st->rxerr_jumbo,
                        st->rx_all_count > 0 ?
                                st->rxerr_jumbo*100/st->rx_all_count : 0);
-       len += snprintf(buf+len, sizeof(buf)-len, "[RX all\t%d]\n",
+       len += snprintf(buf+len, sizeof(buf)-len, "[RX all\t%u]\n",
                        st->rx_all_count);
-       len += snprintf(buf+len, sizeof(buf)-len, "RX-all-bytes\t%d\n",
+       len += snprintf(buf+len, sizeof(buf)-len, "RX-all-bytes\t%u\n",
                        st->rx_bytes_count);
 
        len += snprintf(buf+len, sizeof(buf)-len,
                        "\nTX\n---------------------\n");
-       len += snprintf(buf+len, sizeof(buf)-len, "retry\t%d\t(%d%%)\n",
+       len += snprintf(buf+len, sizeof(buf)-len, "retry\t%u\t(%u%%)\n",
                        st->txerr_retry,
                        st->tx_all_count > 0 ?
                                st->txerr_retry*100/st->tx_all_count : 0);
-       len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%d\t(%d%%)\n",
+       len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%u\t(%u%%)\n",
                        st->txerr_fifo,
                        st->tx_all_count > 0 ?
                                st->txerr_fifo*100/st->tx_all_count : 0);
-       len += snprintf(buf+len, sizeof(buf)-len, "filter\t%d\t(%d%%)\n",
+       len += snprintf(buf+len, sizeof(buf)-len, "filter\t%u\t(%u%%)\n",
                        st->txerr_filt,
                        st->tx_all_count > 0 ?
                                st->txerr_filt*100/st->tx_all_count : 0);
-       len += snprintf(buf+len, sizeof(buf)-len, "[TX all\t%d]\n",
+       len += snprintf(buf+len, sizeof(buf)-len, "[TX all\t%u]\n",
                        st->tx_all_count);
-       len += snprintf(buf+len, sizeof(buf)-len, "TX-all-bytes\t%d\n",
+       len += snprintf(buf+len, sizeof(buf)-len, "TX-all-bytes\t%u\n",
                        st->tx_bytes_count);
 
        if (len > sizeof(buf))
index b2adb2a281c2e9035cdd17f52d4c40386a4b408b..2509d0bf037d973fc1748fe80db6d104b5860011 100644 (file)
@@ -26,7 +26,7 @@
 struct ath5k_hw_rx_ctl {
        u32     rx_control_0; /* RX control word 0 */
        u32     rx_control_1; /* RX control word 1 */
-} __packed;
+} __packed __aligned(4);
 
 /* RX control word 1 fields/flags */
 #define AR5K_DESC_RX_CTL1_BUF_LEN              0x00000fff /* data buffer length */
@@ -39,7 +39,7 @@ struct ath5k_hw_rx_ctl {
 struct ath5k_hw_rx_status {
        u32     rx_status_0; /* RX status word 0 */
        u32     rx_status_1; /* RX status word 1 */
-} __packed;
+} __packed __aligned(4);
 
 /* 5210/5211 */
 /* RX status word 0 fields/flags */
@@ -129,7 +129,7 @@ enum ath5k_phy_error_code {
 struct ath5k_hw_2w_tx_ctl {
        u32     tx_control_0; /* TX control word 0 */
        u32     tx_control_1; /* TX control word 1 */
-} __packed;
+} __packed __aligned(4);
 
 /* TX control word 0 fields/flags */
 #define AR5K_2W_TX_DESC_CTL0_FRAME_LEN         0x00000fff /* frame length */
@@ -185,7 +185,7 @@ struct ath5k_hw_4w_tx_ctl {
        u32     tx_control_1; /* TX control word 1 */
        u32     tx_control_2; /* TX control word 2 */
        u32     tx_control_3; /* TX control word 3 */
-} __packed;
+} __packed __aligned(4);
 
 /* TX control word 0 fields/flags */
 #define AR5K_4W_TX_DESC_CTL0_FRAME_LEN         0x00000fff /* frame length */
@@ -244,7 +244,7 @@ struct ath5k_hw_4w_tx_ctl {
 struct ath5k_hw_tx_status {
        u32     tx_status_0; /* TX status word 0 */
        u32     tx_status_1; /* TX status word 1 */
-} __packed;
+} __packed __aligned(4);
 
 /* TX status word 0 fields/flags */
 #define AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK     0x00000001 /* TX success */
@@ -282,7 +282,7 @@ struct ath5k_hw_tx_status {
 struct ath5k_hw_5210_tx_desc {
        struct ath5k_hw_2w_tx_ctl       tx_ctl;
        struct ath5k_hw_tx_status       tx_stat;
-} __packed;
+} __packed __aligned(4);
 
 /*
  * 5212 hardware TX descriptor
@@ -290,7 +290,7 @@ struct ath5k_hw_5210_tx_desc {
 struct ath5k_hw_5212_tx_desc {
        struct ath5k_hw_4w_tx_ctl       tx_ctl;
        struct ath5k_hw_tx_status       tx_stat;
-} __packed;
+} __packed __aligned(4);
 
 /*
  * Common hardware RX descriptor
@@ -298,7 +298,7 @@ struct ath5k_hw_5212_tx_desc {
 struct ath5k_hw_all_rx_desc {
        struct ath5k_hw_rx_ctl          rx_ctl;
        struct ath5k_hw_rx_status       rx_stat;
-} __packed;
+} __packed __aligned(4);
 
 /*
  * Atheros hardware DMA descriptor
@@ -313,7 +313,7 @@ struct ath5k_desc {
                struct ath5k_hw_5212_tx_desc    ds_tx5212;
                struct ath5k_hw_all_rx_desc     ds_rx;
        } ud;
-} __packed;
+} __packed __aligned(4);
 
 #define AR5K_RXDESC_INTREQ     0x0020
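Annotating a structure as __packed alone drops its assumed alignment to 1, so compilers targeting strict-alignment CPUs may fall back to byte-by-byte accesses for the u32 members; adding __aligned(4) keeps the no-padding guarantee while promising 4-byte alignment, so the descriptor words can still be accessed with single 32-bit loads and stores. Minimal illustration, not taken from the patch:

/* No padding between members, and the struct (hence each u32) is
 * guaranteed 4-byte aligned, allowing normal word-sized accesses. */
struct example_hw_desc {
	u32 word0;
	u32 word1;
} __packed __aligned(4);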
 
index 219367884e640a60a99a99eba59478aaba9bc1da..6b43f535ff5374d018438a809804b68ee9820eb3 100644 (file)
@@ -1102,18 +1102,12 @@ int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel)
   PHY calibration
 \*****************/
 
-static int sign_extend(int val, const int nbits)
-{
-       int order = BIT(nbits-1);
-       return (val ^ order) - order;
-}
-
 static s32 ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
 {
        s32 val;
 
        val = ath5k_hw_reg_read(ah, AR5K_PHY_NF);
-       return sign_extend(AR5K_REG_MS(val, AR5K_PHY_NF_MINCCA_PWR), 9);
+       return sign_extend32(AR5K_REG_MS(val, AR5K_PHY_NF_MINCCA_PWR), 8);
 }
 
 void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
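The removed local helper took the field width (9 bits), whereas the generic sign_extend32() helper in <linux/bitops.h> takes the index of the sign bit, hence the 9 -> 8 change in every caller. A roughly equivalent open-coded form, for illustration (kernel u32/s32 types assumed):

/* sign_extend32(value, 8): treat bit 8 as the sign bit of a 9-bit field. */
static inline s32 sign_extend_9bit(u32 value)
{
	unsigned int shift = 31 - 8;

	return (s32)(value << shift) >> shift;
}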
index ea9f4497f58c79bd491d84181a605e7b251f4b91..c83a22cfbe1ee24b4e342e34b20f026d794f8feb 100644 (file)
@@ -873,7 +873,7 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
                                 channel->max_antenna_gain * 2,
                                 channel->max_power * 2,
                                 min((u32) MAX_RATE_POWER,
-                                (u32) regulatory->power_limit));
+                                (u32) regulatory->power_limit), false);
 
        /* Write analog registers */
        if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
@@ -1490,25 +1490,25 @@ static void ar5008_hw_do_getnf(struct ath_hw *ah,
        int16_t nf;
 
        nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR);
-       nfarray[0] = sign_extend(nf, 9);
+       nfarray[0] = sign_extend32(nf, 8);
 
        nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR_PHY_CH1_MINCCA_PWR);
-       nfarray[1] = sign_extend(nf, 9);
+       nfarray[1] = sign_extend32(nf, 8);
 
        nf = MS(REG_READ(ah, AR_PHY_CH2_CCA), AR_PHY_CH2_MINCCA_PWR);
-       nfarray[2] = sign_extend(nf, 9);
+       nfarray[2] = sign_extend32(nf, 8);
 
        if (!IS_CHAN_HT40(ah->curchan))
                return;
 
        nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR);
-       nfarray[3] = sign_extend(nf, 9);
+       nfarray[3] = sign_extend32(nf, 8);
 
        nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR_PHY_CH1_EXT_MINCCA_PWR);
-       nfarray[4] = sign_extend(nf, 9);
+       nfarray[4] = sign_extend32(nf, 8);
 
        nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA), AR_PHY_CH2_EXT_MINCCA_PWR);
-       nfarray[5] = sign_extend(nf, 9);
+       nfarray[5] = sign_extend32(nf, 8);
 }
 
 /*
index 50dda394f8bef01ee69fb3d3e88604e856f6b938..f0268e5eab3406831b88640bb37480c7b78de1b6 100644 (file)
@@ -90,13 +90,10 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
 
                *masked = isr & ATH9K_INT_COMMON;
 
-               if (ah->config.rx_intr_mitigation) {
-                       if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
-                               *masked |= ATH9K_INT_RX;
-               }
-
-               if (isr & (AR_ISR_RXOK | AR_ISR_RXERR))
+               if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM |
+                          AR_ISR_RXOK | AR_ISR_RXERR))
                        *masked |= ATH9K_INT_RX;
+
                if (isr &
                    (AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR |
                     AR_ISR_TXEOL)) {
@@ -118,14 +115,6 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
                                  "receive FIFO overrun interrupt\n");
                }
 
-               if (!AR_SREV_9100(ah)) {
-                       if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
-                               u32 isr5 = REG_READ(ah, AR_ISR_S5_S);
-                               if (isr5 & AR_ISR_S5_TIM_TIMER)
-                                       *masked |= ATH9K_INT_TIM_TIMER;
-                       }
-               }
-
                *masked |= mask2;
        }
 
@@ -136,17 +125,18 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
                u32 s5_s;
 
                s5_s = REG_READ(ah, AR_ISR_S5_S);
-               if (isr & AR_ISR_GENTMR) {
-                       ah->intr_gen_timer_trigger =
+               ah->intr_gen_timer_trigger =
                                MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
 
-                       ah->intr_gen_timer_thresh =
-                               MS(s5_s, AR_ISR_S5_GENTIMER_THRESH);
+               ah->intr_gen_timer_thresh =
+                       MS(s5_s, AR_ISR_S5_GENTIMER_THRESH);
 
-                       if (ah->intr_gen_timer_trigger)
-                               *masked |= ATH9K_INT_GENTIMER;
+               if (ah->intr_gen_timer_trigger)
+                       *masked |= ATH9K_INT_GENTIMER;
 
-               }
+               if ((s5_s & AR_ISR_S5_TIM_TIMER) &&
+                   !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
+                       *masked |= ATH9K_INT_TIM_TIMER;
        }
 
        if (sync_cause) {
@@ -218,77 +208,70 @@ static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
                                 struct ath_tx_status *ts)
 {
        struct ar5416_desc *ads = AR5416DESC(ds);
+       u32 status;
 
-       if ((ads->ds_txstatus9 & AR_TxDone) == 0)
+       status = ACCESS_ONCE(ads->ds_txstatus9);
+       if ((status & AR_TxDone) == 0)
                return -EINPROGRESS;
 
-       ts->ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
        ts->ts_tstamp = ads->AR_SendTimestamp;
        ts->ts_status = 0;
        ts->ts_flags = 0;
 
-       if (ads->ds_txstatus1 & AR_FrmXmitOK)
+       if (status & AR_TxOpExceeded)
+               ts->ts_status |= ATH9K_TXERR_XTXOP;
+       ts->tid = MS(status, AR_TxTid);
+       ts->ts_rateindex = MS(status, AR_FinalTxIdx);
+       ts->ts_seqnum = MS(status, AR_SeqNum);
+
+       status = ACCESS_ONCE(ads->ds_txstatus0);
+       ts->ts_rssi_ctl0 = MS(status, AR_TxRSSIAnt00);
+       ts->ts_rssi_ctl1 = MS(status, AR_TxRSSIAnt01);
+       ts->ts_rssi_ctl2 = MS(status, AR_TxRSSIAnt02);
+       if (status & AR_TxBaStatus) {
+               ts->ts_flags |= ATH9K_TX_BA;
+               ts->ba_low = ads->AR_BaBitmapLow;
+               ts->ba_high = ads->AR_BaBitmapHigh;
+       }
+
+       status = ACCESS_ONCE(ads->ds_txstatus1);
+       if (status & AR_FrmXmitOK)
                ts->ts_status |= ATH9K_TX_ACKED;
-       if (ads->ds_txstatus1 & AR_ExcessiveRetries)
-               ts->ts_status |= ATH9K_TXERR_XRETRY;
-       if (ads->ds_txstatus1 & AR_Filtered)
-               ts->ts_status |= ATH9K_TXERR_FILT;
-       if (ads->ds_txstatus1 & AR_FIFOUnderrun) {
-               ts->ts_status |= ATH9K_TXERR_FIFO;
-               ath9k_hw_updatetxtriglevel(ah, true);
+       else {
+               if (status & AR_ExcessiveRetries)
+                       ts->ts_status |= ATH9K_TXERR_XRETRY;
+               if (status & AR_Filtered)
+                       ts->ts_status |= ATH9K_TXERR_FILT;
+               if (status & AR_FIFOUnderrun) {
+                       ts->ts_status |= ATH9K_TXERR_FIFO;
+                       ath9k_hw_updatetxtriglevel(ah, true);
+               }
        }
-       if (ads->ds_txstatus9 & AR_TxOpExceeded)
-               ts->ts_status |= ATH9K_TXERR_XTXOP;
-       if (ads->ds_txstatus1 & AR_TxTimerExpired)
+       if (status & AR_TxTimerExpired)
                ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
-
-       if (ads->ds_txstatus1 & AR_DescCfgErr)
+       if (status & AR_DescCfgErr)
                ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR;
-       if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
+       if (status & AR_TxDataUnderrun) {
                ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN;
                ath9k_hw_updatetxtriglevel(ah, true);
        }
-       if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
+       if (status & AR_TxDelimUnderrun) {
                ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
                ath9k_hw_updatetxtriglevel(ah, true);
        }
-       if (ads->ds_txstatus0 & AR_TxBaStatus) {
-               ts->ts_flags |= ATH9K_TX_BA;
-               ts->ba_low = ads->AR_BaBitmapLow;
-               ts->ba_high = ads->AR_BaBitmapHigh;
-       }
+       ts->ts_shortretry = MS(status, AR_RTSFailCnt);
+       ts->ts_longretry = MS(status, AR_DataFailCnt);
+       ts->ts_virtcol = MS(status, AR_VirtRetryCnt);
 
-       ts->ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
-       switch (ts->ts_rateindex) {
-       case 0:
-               ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
-               break;
-       case 1:
-               ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
-               break;
-       case 2:
-               ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
-               break;
-       case 3:
-               ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
-               break;
-       }
+       status = ACCESS_ONCE(ads->ds_txstatus5);
+       ts->ts_rssi = MS(status, AR_TxRSSICombined);
+       ts->ts_rssi_ext0 = MS(status, AR_TxRSSIAnt10);
+       ts->ts_rssi_ext1 = MS(status, AR_TxRSSIAnt11);
+       ts->ts_rssi_ext2 = MS(status, AR_TxRSSIAnt12);
 
-       ts->ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
-       ts->ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
-       ts->ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
-       ts->ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
-       ts->ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
-       ts->ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
-       ts->ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
        ts->evm0 = ads->AR_TxEVM0;
        ts->evm1 = ads->AR_TxEVM1;
        ts->evm2 = ads->AR_TxEVM2;
-       ts->ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
-       ts->ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
-       ts->ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
-       ts->tid = MS(ads->ds_txstatus9, AR_TxTid);
-       ts->ts_antenna = 0;
 
        return 0;
 }
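The TX status words live in memory the hardware updates via DMA, so each word is now loaded once into a local variable and all the MS() field extractions parse that snapshot; ACCESS_ONCE() (from <linux/compiler.h>) forces that single volatile load, so the compiler cannot legally re-read the descriptor between the AR_TxDone check and the later parsing. In essence:

/* The classic definition: a volatile-qualified access the compiler must
 * perform exactly once and cannot cache, duplicate, or reorder away. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

u32 status = ACCESS_ONCE(ads->ds_txstatus9);   /* one read of the DMA word */
if ((status & AR_TxDone) == 0)
	return -EINPROGRESS;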
index c00cdc67b55ba5291f9ebc758d342f7dfb3184a1..3fb97fdc1240d7a0fb50b1b2102b0810a1d5713f 100644 (file)
@@ -473,21 +473,21 @@ static void ar9002_hw_do_getnf(struct ath_hw *ah,
        int16_t nf;
 
        nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR);
-       nfarray[0] = sign_extend(nf, 9);
+       nfarray[0] = sign_extend32(nf, 8);
 
        nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR9280_PHY_EXT_MINCCA_PWR);
        if (IS_CHAN_HT40(ah->curchan))
-               nfarray[3] = sign_extend(nf, 9);
+               nfarray[3] = sign_extend32(nf, 8);
 
        if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
                return;
 
        nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR9280_PHY_CH1_MINCCA_PWR);
-       nfarray[1] = sign_extend(nf, 9);
+       nfarray[1] = sign_extend32(nf, 8);
 
        nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR9280_PHY_CH1_EXT_MINCCA_PWR);
        if (IS_CHAN_HT40(ah->curchan))
-               nfarray[4] = sign_extend(nf, 9);
+               nfarray[4] = sign_extend32(nf, 8);
 }
 
 static void ar9002_hw_set_nf_limits(struct ath_hw *ah)
index c4182359bee46603cffc46e9781750087f2a5f0f..a88fe0d6142fe4ea5a5eb69f2e6b1d023cb1eebb 100644 (file)
@@ -2131,8 +2131,9 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
                                        struct ath9k_channel *chan, u16 cfgCtl,
                                        u8 twiceAntennaReduction,
                                        u8 twiceMaxRegulatoryPower,
-                                       u8 powerLimit)
+                                       u8 powerLimit, bool test)
 {
+       struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
        struct ath_common *common = ath9k_hw_common(ah);
        u8 targetPowerValT2[ar9300RateSize];
        unsigned int i = 0;
@@ -2144,7 +2145,16 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
                                           twiceMaxRegulatoryPower,
                                           powerLimit);
 
-       while (i < ar9300RateSize) {
+       regulatory->max_power_level = 0;
+       for (i = 0; i < ar9300RateSize; i++) {
+               if (targetPowerValT2[i] > regulatory->max_power_level)
+                       regulatory->max_power_level = targetPowerValT2[i];
+       }
+
+       if (test)
+               return;
+
+       for (i = 0; i < ar9300RateSize; i++) {
                ath_print(common, ATH_DBG_EEPROM,
                          "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
                i++;
@@ -2159,9 +2169,6 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
                i++;
        }
 
-       /* Write target power array to registers */
-       ar9003_hw_tx_power_regwrite(ah, targetPowerValT2);
-
        /*
         * This is the TX power we send back to driver core,
         * and it can use to pass to userspace to display our
@@ -2180,7 +2187,10 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
                i = ALL_TARGET_HT20_0_8_16; /* ht20 */
 
        ah->txpower_limit = targetPowerValT2[i];
+       regulatory->max_power_level = targetPowerValT2[i];
 
+       /* Write target power array to registers */
+       ar9003_hw_tx_power_regwrite(ah, targetPowerValT2);
        ar9003_hw_calibration_apply(ah, chan->channel);
 }
 
index 3b424ca1ba844855b798e757e0d0727dd29e9e27..10c812e353a63b0294dab5669ab3888536eff0be 100644 (file)
@@ -237,10 +237,12 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
                                 struct ath_tx_status *ts)
 {
        struct ar9003_txs *ads;
+       u32 status;
 
        ads = &ah->ts_ring[ah->ts_tail];
 
-       if ((ads->status8 & AR_TxDone) == 0)
+       status = ACCESS_ONCE(ads->status8);
+       if ((status & AR_TxDone) == 0)
                return -EINPROGRESS;
 
        ah->ts_tail = (ah->ts_tail + 1) % ah->ts_size;
@@ -253,57 +255,58 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
                return -EIO;
        }
 
+       if (status & AR_TxOpExceeded)
+               ts->ts_status |= ATH9K_TXERR_XTXOP;
+       ts->ts_rateindex = MS(status, AR_FinalTxIdx);
+       ts->ts_seqnum = MS(status, AR_SeqNum);
+       ts->tid = MS(status, AR_TxTid);
+
        ts->qid = MS(ads->ds_info, AR_TxQcuNum);
        ts->desc_id = MS(ads->status1, AR_TxDescId);
-       ts->ts_seqnum = MS(ads->status8, AR_SeqNum);
        ts->ts_tstamp = ads->status4;
        ts->ts_status = 0;
        ts->ts_flags  = 0;
 
-       if (ads->status3 & AR_ExcessiveRetries)
+       status = ACCESS_ONCE(ads->status2);
+       ts->ts_rssi_ctl0 = MS(status, AR_TxRSSIAnt00);
+       ts->ts_rssi_ctl1 = MS(status, AR_TxRSSIAnt01);
+       ts->ts_rssi_ctl2 = MS(status, AR_TxRSSIAnt02);
+       if (status & AR_TxBaStatus) {
+               ts->ts_flags |= ATH9K_TX_BA;
+               ts->ba_low = ads->status5;
+               ts->ba_high = ads->status6;
+       }
+
+       status = ACCESS_ONCE(ads->status3);
+       if (status & AR_ExcessiveRetries)
                ts->ts_status |= ATH9K_TXERR_XRETRY;
-       if (ads->status3 & AR_Filtered)
+       if (status & AR_Filtered)
                ts->ts_status |= ATH9K_TXERR_FILT;
-       if (ads->status3 & AR_FIFOUnderrun) {
+       if (status & AR_FIFOUnderrun) {
                ts->ts_status |= ATH9K_TXERR_FIFO;
                ath9k_hw_updatetxtriglevel(ah, true);
        }
-       if (ads->status8 & AR_TxOpExceeded)
-               ts->ts_status |= ATH9K_TXERR_XTXOP;
-       if (ads->status3 & AR_TxTimerExpired)
+       if (status & AR_TxTimerExpired)
                ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
-
-       if (ads->status3 & AR_DescCfgErr)
+       if (status & AR_DescCfgErr)
                ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR;
-       if (ads->status3 & AR_TxDataUnderrun) {
+       if (status & AR_TxDataUnderrun) {
                ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN;
                ath9k_hw_updatetxtriglevel(ah, true);
        }
-       if (ads->status3 & AR_TxDelimUnderrun) {
+       if (status & AR_TxDelimUnderrun) {
                ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
                ath9k_hw_updatetxtriglevel(ah, true);
        }
-       if (ads->status2 & AR_TxBaStatus) {
-               ts->ts_flags |= ATH9K_TX_BA;
-               ts->ba_low = ads->status5;
-               ts->ba_high = ads->status6;
-       }
-
-       ts->ts_rateindex = MS(ads->status8, AR_FinalTxIdx);
-
-       ts->ts_rssi = MS(ads->status7, AR_TxRSSICombined);
-       ts->ts_rssi_ctl0 = MS(ads->status2, AR_TxRSSIAnt00);
-       ts->ts_rssi_ctl1 = MS(ads->status2, AR_TxRSSIAnt01);
-       ts->ts_rssi_ctl2 = MS(ads->status2, AR_TxRSSIAnt02);
-       ts->ts_rssi_ext0 = MS(ads->status7, AR_TxRSSIAnt10);
-       ts->ts_rssi_ext1 = MS(ads->status7, AR_TxRSSIAnt11);
-       ts->ts_rssi_ext2 = MS(ads->status7, AR_TxRSSIAnt12);
-       ts->ts_shortretry = MS(ads->status3, AR_RTSFailCnt);
-       ts->ts_longretry = MS(ads->status3, AR_DataFailCnt);
-       ts->ts_virtcol = MS(ads->status3, AR_VirtRetryCnt);
-       ts->ts_antenna = 0;
-
-       ts->tid = MS(ads->status8, AR_TxTid);
+       ts->ts_shortretry = MS(status, AR_RTSFailCnt);
+       ts->ts_longretry = MS(status, AR_DataFailCnt);
+       ts->ts_virtcol = MS(status, AR_VirtRetryCnt);
+
+       status = ACCESS_ONCE(ads->status7);
+       ts->ts_rssi = MS(status, AR_TxRSSICombined);
+       ts->ts_rssi_ext0 = MS(status, AR_TxRSSIAnt10);
+       ts->ts_rssi_ext1 = MS(status, AR_TxRSSIAnt11);
+       ts->ts_rssi_ext2 = MS(status, AR_TxRSSIAnt12);
 
        memset(ads, 0, sizeof(*ads));
 
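The hunk above reworks ar9003_hw_proc_txdesc() to read each DMA-written status word exactly once into a local through ACCESS_ONCE() and to derive every field from that snapshot, rather than dereferencing ads->statusN repeatedly while the hardware may still be updating the descriptor. A minimal userspace sketch of the same idiom; ACCESS_ONCE is redefined locally for illustration (the kernel's lives in <linux/compiler.h>) and the bit layout here is invented:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for the kernel's ACCESS_ONCE(): a single volatile
     * read, so the compiler cannot silently re-load the value later. */
    #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

    /* Invented bit layout, for illustration only. */
    #define AR_TxDone    0x00000001u
    #define AR_FinalIdx  0x00000600u

    struct txs { uint32_t status8; };

    static int proc_txdesc(struct txs *ads, int *rateindex)
    {
        uint32_t status = ACCESS_ONCE(ads->status8);   /* one snapshot... */

        if (!(status & AR_TxDone))
            return -1;                                 /* not completed yet */

        *rateindex = (status & AR_FinalIdx) >> 9;      /* ...all fields from it */
        return 0;
    }

    int main(void)
    {
        struct txs ads = { .status8 = 0x00000201u };
        int idx;

        if (proc_txdesc(&ads, &idx) == 0)
            printf("final tx index %d\n", idx);
        return 0;
    }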
index 9f2cea70a840801dc1f767993a83b02fa4c29eb1..45cc7e80436c067430e140dd5dfb36b671afe62f 100644 (file)
@@ -65,7 +65,7 @@ struct ar9003_rxs {
        u32 status9;
        u32 status10;
        u32 status11;
-} __packed;
+} __packed __aligned(4);
 
 /* Transmit Control Descriptor */
 struct ar9003_txc {
@@ -93,7 +93,7 @@ struct ar9003_txc {
        u32 ctl21;  /* DMA control 21 */
        u32 ctl22;  /* DMA control 22 */
        u32 pad[9]; /* pad to cache line (128 bytes/32 dwords) */
-} __packed;
+} __packed __aligned(4);
 
 struct ar9003_txs {
        u32 ds_info;
@@ -105,7 +105,7 @@ struct ar9003_txs {
        u32 status6;
        u32 status7;
        u32 status8;
-} __packed;
+} __packed __aligned(4);
 
 void ar9003_hw_attach_mac_ops(struct ath_hw *hw);
 void ath9k_hw_set_rx_bufsize(struct ath_hw *ah, u16 buf_size);
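Adding __aligned(4) next to __packed tells the compiler that these descriptor structures still begin on a 4-byte boundary, so it may keep issuing 32-bit loads and stores for the u32 members instead of falling back to byte accesses on strict-alignment CPUs such as ARM. A small standalone sketch, spelling out the attributes the kernel normally provides as shorthands:

    #include <stdio.h>
    #include <stdint.h>

    /* The kernel provides these shorthands in <linux/compiler-gcc.h>. */
    #define __packed     __attribute__((packed))
    #define __aligned(n) __attribute__((aligned(n)))

    /* packed alone: alignment drops to 1, so strict-alignment CPUs may be
     * forced to access the u32 members byte by byte. */
    struct rxs_packed {
        uint32_t status0;
        uint32_t status1;
    } __packed;

    /* packed plus aligned(4): identical layout, but word accesses stay legal. */
    struct rxs_aligned {
        uint32_t status0;
        uint32_t status1;
    } __packed __aligned(4);

    int main(void)
    {
        printf("packed: align %zu, packed+aligned(4): align %zu\n",
               _Alignof(struct rxs_packed), _Alignof(struct rxs_aligned));
        return 0;
    }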
index 669b777729b314161a99987812ed1b93ef16c168..44c5454b2ad8273cbfa905ee7f098ead379867ae 100644 (file)
@@ -614,7 +614,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
                                 channel->max_antenna_gain * 2,
                                 channel->max_power * 2,
                                 min((u32) MAX_RATE_POWER,
-                                (u32) regulatory->power_limit));
+                                (u32) regulatory->power_limit), false);
 
        return 0;
 }
@@ -1023,25 +1023,25 @@ static void ar9003_hw_do_getnf(struct ath_hw *ah,
        int16_t nf;
 
        nf = MS(REG_READ(ah, AR_PHY_CCA_0), AR_PHY_MINCCA_PWR);
-       nfarray[0] = sign_extend(nf, 9);
+       nfarray[0] = sign_extend32(nf, 8);
 
        nf = MS(REG_READ(ah, AR_PHY_CCA_1), AR_PHY_CH1_MINCCA_PWR);
-       nfarray[1] = sign_extend(nf, 9);
+       nfarray[1] = sign_extend32(nf, 8);
 
        nf = MS(REG_READ(ah, AR_PHY_CCA_2), AR_PHY_CH2_MINCCA_PWR);
-       nfarray[2] = sign_extend(nf, 9);
+       nfarray[2] = sign_extend32(nf, 8);
 
        if (!IS_CHAN_HT40(ah->curchan))
                return;
 
        nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR);
-       nfarray[3] = sign_extend(nf, 9);
+       nfarray[3] = sign_extend32(nf, 8);
 
        nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_1), AR_PHY_CH1_EXT_MINCCA_PWR);
-       nfarray[4] = sign_extend(nf, 9);
+       nfarray[4] = sign_extend32(nf, 8);
 
        nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_2), AR_PHY_CH2_EXT_MINCCA_PWR);
-       nfarray[5] = sign_extend(nf, 9);
+       nfarray[5] = sign_extend32(nf, 8);
 }
 
 static void ar9003_hw_set_nf_limits(struct ath_hw *ah)
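The 9 -> 8 change above reflects a difference in convention: the driver's private sign_extend(val, nbits) took a bit count, while the generic sign_extend32(value, index) from <linux/bitops.h> takes the 0-based index of the sign bit, so a 9-bit noise-floor field uses index 8. A tiny sketch with sign_extend32 reimplemented locally along the lines of the kernel's definition:

    #include <stdint.h>
    #include <stdio.h>

    /* 'index' is the 0-based position of the sign bit, i.e. 8 for a
     * 9-bit field. */
    static inline int32_t sign_extend32(uint32_t value, int index)
    {
        uint8_t shift = 31 - index;
        return (int32_t)(value << shift) >> shift;
    }

    int main(void)
    {
        uint32_t nf = 0x1f5;                   /* 9-bit two's complement: -11 */

        printf("%d\n", sign_extend32(nf, 8));  /* prints -11 */
        return 0;
    }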
index 170d44a35ccbd2ca06886be2be2ed8c1cd5dcf25..b3180935875de7aebb4ad43ab72be45e08991c01 100644 (file)
@@ -195,7 +195,6 @@ enum ATH_AGGR_STATUS {
 
 #define ATH_TXFIFO_DEPTH 8
 struct ath_txq {
-       int axq_class;
        u32 axq_qnum;
        u32 *axq_link;
        struct list_head axq_q;
@@ -208,11 +207,12 @@ struct ath_txq {
        struct list_head txq_fifo_pending;
        u8 txq_headidx;
        u8 txq_tailidx;
+       int pending_frames;
 };
 
 struct ath_atx_ac {
+       struct ath_txq *txq;
        int sched;
-       int qnum;
        struct list_head list;
        struct list_head tid_q;
 };
@@ -270,7 +270,6 @@ struct ath_node {
        struct ath_atx_ac ac[WME_NUM_AC];
        u16 maxampdu;
        u8 mpdudensity;
-       int last_rssi;
 };
 
 #define AGGR_CLEANUP         BIT(1)
@@ -291,12 +290,11 @@ struct ath_tx_control {
 struct ath_tx {
        u16 seq_no;
        u32 txqsetup;
-       int hwq_map[WME_NUM_AC];
        spinlock_t txbuflock;
        struct list_head txbuf;
        struct ath_txq txq[ATH9K_NUM_TX_QUEUES];
        struct ath_descdma txdma;
-       int pending_frames[WME_NUM_AC];
+       struct ath_txq *txq_map[WME_NUM_AC];
 };
 
 struct ath_rx_edma {
@@ -310,7 +308,6 @@ struct ath_rx {
        u8 rxotherant;
        u32 *rxlink;
        unsigned int rxfilter;
-       spinlock_t pcu_lock;
        spinlock_t rxbuflock;
        struct list_head rxbuf;
        struct ath_descdma rxdma;
@@ -327,7 +324,6 @@ void ath_rx_cleanup(struct ath_softc *sc);
 int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp);
 struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
 void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
-int ath_tx_setup(struct ath_softc *sc, int haltype);
 void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx);
 void ath_draintxq(struct ath_softc *sc,
                     struct ath_txq *txq, bool retry_tx);
@@ -600,9 +596,9 @@ struct ath_softc {
        struct ath_hw *sc_ah;
        void __iomem *mem;
        int irq;
-       spinlock_t sc_resetlock;
        spinlock_t sc_serial_rw;
        spinlock_t sc_pm_lock;
+       spinlock_t sc_pcu_lock;
        struct mutex mutex;
        struct work_struct paprd_work;
        struct work_struct hw_check_work;
@@ -662,11 +658,11 @@ struct ath_wiphy {
        bool idle;
        int chan_idx;
        int chan_is_ht;
+       int last_rssi;
 };
 
 void ath9k_tasklet(unsigned long data);
 int ath_reset(struct ath_softc *sc, bool retry_tx);
-int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc);
 int ath_cabq_update(struct ath_softc *);
 
 static inline void ath_read_cachesize(struct ath_common *common, int *csz)
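With hwq_map[] gone, each WME access category keeps a direct pointer to its struct ath_txq in the new txq_map[], so callers no longer translate an AC into a hardware queue number and then back into &sc->tx.txq[qnum]. A hedged sketch of the lookup with stand-in types (the qnum values are invented):

    #include <stdio.h>

    #define WME_NUM_AC 4

    struct ath_txq { unsigned int axq_qnum; };

    struct ath_tx {
        struct ath_txq txq[10];                 /* hardware queues */
        struct ath_txq *txq_map[WME_NUM_AC];    /* AC -> queue, filled at init */
    };

    int main(void)
    {
        struct ath_tx tx;
        int ac;

        /* init: one data queue per access category (qnum values invented) */
        for (ac = 0; ac < WME_NUM_AC; ac++) {
            tx.txq[ac].axq_qnum = (unsigned int)ac;
            tx.txq_map[ac] = &tx.txq[ac];
        }

        /* later: follow the pointer, no hwq_map[] indirection */
        printf("AC 2 (BE) uses hw queue %u\n", tx.txq_map[2]->axq_qnum);
        return 0;
    }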
index 19891e7d49aea7e371cfaf43d0228013f03e313e..2377376c8d4defff424e367adcf02184e354dfdf 100644 (file)
@@ -28,7 +28,7 @@ int ath_beaconq_config(struct ath_softc *sc)
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_tx_queue_info qi, qi_be;
-       int qnum;
+       struct ath_txq *txq;
 
        ath9k_hw_get_txq_props(ah, sc->beacon.beaconq, &qi);
        if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) {
@@ -38,8 +38,8 @@ int ath_beaconq_config(struct ath_softc *sc)
                qi.tqi_cwmax = 0;
        } else {
                /* Adhoc mode; important thing is to use 2x cwmin. */
-               qnum = sc->tx.hwq_map[WME_AC_BE];
-               ath9k_hw_get_txq_props(ah, qnum, &qi_be);
+               txq = sc->tx.txq_map[WME_AC_BE];
+               ath9k_hw_get_txq_props(ah, txq->axq_qnum, &qi_be);
                qi.tqi_aifs = qi_be.tqi_aifs;
                qi.tqi_cwmin = 4*qi_be.tqi_cwmin;
                qi.tqi_cwmax = qi_be.tqi_cwmax;
@@ -503,7 +503,7 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
 
        /* Set the computed AP beacon timers */
 
-       ath9k_hw_set_interrupts(ah, 0);
+       ath9k_hw_disable_interrupts(ah);
        ath9k_beacon_init(sc, nexttbtt, intval);
        sc->beacon.bmisscnt = 0;
        ath9k_hw_set_interrupts(ah, ah->imask);
@@ -638,7 +638,7 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
 
        /* Set the computed STA beacon timers */
 
-       ath9k_hw_set_interrupts(ah, 0);
+       ath9k_hw_disable_interrupts(ah);
        ath9k_hw_set_sta_beacon_timers(ah, &bs);
        ah->imask |= ATH9K_INT_BMISS;
        ath9k_hw_set_interrupts(ah, ah->imask);
@@ -686,7 +686,7 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
 
        /* Set the computed ADHOC beacon timers */
 
-       ath9k_hw_set_interrupts(ah, 0);
+       ath9k_hw_disable_interrupts(ah);
        ath9k_beacon_init(sc, nexttbtt, intval);
        sc->beacon.bmisscnt = 0;
        ath9k_hw_set_interrupts(ah, ah->imask);
index f43a2d98421c37731118c0bda90c37538608409a..48b07c319a7fa506780d22c7b0f77d4bbde036c4 100644 (file)
@@ -107,12 +107,10 @@ static u32 ath9k_get_extchanmode(struct ieee80211_channel *chan,
 /*
  * Update internal channel flags.
  */
-void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw,
-                              struct ath9k_channel *ichan)
+void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
+                              struct ieee80211_channel *chan,
+                              enum nl80211_channel_type channel_type)
 {
-       struct ieee80211_channel *chan = hw->conf.channel;
-       struct ieee80211_conf *conf = &hw->conf;
-
        ichan->channel = chan->center_freq;
        ichan->chan = chan;
 
@@ -124,9 +122,8 @@ void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw,
                ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
        }
 
-       if (conf_is_ht(conf))
-               ichan->chanmode = ath9k_get_extchanmode(chan,
-                                                       conf->channel_type);
+       if (channel_type != NL80211_CHAN_NO_HT)
+               ichan->chanmode = ath9k_get_extchanmode(chan, channel_type);
 }
 EXPORT_SYMBOL(ath9k_cmn_update_ichannel);
 
@@ -142,7 +139,7 @@ struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
 
        chan_idx = curchan->hw_value;
        channel = &ah->channels[chan_idx];
-       ath9k_cmn_update_ichannel(hw, channel);
+       ath9k_cmn_update_ichannel(channel, curchan, hw->conf.channel_type);
 
        return channel;
 }
index fea3b3315391d3e1e0af5fc9d51a3d82ffb69004..4c04ee85ff0e75963b6724e53521d218db459012 100644 (file)
 #define WME_MAX_BA              WME_BA_BMP_SIZE
 #define ATH_TID_MAX_BUFS        (2 * WME_MAX_BA)
 
-#define WME_AC_BE   0
-#define WME_AC_BK   1
-#define WME_AC_VI   2
-#define WME_AC_VO   3
+/* These must match mac80211 skb queue mapping numbers */
+#define WME_AC_VO   0
+#define WME_AC_VI   1
+#define WME_AC_BE   2
+#define WME_AC_BK   3
 #define WME_NUM_AC  4
 
 #define ATH_RSSI_DUMMY_MARKER   0x127
@@ -62,8 +63,9 @@ enum ath_stomp_type {
 
 int ath9k_cmn_padpos(__le16 frame_control);
 int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
-void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw,
-                              struct ath9k_channel *ichan);
+void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
+                              struct ieee80211_channel *chan,
+                              enum nl80211_channel_type channel_type);
 struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
                                               struct ath_hw *ah);
 int ath9k_cmn_count_streams(unsigned int chainmask, int max);
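Reordering WME_AC_* to VO=0, VI=1, BE=2, BK=3 makes the driver's access-category constants equal to the queue numbers mac80211 assigns to frames, so skb_get_queue_mapping() can index driver tables directly. A small sketch of the alignment; the enumerator names on the mac80211 side are illustrative, only the ordering matters:

    #include <stdio.h>

    /* Queue numbering mac80211 uses for frames (names illustrative). */
    enum { Q_VO = 0, Q_VI, Q_BE, Q_BK };

    /* The reordered driver constants line up with it one-to-one. */
    enum { WME_AC_VO = 0, WME_AC_VI, WME_AC_BE, WME_AC_BK, WME_NUM_AC };

    int main(void)
    {
        const char *name[WME_NUM_AC] = { "VO", "VI", "BE", "BK" };
        int skb_queue = Q_BE;   /* what skb_get_queue_mapping() would return */

        /* No translation table needed: the queue number is the AC index. */
        printf("queue %d -> AC %s\n", skb_queue, name[skb_queue]);
        return 0;
    }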
index 43e71a944cb17331340e2460b9f0fae79323e9da..0c3c74c157fb036123322ae7176f51543ae3f999 100644 (file)
@@ -461,16 +461,16 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
 
        /* Put variable-length stuff down here, and check for overflows. */
        for (i = 0; i < sc->num_sec_wiphy; i++) {
-               struct ath_wiphy *aphy = sc->sec_wiphy[i];
-               if (aphy == NULL)
+               struct ath_wiphy *aphy_tmp = sc->sec_wiphy[i];
+               if (aphy_tmp == NULL)
                        continue;
-               chan = aphy->hw->conf.channel;
+               chan = aphy_tmp->hw->conf.channel;
                len += snprintf(buf + len, sizeof(buf) - len,
                        "secondary: %s (%s chan=%d ht=%d)\n",
-                       wiphy_name(aphy->hw->wiphy),
-                       ath_wiphy_state_str(aphy->state),
+                       wiphy_name(aphy_tmp->hw->wiphy),
+                       ath_wiphy_state_str(aphy_tmp->state),
                        ieee80211_frequency_to_channel(chan->center_freq),
-                       aphy->chan_is_ht);
+                                                      aphy_tmp->chan_is_ht);
+                       aphy_tmp->chan_is_ht);
        }
        if (len > sizeof(buf))
                len = sizeof(buf);
@@ -585,10 +585,10 @@ static const struct file_operations fops_wiphy = {
        do {                                                            \
                len += snprintf(buf + len, size - len,                  \
                                "%s%13u%11u%10u%10u\n", str,            \
-               sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_BE]].elem, \
-               sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_BK]].elem, \
-               sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_VI]].elem, \
-               sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_VO]].elem); \
+               sc->debug.stats.txstats[WME_AC_BE].elem, \
+               sc->debug.stats.txstats[WME_AC_BK].elem, \
+               sc->debug.stats.txstats[WME_AC_VI].elem, \
+               sc->debug.stats.txstats[WME_AC_VO].elem); \
 } while(0)
 
 static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
@@ -630,33 +630,35 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
        return retval;
 }
 
-void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
-                      struct ath_buf *bf, struct ath_tx_status *ts)
+void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
+                      struct ath_tx_status *ts)
 {
-       TX_STAT_INC(txq->axq_qnum, tx_pkts_all);
-       sc->debug.stats.txstats[txq->axq_qnum].tx_bytes_all += bf->bf_mpdu->len;
+       int qnum = skb_get_queue_mapping(bf->bf_mpdu);
+
+       TX_STAT_INC(qnum, tx_pkts_all);
+       sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len;
 
        if (bf_isampdu(bf)) {
                if (bf_isxretried(bf))
-                       TX_STAT_INC(txq->axq_qnum, a_xretries);
+                       TX_STAT_INC(qnum, a_xretries);
                else
-                       TX_STAT_INC(txq->axq_qnum, a_completed);
+                       TX_STAT_INC(qnum, a_completed);
        } else {
-               TX_STAT_INC(txq->axq_qnum, completed);
+               TX_STAT_INC(qnum, completed);
        }
 
        if (ts->ts_status & ATH9K_TXERR_FIFO)
-               TX_STAT_INC(txq->axq_qnum, fifo_underrun);
+               TX_STAT_INC(qnum, fifo_underrun);
        if (ts->ts_status & ATH9K_TXERR_XTXOP)
-               TX_STAT_INC(txq->axq_qnum, xtxop);
+               TX_STAT_INC(qnum, xtxop);
        if (ts->ts_status & ATH9K_TXERR_TIMER_EXPIRED)
-               TX_STAT_INC(txq->axq_qnum, timer_exp);
+               TX_STAT_INC(qnum, timer_exp);
        if (ts->ts_flags & ATH9K_TX_DESC_CFG_ERR)
-               TX_STAT_INC(txq->axq_qnum, desc_cfg_err);
+               TX_STAT_INC(qnum, desc_cfg_err);
        if (ts->ts_flags & ATH9K_TX_DATA_UNDERRUN)
-               TX_STAT_INC(txq->axq_qnum, data_underrun);
+               TX_STAT_INC(qnum, data_underrun);
        if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN)
-               TX_STAT_INC(txq->axq_qnum, delim_underrun);
+               TX_STAT_INC(qnum, delim_underrun);
 }
 
 static const struct file_operations fops_xmit = {
index bb0823242ba008e0ccc4cbe0b169539ffa8c8dd3..646ff7e04c88d983ec3504a2befdc8d343f249f1 100644 (file)
@@ -169,8 +169,8 @@ void ath9k_exit_debug(struct ath_hw *ah);
 int ath9k_debug_create_root(void);
 void ath9k_debug_remove_root(void);
 void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
-void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
-                      struct ath_buf *bf, struct ath_tx_status *ts);
+void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
+                      struct ath_tx_status *ts);
 void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs);
 
 #else
@@ -199,7 +199,6 @@ static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
 }
 
 static inline void ath_debug_stat_tx(struct ath_softc *sc,
-                                    struct ath_txq *txq,
                                     struct ath_buf *bf,
                                     struct ath_tx_status *ts)
 {
index dacb45e1b9063b620736475b6116975fcb60cf4d..3c99830dab0c2b385725075e5d36afa05d7d44b6 100644 (file)
@@ -680,7 +680,8 @@ struct eeprom_ops {
        void (*set_addac)(struct ath_hw *hw, struct ath9k_channel *chan);
        void (*set_txpower)(struct ath_hw *hw, struct ath9k_channel *chan,
                           u16 cfgCtl, u8 twiceAntennaReduction,
-                          u8 twiceMaxRegulatoryPower, u8 powerLimit);
+                          u8 twiceMaxRegulatoryPower, u8 powerLimit,
+                          bool test);
        u16 (*get_spur_channel)(struct ath_hw *ah, u16 i, bool is2GHz);
 };
 
index 4fa4d8e28c64236ffa5d7547ad5d5234565658bc..c40c534c66622eb8bfb1a9c8459aea75d648198e 100644 (file)
@@ -726,7 +726,7 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
                                    u16 cfgCtl,
                                    u8 twiceAntennaReduction,
                                    u8 twiceMaxRegulatoryPower,
-                                   u8 powerLimit)
+                                   u8 powerLimit, bool test)
 {
        struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
        struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k;
@@ -751,15 +751,20 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
 
        ath9k_hw_set_4k_power_cal_table(ah, chan, &txPowerIndexOffset);
 
+       regulatory->max_power_level = 0;
        for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
                ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]);
                if (ratesArray[i] > AR5416_MAX_RATE_POWER)
                        ratesArray[i] = AR5416_MAX_RATE_POWER;
+
+               if (ratesArray[i] > regulatory->max_power_level)
+                       regulatory->max_power_level = ratesArray[i];
        }
 
+       if (test)
+           return;
 
        /* Update regulatory */
-
        i = rate6mb;
        if (IS_CHAN_HT40(chan))
                i = rateHt40_0;
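The new bool test parameter turns the per-rate power computation into a dry run: max_power_level is now accumulated while the rates are clamped, and when test is set the function returns before any registers would be written, so callers can ask what limit a channel could reach. A simplified sketch of that contract (the values and the register step are placeholders):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_RATE_POWER 63   /* clamp ceiling, illustrative value */

    /* Sketch of the new contract: with 'test' set, only compute the clamped
     * per-rate powers and report the maximum; skip the register programming
     * a real set_txpower implementation would do afterwards. */
    static void set_txpower(int16_t *rates, int n, int offset, int *max_power, int test)
    {
        int i;

        *max_power = 0;
        for (i = 0; i < n; i++) {
            rates[i] = (int16_t)(rates[i] + offset);
            if (rates[i] > MAX_RATE_POWER)
                rates[i] = MAX_RATE_POWER;
            if (rates[i] > *max_power)
                *max_power = rates[i];
        }

        if (test)
            return;     /* dry run: the caller only wanted max_power */

        /* ... here the real code writes the AR_PHY_POWER_TX_* registers ... */
    }

    int main(void)
    {
        int16_t rates[4] = { 20, 30, 58, 40 };
        int max;

        set_txpower(rates, 4, 10, &max, 1);
        printf("max power (half dB) = %d\n", max);  /* 63 after clamping */
        return 0;
    }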
index 195406db3bd899b28f96c7b8214428822c3735db..3ad1de253c8a4277675b9bede13a9103edbf6957 100644 (file)
@@ -853,7 +853,7 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
                                        struct ath9k_channel *chan, u16 cfgCtl,
                                        u8 twiceAntennaReduction,
                                        u8 twiceMaxRegulatoryPower,
-                                       u8 powerLimit)
+                                       u8 powerLimit, bool test)
 {
        struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
        struct ar9287_eeprom *pEepData = &ah->eeprom.map9287;
@@ -877,12 +877,26 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
 
        ath9k_hw_set_ar9287_power_cal_table(ah, chan, &txPowerIndexOffset);
 
+       regulatory->max_power_level = 0;
        for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
                ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]);
                if (ratesArray[i] > AR9287_MAX_RATE_POWER)
                        ratesArray[i] = AR9287_MAX_RATE_POWER;
+
+               if (ratesArray[i] > regulatory->max_power_level)
+                       regulatory->max_power_level = ratesArray[i];
        }
 
+       if (test)
+               return;
+
+       if (IS_CHAN_2GHZ(chan))
+               i = rate1l;
+       else
+               i = rate6mb;
+
+       regulatory->max_power_level = ratesArray[i];
+
        if (AR_SREV_9280_20_OR_LATER(ah)) {
                for (i = 0; i < Ar5416RateSize; i++)
                        ratesArray[i] -= AR9287_PWR_TABLE_OFFSET_DB * 2;
@@ -971,17 +985,6 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
                          | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8)
                          | ATH9K_POW_SM(ratesArray[rateDupCck], 0));
        }
-
-       if (IS_CHAN_2GHZ(chan))
-               i = rate1l;
-       else
-               i = rate6mb;
-
-       if (AR_SREV_9280_20_OR_LATER(ah))
-               regulatory->max_power_level =
-                       ratesArray[i] + AR9287_PWR_TABLE_OFFSET_DB * 2;
-       else
-               regulatory->max_power_level = ratesArray[i];
 }
 
 static void ath9k_hw_ar9287_set_addac(struct ath_hw *ah,
index 76b4d65472dd2b37dbf98ff69b57646e53443c61..a819ddc9fdbccde8fa12debc95c86f6b2c3eda58 100644 (file)
@@ -1258,7 +1258,7 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
                                    u16 cfgCtl,
                                    u8 twiceAntennaReduction,
                                    u8 twiceMaxRegulatoryPower,
-                                   u8 powerLimit)
+                                   u8 powerLimit, bool test)
 {
 #define RT_AR_DELTA(x) (ratesArray[x] - cck_ofdm_delta)
        struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
@@ -1285,12 +1285,44 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
 
        ath9k_hw_set_def_power_cal_table(ah, chan, &txPowerIndexOffset);
 
+       regulatory->max_power_level = 0;
        for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
                ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]);
                if (ratesArray[i] > AR5416_MAX_RATE_POWER)
                        ratesArray[i] = AR5416_MAX_RATE_POWER;
+               if (ratesArray[i] > regulatory->max_power_level)
+                       regulatory->max_power_level = ratesArray[i];
        }
 
+       if (!test) {
+               i = rate6mb;
+
+               if (IS_CHAN_HT40(chan))
+                       i = rateHt40_0;
+               else if (IS_CHAN_HT20(chan))
+                       i = rateHt20_0;
+
+               regulatory->max_power_level = ratesArray[i];
+       }
+
+       switch(ar5416_get_ntxchains(ah->txchainmask)) {
+       case 1:
+               break;
+       case 2:
+               regulatory->max_power_level += INCREASE_MAXPOW_BY_TWO_CHAIN;
+               break;
+       case 3:
+               regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN;
+               break;
+       default:
+               ath_print(ath9k_hw_common(ah), ATH_DBG_EEPROM,
+                         "Invalid chainmask configuration\n");
+               break;
+       }
+
+       if (test)
+               return;
+
        if (AR_SREV_9280_20_OR_LATER(ah)) {
                for (i = 0; i < Ar5416RateSize; i++) {
                        int8_t pwr_table_offset;
@@ -1387,34 +1419,6 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
        REG_WRITE(ah, AR_PHY_POWER_TX_SUB,
                  ATH9K_POW_SM(pModal->pwrDecreaseFor3Chain, 6)
                  | ATH9K_POW_SM(pModal->pwrDecreaseFor2Chain, 0));
-
-       i = rate6mb;
-
-       if (IS_CHAN_HT40(chan))
-               i = rateHt40_0;
-       else if (IS_CHAN_HT20(chan))
-               i = rateHt20_0;
-
-       if (AR_SREV_9280_20_OR_LATER(ah))
-               regulatory->max_power_level =
-                       ratesArray[i] + AR5416_PWR_TABLE_OFFSET_DB * 2;
-       else
-               regulatory->max_power_level = ratesArray[i];
-
-       switch(ar5416_get_ntxchains(ah->txchainmask)) {
-       case 1:
-               break;
-       case 2:
-               regulatory->max_power_level += INCREASE_MAXPOW_BY_TWO_CHAIN;
-               break;
-       case 3:
-               regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN;
-               break;
-       default:
-               ath_print(ath9k_hw_common(ah), ATH_DBG_EEPROM,
-                         "Invalid chainmask configuration\n");
-               break;
-       }
 }
 
 static u8 ath9k_hw_def_get_num_ant_config(struct ath_hw *ah,
index 4a9a68bba324790b5a549332e71723867a46eaf4..6a1a482f9dc30e2cacabe971186b630d5bc0c294 100644 (file)
@@ -259,7 +259,7 @@ static void ath9k_gen_timer_start(struct ath_hw *ah,
        ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
 
        if ((ah->imask & ATH9K_INT_GENTIMER) == 0) {
-               ath9k_hw_set_interrupts(ah, 0);
+               ath9k_hw_disable_interrupts(ah);
                ah->imask |= ATH9K_INT_GENTIMER;
                ath9k_hw_set_interrupts(ah, ah->imask);
        }
@@ -273,7 +273,7 @@ static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
 
        /* if no timer is enabled, turn off interrupt mask */
        if (timer_table->timer_mask.val == 0) {
-               ath9k_hw_set_interrupts(ah, 0);
+               ath9k_hw_disable_interrupts(ah);
                ah->imask &= ~ATH9K_INT_GENTIMER;
                ath9k_hw_set_interrupts(ah, ah->imask);
        }
@@ -310,10 +310,8 @@ static void ath_btcoex_period_timer(unsigned long data)
 
                timer_period = is_btscan ? btcoex->btscan_no_stomp :
                                           btcoex->btcoex_no_stomp;
-               ath9k_gen_timer_start(ah,
-                                     btcoex->no_stomp_timer,
-                                     (ath9k_hw_gettsf32(ah) +
-                                      timer_period), timer_period * 10);
+               ath9k_gen_timer_start(ah, btcoex->no_stomp_timer, 0,
+                                     timer_period * 10);
                btcoex->hw_timer_enabled = true;
        }
 
index 9a3be8da755d2f7181ada323e5ec9054b58a0a9e..e9761c2c870073a4fa08e6a7b276bc8f95136c40 100644 (file)
@@ -29,7 +29,7 @@ static void ath_update_txpow(struct ath9k_htc_priv *priv)
        struct ath_hw *ah = priv->ah;
 
        if (priv->curtxpow != priv->txpowlimit) {
-               ath9k_hw_set_txpowerlimit(ah, priv->txpowlimit);
+               ath9k_hw_set_txpowerlimit(ah, priv->txpowlimit, false);
                /* read back in case value is clamped */
                priv->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
        }
@@ -184,47 +184,6 @@ err:
        return ret;
 }
 
-static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
-{
-       struct ath_common *common = ath9k_hw_common(priv->ah);
-       struct ath9k_htc_target_vif hvif;
-       int ret = 0;
-       u8 cmd_rsp;
-
-       if (priv->nvifs > 0)
-               return -ENOBUFS;
-
-       memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
-       memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
-
-       hvif.opmode = cpu_to_be32(HTC_M_MONITOR);
-       priv->ah->opmode = NL80211_IFTYPE_MONITOR;
-       hvif.index = priv->nvifs;
-
-       WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif);
-       if (ret)
-               return ret;
-
-       priv->nvifs++;
-       return 0;
-}
-
-static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
-{
-       struct ath_common *common = ath9k_hw_common(priv->ah);
-       struct ath9k_htc_target_vif hvif;
-       int ret = 0;
-       u8 cmd_rsp;
-
-       memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
-       memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
-       hvif.index = 0; /* Should do for now */
-       WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
-       priv->nvifs--;
-
-       return ret;
-}
-
 static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
                                 struct ieee80211_vif *vif,
                                 struct ieee80211_sta *sta)
@@ -1240,16 +1199,6 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
        WMI_CMD(WMI_STOP_RECV_CMDID);
        skb_queue_purge(&priv->tx_queue);
 
-       /* Remove monitor interface here */
-       if (ah->opmode == NL80211_IFTYPE_MONITOR) {
-               if (ath9k_htc_remove_monitor_interface(priv))
-                       ath_print(common, ATH_DBG_FATAL,
-                                 "Unable to remove monitor interface\n");
-               else
-                       ath_print(common, ATH_DBG_CONFIG,
-                                 "Monitor interface removed\n");
-       }
-
        if (ah->btcoex_hw.enabled) {
                ath9k_hw_btcoex_disable(ah);
                if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
@@ -1400,7 +1349,9 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
                ath_print(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
                          curchan->center_freq);
 
-               ath9k_cmn_update_ichannel(hw, &priv->ah->channels[pos]);
+               ath9k_cmn_update_ichannel(&priv->ah->channels[pos],
+                                         hw->conf.channel,
+                                         hw->conf.channel_type);
 
                if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
                        ath_print(common, ATH_DBG_FATAL,
@@ -1421,16 +1372,13 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
                }
        }
 
-       if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+       if (changed & IEEE80211_CONF_CHANGE_MONITOR)
                if (conf->flags & IEEE80211_CONF_MONITOR) {
-                       if (ath9k_htc_add_monitor_interface(priv))
-                               ath_print(common, ATH_DBG_FATAL,
-                                         "Failed to set monitor mode\n");
-                       else
-                               ath_print(common, ATH_DBG_CONFIG,
-                                         "HW opmode set to Monitor mode\n");
+                       ath_print(common, ATH_DBG_CONFIG,
+                                 "HW opmode set to Monitor mode\n");
+                       priv->ah->opmode = NL80211_IFTYPE_MONITOR;
                }
-       }
+
 
        if (changed & IEEE80211_CONF_CHANGE_IDLE) {
                mutex_lock(&priv->htc_pm_lock);
index 29d80ca783933684b7c5c531cbb970ee7e006cda..77958675b55f6eeaf56ad2e59810f7bf667359c6 100644 (file)
 /* TX */
 /******/
 
+static const int subtype_txq_to_hwq[] = {
+       [WME_AC_BE] = ATH_TXQ_AC_BE,
+       [WME_AC_BK] = ATH_TXQ_AC_BK,
+       [WME_AC_VI] = ATH_TXQ_AC_VI,
+       [WME_AC_VO] = ATH_TXQ_AC_VO,
+};
+
 #define ATH9K_HTC_INIT_TXQ(subtype) do {                       \
-               qi.tqi_subtype = subtype;                       \
+               qi.tqi_subtype = subtype_txq_to_hwq[subtype];   \
                qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;             \
                qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;            \
                qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;            \
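Because the WME_AC_* values now follow mac80211's ordering while the queue subtype numbering on the hardware side keeps the old BE-first layout, the HTC init macro translates through a small lookup table. A standalone sketch of the same table and its use; the enum values mirror the ones added in hw.h further down:

    #include <stdio.h>

    /* mac80211-ordered access categories (after the reordering above). */
    enum { WME_AC_VO = 0, WME_AC_VI, WME_AC_BE, WME_AC_BK, WME_NUM_AC };

    /* Queue subtypes on the hardware side keep the old BE-first order. */
    enum { ATH_TXQ_AC_BE = 0, ATH_TXQ_AC_BK, ATH_TXQ_AC_VI, ATH_TXQ_AC_VO };

    static const int subtype_txq_to_hwq[WME_NUM_AC] = {
        [WME_AC_BE] = ATH_TXQ_AC_BE,
        [WME_AC_BK] = ATH_TXQ_AC_BK,
        [WME_AC_VI] = ATH_TXQ_AC_VI,
        [WME_AC_VO] = ATH_TXQ_AC_VO,
    };

    int main(void)
    {
        /* A voice frame (mac80211 queue 0) still lands on subtype 3. */
        printf("VO -> hw subtype %d\n", subtype_txq_to_hwq[WME_AC_VO]);
        return 0;
    }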
index 6ebc68bca91f0a999614572b38f69452df49645d..5fb1bf33faa0922af81cc3c2fae5a181e70fe884 100644 (file)
@@ -1170,7 +1170,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
                             channel->max_antenna_gain * 2,
                             channel->max_power * 2,
                             min((u32) MAX_RATE_POWER,
-                            (u32) regulatory->power_limit));
+                            (u32) regulatory->power_limit), false);
 
        ath9k_hw_rfbus_done(ah);
 
@@ -2176,7 +2176,7 @@ bool ath9k_hw_disable(struct ath_hw *ah)
 }
 EXPORT_SYMBOL(ath9k_hw_disable);
 
-void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit)
+void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test)
 {
        struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
        struct ath9k_channel *chan = ah->curchan;
@@ -2189,7 +2189,7 @@ void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit)
                                 channel->max_antenna_gain * 2,
                                 channel->max_power * 2,
                                 min((u32) MAX_RATE_POWER,
-                                (u32) regulatory->power_limit));
+                                (u32) regulatory->power_limit), test);
 }
 EXPORT_SYMBOL(ath9k_hw_set_txpowerlimit);
 
@@ -2323,11 +2323,10 @@ static u32 rightmost_index(struct ath_gen_timer_table *timer_table, u32 *mask)
        return timer_table->gen_timer_index[b];
 }
 
-u32 ath9k_hw_gettsf32(struct ath_hw *ah)
+static u32 ath9k_hw_gettsf32(struct ath_hw *ah)
 {
        return REG_READ(ah, AR_TSF_L32);
 }
-EXPORT_SYMBOL(ath9k_hw_gettsf32);
 
 struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
                                          void (*trigger)(void *),
index d47d1b4b6002f2650cba8c80401cd52089800cc6..e29c7122f466f3742e63e8d33ddf28e6861fdd6e 100644 (file)
 #define PAPRD_GAIN_TABLE_ENTRIES    32
 #define PAPRD_TABLE_SZ              24
 
+enum ath_hw_txq_subtype {
+       ATH_TXQ_AC_BE = 0,
+       ATH_TXQ_AC_BK = 1,
+       ATH_TXQ_AC_VI = 2,
+       ATH_TXQ_AC_VO = 3,
+};
+
 enum ath_ini_subsys {
        ATH_INI_PRE = 0,
        ATH_INI_CORE,
@@ -819,12 +826,6 @@ static inline struct ath_hw_ops *ath9k_hw_ops(struct ath_hw *ah)
        return &ah->ops;
 }
 
-static inline int sign_extend(int val, const int nbits)
-{
-       int order = BIT(nbits-1);
-       return (val ^ order) - order;
-}
-
 /* Initialization, Detach, Reset */
 const char *ath9k_hw_probe(u16 vendorid, u16 devid);
 void ath9k_hw_deinit(struct ath_hw *ah);
@@ -861,7 +862,7 @@ u32 ath9k_hw_getrxfilter(struct ath_hw *ah);
 void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits);
 bool ath9k_hw_phy_disable(struct ath_hw *ah);
 bool ath9k_hw_disable(struct ath_hw *ah);
-void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit);
+void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test);
 void ath9k_hw_setopmode(struct ath_hw *ah);
 void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1);
 void ath9k_hw_setbssidmask(struct ath_hw *ah);
@@ -893,7 +894,6 @@ void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer);
 
 void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer);
 void ath_gen_timer_isr(struct ath_hw *hw);
-u32 ath9k_hw_gettsf32(struct ath_hw *ah);
 
 void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len);
 
index 92bc5c5f48768ac42ca4b1bd02690d9d38ebed80..5c26818d79ecb257b4827885b87cd01fb21bd866 100644 (file)
@@ -398,7 +398,8 @@ static void ath9k_init_crypto(struct ath_softc *sc)
 
 static int ath9k_init_btcoex(struct ath_softc *sc)
 {
-       int r, qnum;
+       struct ath_txq *txq;
+       int r;
 
        switch (sc->sc_ah->btcoex_hw.scheme) {
        case ATH_BTCOEX_CFG_NONE:
@@ -411,8 +412,8 @@ static int ath9k_init_btcoex(struct ath_softc *sc)
                r = ath_init_btcoex_timer(sc);
                if (r)
                        return -1;
-               qnum = sc->tx.hwq_map[WME_AC_BE];
-               ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
+               txq = sc->tx.txq_map[WME_AC_BE];
+               ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
                sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
                break;
        default:
@@ -425,59 +426,18 @@ static int ath9k_init_btcoex(struct ath_softc *sc)
 
 static int ath9k_init_queues(struct ath_softc *sc)
 {
-       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int i = 0;
 
-       for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
-               sc->tx.hwq_map[i] = -1;
-
        sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
-       if (sc->beacon.beaconq == -1) {
-               ath_print(common, ATH_DBG_FATAL,
-                         "Unable to setup a beacon xmit queue\n");
-               goto err;
-       }
-
        sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
-       if (sc->beacon.cabq == NULL) {
-               ath_print(common, ATH_DBG_FATAL,
-                         "Unable to setup CAB xmit queue\n");
-               goto err;
-       }
 
        sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
        ath_cabq_update(sc);
 
-       if (!ath_tx_setup(sc, WME_AC_BK)) {
-               ath_print(common, ATH_DBG_FATAL,
-                         "Unable to setup xmit queue for BK traffic\n");
-               goto err;
-       }
-
-       if (!ath_tx_setup(sc, WME_AC_BE)) {
-               ath_print(common, ATH_DBG_FATAL,
-                         "Unable to setup xmit queue for BE traffic\n");
-               goto err;
-       }
-       if (!ath_tx_setup(sc, WME_AC_VI)) {
-               ath_print(common, ATH_DBG_FATAL,
-                         "Unable to setup xmit queue for VI traffic\n");
-               goto err;
-       }
-       if (!ath_tx_setup(sc, WME_AC_VO)) {
-               ath_print(common, ATH_DBG_FATAL,
-                         "Unable to setup xmit queue for VO traffic\n");
-               goto err;
-       }
+       for (i = 0; i < WME_NUM_AC; i++)
+               sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
 
        return 0;
-
-err:
-       for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
-               if (ATH_TXQ_SETUP(sc, i))
-                       ath_tx_cleanupq(sc, &sc->tx.txq[i]);
-
-       return -EIO;
 }
 
 static int ath9k_init_channels_rates(struct ath_softc *sc)
@@ -583,7 +543,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
        spin_lock_init(&common->cc_lock);
 
        spin_lock_init(&sc->wiphy_lock);
-       spin_lock_init(&sc->sc_resetlock);
        spin_lock_init(&sc->sc_serial_rw);
        spin_lock_init(&sc->sc_pm_lock);
        mutex_init(&sc->mutex);
@@ -645,6 +604,37 @@ err_hw:
        return ret;
 }
 
+static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
+{
+       struct ieee80211_supported_band *sband;
+       struct ieee80211_channel *chan;
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
+       int i;
+
+       sband = &sc->sbands[band];
+       for (i = 0; i < sband->n_channels; i++) {
+               chan = &sband->channels[i];
+               ah->curchan = &ah->channels[chan->hw_value];
+               ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
+               ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
+               chan->max_power = reg->max_power_level / 2;
+       }
+}
+
+static void ath9k_init_txpower_limits(struct ath_softc *sc)
+{
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath9k_channel *curchan = ah->curchan;
+
+       if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
+               ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
+       if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
+               ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);
+
+       ah->curchan = curchan;
+}
+
 void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
 {
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -706,6 +696,7 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
                    const struct ath_bus_ops *bus_ops)
 {
        struct ieee80211_hw *hw = sc->hw;
+       struct ath_wiphy *aphy = hw->priv;
        struct ath_common *common;
        struct ath_hw *ah;
        int error = 0;
@@ -738,6 +729,8 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
        if (error != 0)
                goto error_rx;
 
+       ath9k_init_txpower_limits(sc);
+
        /* Register with mac80211 */
        error = ieee80211_register_hw(hw);
        if (error)
@@ -755,6 +748,7 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
        INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
        INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
        sc->wiphy_scheduler_int = msecs_to_jiffies(500);
+       aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
 
        ath_init_leds(sc);
        ath_start_rfkill_poll(sc);
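The new ath9k_init_txpower_limits() path uses the dry-run mode to walk every supported channel before the wiphy is registered, recording the reachable limit as the channel's max_power; the hardware counts tx power in half-dB steps (note the "* 2" factors in the reset paths above), hence the division by two. A condensed sketch of that loop, with a made-up helper standing in for the test-mode power calculation:

    #include <stdio.h>

    struct channel { int hw_value; int max_power; /* dBm */ };

    /* Hypothetical stand-in for the test-mode power calculation: it would
     * return the attainable limit for one channel in half-dB units. */
    static int set_txpower_test(int chan_hw_value)
    {
        return 40 + (chan_hw_value & 1) * 6;    /* fake per-channel data */
    }

    int main(void)
    {
        struct channel band[3] = { { 0, 0 }, { 1, 0 }, { 2, 0 } };
        int i;

        for (i = 0; i < 3; i++) {
            int max_half_db = set_txpower_test(band[i].hw_value);
            band[i].max_power = max_half_db / 2;    /* advertise whole dBm */
            printf("chan %d: max %d dBm\n", i, band[i].max_power);
        }
        return 0;
    }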
index 8c13479b17cd9d418f7d7f496b01dbf7445ba202..65b1ee2a9792fbce453958e0af80c447cd4ad9ce 100644 (file)
@@ -117,12 +117,11 @@ EXPORT_SYMBOL(ath9k_hw_numtxpending);
 bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
 {
        u32 txcfg, curLevel, newLevel;
-       enum ath9k_int omask;
 
        if (ah->tx_trig_level >= ah->config.max_txtrig_level)
                return false;
 
-       omask = ath9k_hw_set_interrupts(ah, ah->imask & ~ATH9K_INT_GLOBAL);
+       ath9k_hw_disable_interrupts(ah);
 
        txcfg = REG_READ(ah, AR_TXCFG);
        curLevel = MS(txcfg, AR_FTRIG);
@@ -136,7 +135,7 @@ bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
                REG_WRITE(ah, AR_TXCFG,
                          (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));
 
-       ath9k_hw_set_interrupts(ah, omask);
+       ath9k_hw_enable_interrupts(ah);
 
        ah->tx_trig_level = newLevel;
 
@@ -849,28 +848,59 @@ bool ath9k_hw_intrpend(struct ath_hw *ah)
 }
 EXPORT_SYMBOL(ath9k_hw_intrpend);
 
-enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah,
-                                             enum ath9k_int ints)
+void ath9k_hw_disable_interrupts(struct ath_hw *ah)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+
+       ath_print(common, ATH_DBG_INTERRUPT, "disable IER\n");
+       REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
+       (void) REG_READ(ah, AR_IER);
+       if (!AR_SREV_9100(ah)) {
+               REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
+               (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);
+
+               REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
+               (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
+       }
+}
+EXPORT_SYMBOL(ath9k_hw_disable_interrupts);
+
+void ath9k_hw_enable_interrupts(struct ath_hw *ah)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+
+       if (!(ah->imask & ATH9K_INT_GLOBAL))
+               return;
+
+       ath_print(common, ATH_DBG_INTERRUPT, "enable IER\n");
+       REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
+       if (!AR_SREV_9100(ah)) {
+               REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
+                         AR_INTR_MAC_IRQ);
+               REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
+
+
+               REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
+                         AR_INTR_SYNC_DEFAULT);
+               REG_WRITE(ah, AR_INTR_SYNC_MASK,
+                         AR_INTR_SYNC_DEFAULT);
+       }
+       ath_print(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
+                 REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
+}
+EXPORT_SYMBOL(ath9k_hw_enable_interrupts);
+
+void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
 {
        enum ath9k_int omask = ah->imask;
        u32 mask, mask2;
        struct ath9k_hw_capabilities *pCap = &ah->caps;
        struct ath_common *common = ath9k_hw_common(ah);
 
-       ath_print(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
-
-       if (omask & ATH9K_INT_GLOBAL) {
-               ath_print(common, ATH_DBG_INTERRUPT, "disable IER\n");
-               REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
-               (void) REG_READ(ah, AR_IER);
-               if (!AR_SREV_9100(ah)) {
-                       REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
-                       (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);
+       if (!(ints & ATH9K_INT_GLOBAL))
+               ath9k_hw_enable_interrupts(ah);
 
-                       REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
-                       (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
-               }
-       }
+       ath_print(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
 
        /* TODO: global int Ref count */
        mask = ints & ATH9K_INT_COMMON;
@@ -946,24 +976,8 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah,
                        REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
        }
 
-       if (ints & ATH9K_INT_GLOBAL) {
-               ath_print(common, ATH_DBG_INTERRUPT, "enable IER\n");
-               REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
-               if (!AR_SREV_9100(ah)) {
-                       REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
-                                 AR_INTR_MAC_IRQ);
-                       REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
-
-
-                       REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
-                                 AR_INTR_SYNC_DEFAULT);
-                       REG_WRITE(ah, AR_INTR_SYNC_MASK,
-                                 AR_INTR_SYNC_DEFAULT);
-               }
-               ath_print(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
-                         REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
-       }
+       ath9k_hw_enable_interrupts(ah);
 
-       return omask;
+       return;
 }
 EXPORT_SYMBOL(ath9k_hw_set_interrupts);
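Splitting ath9k_hw_set_interrupts() into explicit ath9k_hw_disable_interrupts()/ath9k_hw_enable_interrupts() lets callers that used to pass 0 or save and restore the returned mask simply bracket their critical section with the pair, as the updatetxtriglevel hunk above shows. A sketch of the calling pattern only; the bodies here are stand-ins, the real helpers write AR_IER and the async/sync enable registers:

    #include <stdio.h>

    /* Stand-in bodies; the real helpers program the interrupt enable
     * registers as shown in the hunk above. */
    static void ath9k_hw_disable_interrupts(void) { puts("IER off"); }
    static void ath9k_hw_enable_interrupts(void)  { puts("IER on");  }

    static void update_tx_trig_level(void)
    {
        ath9k_hw_disable_interrupts();  /* was set_interrupts(ah, imask & ~GLOBAL) */
        /* ... adjust AR_TXCFG with interrupts masked ... */
        ath9k_hw_enable_interrupts();   /* was set_interrupts(ah, omask) */
    }

    int main(void)
    {
        update_tx_trig_level();
        return 0;
    }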
index 7c1a34d64f6debb70b23cd1c1cfe387fdb17868d..22907e21cc46fd68c15b566a09846e87c5f6982c 100644 (file)
@@ -104,13 +104,11 @@ struct ath_tx_status {
        u32 ts_tstamp;
        u16 ts_seqnum;
        u8 ts_status;
-       u8 ts_ratecode;
        u8 ts_rateindex;
        int8_t ts_rssi;
        u8 ts_shortretry;
        u8 ts_longretry;
        u8 ts_virtcol;
-       u8 ts_antenna;
        u8 ts_flags;
        int8_t ts_rssi_ctl0;
        int8_t ts_rssi_ctl1;
@@ -121,7 +119,6 @@ struct ath_tx_status {
        u8 qid;
        u16 desc_id;
        u8 tid;
-       u8 pad[2];
        u32 ba_low;
        u32 ba_high;
        u32 evm0;
@@ -240,7 +237,7 @@ struct ath_desc {
        u32 ds_ctl1;
        u32 ds_hw[20];
        void *ds_vdata;
-} __packed;
+} __packed __aligned(4);
 
 #define ATH9K_TXDESC_CLRDMASK          0x0001
 #define ATH9K_TXDESC_NOACK             0x0002
@@ -310,7 +307,7 @@ struct ar5416_desc {
                        u32 status8;
                } rx;
        } u;
-} __packed;
+} __packed __aligned(4);
 
 #define AR5416DESC(_ds)         ((struct ar5416_desc *)(_ds))
 #define AR5416DESC_CONST(_ds)   ((const struct ar5416_desc *)(_ds))
@@ -669,6 +666,7 @@ enum ath9k_key_type {
 
 struct ath_hw;
 struct ath9k_channel;
+enum ath9k_int;
 
 u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q);
 void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp);
@@ -700,8 +698,9 @@ int ath9k_hw_beaconq_setup(struct ath_hw *ah);
 
 /* Interrupt Handling */
 bool ath9k_hw_intrpend(struct ath_hw *ah);
-enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah,
-                                      enum ath9k_int ints);
+void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints);
+void ath9k_hw_enable_interrupts(struct ath_hw *ah);
+void ath9k_hw_disable_interrupts(struct ath_hw *ah);
 
 void ar9002_hw_attach_mac_ops(struct ath_hw *ah);
 
index 25d3ef4c338e1eb45800b45fec112fc064c095af..f8c811af312d415cebb8fc0da0d058fb854bef25 100644 (file)
@@ -24,7 +24,7 @@ static void ath_update_txpow(struct ath_softc *sc)
        struct ath_hw *ah = sc->sc_ah;
 
        if (sc->curtxpow != sc->config.txpowlimit) {
-               ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit);
+               ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
                /* read back in case value is clamped */
                sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
        }
@@ -235,6 +235,8 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
 
        ath9k_ps_wakeup(sc);
 
+       spin_lock_bh(&sc->sc_pcu_lock);
+
        /*
         * This is only performed if the channel settings have
         * actually changed.
@@ -244,11 +246,9 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
         * hardware at the new frequency, and then re-enable
         * the relevant bits of the h/w.
         */
-       ath9k_hw_set_interrupts(ah, 0);
+       ath9k_hw_disable_interrupts(ah);
        ath_drain_all_txq(sc, false);
 
-       spin_lock_bh(&sc->rx.pcu_lock);
-
        stopped = ath_stoprecv(sc);
 
        /* XXX: do not flush receive queue here. We don't want
@@ -267,30 +267,22 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
                  channel->center_freq, conf_is_ht40(conf),
                  fastcc);
 
-       spin_lock_bh(&sc->sc_resetlock);
-
        r = ath9k_hw_reset(ah, hchan, caldata, fastcc);
        if (r) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to reset channel (%u MHz), "
                          "reset status %d\n",
                          channel->center_freq, r);
-               spin_unlock_bh(&sc->sc_resetlock);
-               spin_unlock_bh(&sc->rx.pcu_lock);
                goto ps_restore;
        }
-       spin_unlock_bh(&sc->sc_resetlock);
 
        if (ath_startrecv(sc) != 0) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to restart recv logic\n");
                r = -EIO;
-               spin_unlock_bh(&sc->rx.pcu_lock);
                goto ps_restore;
        }
 
-       spin_unlock_bh(&sc->rx.pcu_lock);
-
        ath_update_txpow(sc);
        ath9k_hw_set_interrupts(ah, ah->imask);
 
@@ -301,6 +293,8 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
        }
 
  ps_restore:
+       spin_unlock_bh(&sc->sc_pcu_lock);
+
        ath9k_ps_restore(sc);
        return r;
 }
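The main.c changes fold the old rx.pcu_lock and sc_resetlock into one sc->sc_pcu_lock held across the whole disable-interrupts, drain, stop-RX, reset, restart sequence, as the ath_set_channel() hunk above shows. A minimal pthread sketch of the consolidated ordering; the mutex and the step functions are stand-ins for the kernel spinlock and driver calls:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t sc_pcu_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-ins for the driver steps that the single lock now serializes. */
    static void disable_interrupts(void) { puts("irq off"); }
    static void drain_txq(void)          { puts("drain tx"); }
    static void stop_recv(void)          { puts("stop rx"); }
    static int  hw_reset(void)           { puts("reset");    return 0; }
    static void start_recv(void)         { puts("start rx"); }
    static void enable_interrupts(void)  { puts("irq on"); }

    static int set_channel(void)
    {
        int r;

        pthread_mutex_lock(&sc_pcu_lock);   /* one lock around the whole sequence */
        disable_interrupts();
        drain_txq();
        stop_recv();
        r = hw_reset();                     /* no separate sc_resetlock anymore */
        if (r == 0)
            start_recv();
        enable_interrupts();
        pthread_mutex_unlock(&sc_pcu_lock);
        return r;
    }

    int main(void)
    {
        return set_channel();
    }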
@@ -341,7 +335,7 @@ void ath_paprd_calibrate(struct work_struct *work)
        struct ath_tx_control txctl;
        struct ath9k_hw_cal_data *caldata = ah->caldata;
        struct ath_common *common = ath9k_hw_common(ah);
-       int qnum, ftype;
+       int ftype;
        int chain_ok = 0;
        int chain;
        int len = 1800;
@@ -368,8 +362,7 @@ void ath_paprd_calibrate(struct work_struct *work)
        memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
 
        memset(&txctl, 0, sizeof(txctl));
-       qnum = sc->tx.hwq_map[WME_AC_BE];
-       txctl.txq = &sc->tx.txq[qnum];
+       txctl.txq = sc->tx.txq_map[WME_AC_BE];
 
        ath9k_ps_wakeup(sc);
        ar9003_paprd_init_table(ah);
@@ -567,7 +560,6 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
                an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
                                     sta->ht_cap.ampdu_factor);
                an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density);
-               an->last_rssi = ATH_RSSI_DUMMY_MARKER;
        }
 }
 
@@ -615,6 +607,8 @@ void ath9k_tasklet(unsigned long data)
                return;
        }
 
+       spin_lock_bh(&sc->sc_pcu_lock);
+
        if (!ath9k_hw_check_alive(ah))
                ieee80211_queue_work(sc->hw, &sc->hw_check_work);
 
@@ -625,15 +619,12 @@ void ath9k_tasklet(unsigned long data)
                rxmask = (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
 
        if (status & rxmask) {
-               spin_lock_bh(&sc->rx.pcu_lock);
-
                /* Check for high priority Rx first */
                if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
                    (status & ATH9K_INT_RXHP))
                        ath_rx_tasklet(sc, 0, true);
 
                ath_rx_tasklet(sc, 0, false);
-               spin_unlock_bh(&sc->rx.pcu_lock);
        }
 
        if (status & ATH9K_INT_TX) {
@@ -658,7 +649,9 @@ void ath9k_tasklet(unsigned long data)
                        ath_gen_timer_isr(sc->sc_ah);
 
        /* re-enable hardware interrupt */
-       ath9k_hw_set_interrupts(ah, ah->imask);
+       ath9k_hw_enable_interrupts(ah);
+
+       spin_unlock_bh(&sc->sc_pcu_lock);
        ath9k_ps_restore(sc);
 }
 
@@ -757,7 +750,7 @@ irqreturn_t ath_isr(int irq, void *dev)
                 * interrupt; otherwise it will continue to
                 * fire.
                 */
-               ath9k_hw_set_interrupts(ah, 0);
+               ath9k_hw_disable_interrupts(ah);
                /*
                 * Let the hal handle the event. We assume
                 * it will clear whatever condition caused
@@ -766,7 +759,7 @@ irqreturn_t ath_isr(int irq, void *dev)
                spin_lock(&common->cc_lock);
                ath9k_hw_proc_mib_event(ah);
                spin_unlock(&common->cc_lock);
-               ath9k_hw_set_interrupts(ah, ah->imask);
+               ath9k_hw_enable_interrupts(ah);
        }
 
        if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
@@ -783,8 +776,8 @@ chip_reset:
        ath_debug_stat_interrupt(sc, status);
 
        if (sched) {
-               /* turn off every interrupt except SWBA */
-               ath9k_hw_set_interrupts(ah, (ah->imask & ATH9K_INT_SWBA));
+               /* turn off every interrupt */
+               ath9k_hw_disable_interrupts(ah);
                tasklet_schedule(&sc->intr_tq);
        }
 
@@ -836,9 +829,11 @@ static u32 ath_get_extchanmode(struct ath_softc *sc,
 }
 
 static void ath9k_bss_assoc_info(struct ath_softc *sc,
+                                struct ieee80211_hw *hw,
                                 struct ieee80211_vif *vif,
                                 struct ieee80211_bss_conf *bss_conf)
 {
+       struct ath_wiphy *aphy = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
 
@@ -862,6 +857,7 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
                ath_beacon_config(sc, vif);
 
                /* Reset rssi stats */
+               aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
                sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
 
                sc->sc_flags |= SC_OP_ANI_RUN;
@@ -883,13 +879,13 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
        int r;
 
        ath9k_ps_wakeup(sc);
+       spin_lock_bh(&sc->sc_pcu_lock);
+
        ath9k_hw_configpcipowersave(ah, 0, 0);
 
        if (!ah->curchan)
                ah->curchan = ath_get_curchannel(sc, sc->hw);
 
-       spin_lock_bh(&sc->rx.pcu_lock);
-       spin_lock_bh(&sc->sc_resetlock);
        r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
        if (r) {
                ath_print(common, ATH_DBG_FATAL,
@@ -897,17 +893,14 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
                          "reset status %d\n",
                          channel->center_freq, r);
        }
-       spin_unlock_bh(&sc->sc_resetlock);
 
        ath_update_txpow(sc);
        if (ath_startrecv(sc) != 0) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to restart recv logic\n");
-               spin_unlock_bh(&sc->rx.pcu_lock);
+               spin_unlock_bh(&sc->sc_pcu_lock);
                return;
        }
-       spin_unlock_bh(&sc->rx.pcu_lock);
-
        if (sc->sc_flags & SC_OP_BEACONS)
                ath_beacon_config(sc, NULL);    /* restart beacons */
 
@@ -920,6 +913,8 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
        ath9k_hw_set_gpio(ah, ah->led_pin, 0);
 
        ieee80211_wake_queues(hw);
+       spin_unlock_bh(&sc->sc_pcu_lock);
+
        ath9k_ps_restore(sc);
 }
 
@@ -930,6 +925,8 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
        int r;
 
        ath9k_ps_wakeup(sc);
+       spin_lock_bh(&sc->sc_pcu_lock);
+
        ieee80211_stop_queues(hw);
 
        /*
@@ -942,19 +939,16 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
        }
 
        /* Disable interrupts */
-       ath9k_hw_set_interrupts(ah, 0);
+       ath9k_hw_disable_interrupts(ah);
 
        ath_drain_all_txq(sc, false);   /* clear pending tx frames */
 
-       spin_lock_bh(&sc->rx.pcu_lock);
-
        ath_stoprecv(sc);               /* turn off frame recv */
        ath_flushrecv(sc);              /* flush recv queue */
 
        if (!ah->curchan)
                ah->curchan = ath_get_curchannel(sc, hw);
 
-       spin_lock_bh(&sc->sc_resetlock);
        r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
        if (r) {
                ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
@@ -962,14 +956,14 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
                          "reset status %d\n",
                          channel->center_freq, r);
        }
-       spin_unlock_bh(&sc->sc_resetlock);
 
        ath9k_hw_phy_disable(ah);
 
-       spin_unlock_bh(&sc->rx.pcu_lock);
-
        ath9k_hw_configpcipowersave(ah, 1, 1);
+
+       spin_unlock_bh(&sc->sc_pcu_lock);
        ath9k_ps_restore(sc);
+
        ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
 }
 
@@ -983,29 +977,25 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
        /* Stop ANI */
        del_timer_sync(&common->ani.timer);
 
+       spin_lock_bh(&sc->sc_pcu_lock);
+
        ieee80211_stop_queues(hw);
 
-       ath9k_hw_set_interrupts(ah, 0);
+       ath9k_hw_disable_interrupts(ah);
        ath_drain_all_txq(sc, retry_tx);
 
-       spin_lock_bh(&sc->rx.pcu_lock);
-
        ath_stoprecv(sc);
        ath_flushrecv(sc);
 
-       spin_lock_bh(&sc->sc_resetlock);
        r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
        if (r)
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to reset hardware; reset status %d\n", r);
-       spin_unlock_bh(&sc->sc_resetlock);
 
        if (ath_startrecv(sc) != 0)
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to start recv logic\n");
 
-       spin_unlock_bh(&sc->rx.pcu_lock);
-
        /*
         * We may be doing a reset in response to a request
         * that changes the channel so update any state that
@@ -1030,6 +1020,7 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
        }
 
        ieee80211_wake_queues(hw);
+       spin_unlock_bh(&sc->sc_pcu_lock);
 
        /* Start ANI */
        ath_start_ani(common);
@@ -1037,56 +1028,6 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
        return r;
 }
 
-static int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
-{
-       int qnum;
-
-       switch (queue) {
-       case 0:
-               qnum = sc->tx.hwq_map[WME_AC_VO];
-               break;
-       case 1:
-               qnum = sc->tx.hwq_map[WME_AC_VI];
-               break;
-       case 2:
-               qnum = sc->tx.hwq_map[WME_AC_BE];
-               break;
-       case 3:
-               qnum = sc->tx.hwq_map[WME_AC_BK];
-               break;
-       default:
-               qnum = sc->tx.hwq_map[WME_AC_BE];
-               break;
-       }
-
-       return qnum;
-}
-
-int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
-{
-       int qnum;
-
-       switch (queue) {
-       case WME_AC_VO:
-               qnum = 0;
-               break;
-       case WME_AC_VI:
-               qnum = 1;
-               break;
-       case WME_AC_BE:
-               qnum = 2;
-               break;
-       case WME_AC_BK:
-               qnum = 3;
-               break;
-       default:
-               qnum = -1;
-               break;
-       }
-
-       return qnum;
-}
-
 /* XXX: Remove me once we don't depend on ath9k_channel for all
  * this redundant data */
 void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
@@ -1168,19 +1109,16 @@ static int ath9k_start(struct ieee80211_hw *hw)
         * be followed by initialization of the appropriate bits
         * and then setup of the interrupt mask.
         */
-       spin_lock_bh(&sc->rx.pcu_lock);
-       spin_lock_bh(&sc->sc_resetlock);
+       spin_lock_bh(&sc->sc_pcu_lock);
        r = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
        if (r) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to reset hardware; reset status %d "
                          "(freq %u MHz)\n", r,
                          curchan->center_freq);
-               spin_unlock_bh(&sc->sc_resetlock);
-               spin_unlock_bh(&sc->rx.pcu_lock);
+               spin_unlock_bh(&sc->sc_pcu_lock);
                goto mutex_unlock;
        }
-       spin_unlock_bh(&sc->sc_resetlock);
 
        /*
         * This is needed only to setup initial state
@@ -1199,10 +1137,10 @@ static int ath9k_start(struct ieee80211_hw *hw)
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to start recv logic\n");
                r = -EIO;
-               spin_unlock_bh(&sc->rx.pcu_lock);
+               spin_unlock_bh(&sc->sc_pcu_lock);
                goto mutex_unlock;
        }
-       spin_unlock_bh(&sc->rx.pcu_lock);
+       spin_unlock_bh(&sc->sc_pcu_lock);
 
        /* Setup our intr mask. */
        ah->imask = ATH9K_INT_TX | ATH9K_INT_RXEOL |
@@ -1262,7 +1200,6 @@ static int ath9k_tx(struct ieee80211_hw *hw,
        struct ath_tx_control txctl;
        int padpos, padsize;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-       int qnum;
 
        if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) {
                ath_print(common, ATH_DBG_XMIT,
@@ -1335,8 +1272,7 @@ static int ath9k_tx(struct ieee80211_hw *hw,
                memmove(skb->data, skb->data + padsize, padpos);
        }
 
-       qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
-       txctl.txq = &sc->tx.txq[qnum];
+       txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];
 
        ath_print(common, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb);
 
@@ -1400,22 +1336,25 @@ static void ath9k_stop(struct ieee80211_hw *hw)
                        ath9k_btcoex_timer_pause(sc);
        }
 
+       spin_lock_bh(&sc->sc_pcu_lock);
+
        /* make sure h/w will not generate any interrupt
         * before setting the invalid flag. */
-       ath9k_hw_set_interrupts(ah, 0);
+       ath9k_hw_disable_interrupts(ah);
 
-       spin_lock_bh(&sc->rx.pcu_lock);
        if (!(sc->sc_flags & SC_OP_INVALID)) {
                ath_drain_all_txq(sc, false);
                ath_stoprecv(sc);
                ath9k_hw_phy_disable(ah);
        } else
                sc->rx.rxlink = NULL;
-       spin_unlock_bh(&sc->rx.pcu_lock);
 
        /* disable HAL and put h/w to sleep */
        ath9k_hw_disable(ah);
        ath9k_hw_configpcipowersave(ah, 1, 1);
+
+       spin_unlock_bh(&sc->sc_pcu_lock);
+
        ath9k_ps_restore(sc);
 
        /* Finally, put the chip in FULL SLEEP mode */
@@ -1822,12 +1761,15 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
        struct ath_wiphy *aphy = hw->priv;
        struct ath_softc *sc = aphy->sc;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       struct ath_txq *txq;
        struct ath9k_tx_queue_info qi;
-       int ret = 0, qnum;
+       int ret = 0;
 
        if (queue >= WME_NUM_AC)
                return 0;
 
+       txq = sc->tx.txq_map[queue];
+
        mutex_lock(&sc->mutex);
 
        memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
@@ -1836,20 +1778,19 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
        qi.tqi_cwmin = params->cw_min;
        qi.tqi_cwmax = params->cw_max;
        qi.tqi_burstTime = params->txop;
-       qnum = ath_get_hal_qnum(queue, sc);
 
        ath_print(common, ATH_DBG_CONFIG,
                  "Configure tx [queue/halq] [%d/%d],  "
                  "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
-                 queue, qnum, params->aifs, params->cw_min,
+                 queue, txq->axq_qnum, params->aifs, params->cw_min,
                  params->cw_max, params->txop);
 
-       ret = ath_txq_update(sc, qnum, &qi);
+       ret = ath_txq_update(sc, txq->axq_qnum, &qi);
        if (ret)
                ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n");
 
        if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC)
-               if ((qnum == sc->tx.hwq_map[WME_AC_BE]) && !ret)
+               if (queue == WME_AC_BE && !ret)
                        ath_beaconq_config(sc);
 
        mutex_unlock(&sc->mutex);
@@ -2011,7 +1952,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
        if (changed & BSS_CHANGED_ASSOC) {
                ath_print(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
                        bss_conf->assoc);
-               ath9k_bss_assoc_info(sc, vif, bss_conf);
+               ath9k_bss_assoc_info(sc, hw, vif, bss_conf);
        }
 
        mutex_unlock(&sc->mutex);
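
Taken together, the main.c hunks above collapse the old rx.pcu_lock / sc_resetlock pair into a single sc_pcu_lock that is held across the whole stop-reset-restart sequence. The following is a minimal sketch of that ordering only; every name here is a placeholder, not an actual ath9k symbol.

#include <linux/spinlock.h>

/* Illustrative only: hypothetical softc and helpers, not ath9k code. */
struct example_softc {
	spinlock_t pcu_lock;
};

static void example_hw_disable_interrupts(struct example_softc *sc) { }
static void example_drain_tx(struct example_softc *sc) { }
static void example_stop_rx(struct example_softc *sc) { }
static int  example_hw_reset(struct example_softc *sc) { return 0; }
static int  example_start_rx(struct example_softc *sc) { return 0; }

/* The stop -> reset -> restart ordering, all under one PCU lock,
 * mirroring the pattern the hunks above converge on. */
static int example_chip_reset(struct example_softc *sc)
{
	int r;

	spin_lock_bh(&sc->pcu_lock);       /* serialize against RX/PCU users */

	example_hw_disable_interrupts(sc); /* no IRQs while the PCU is torn down */
	example_drain_tx(sc);              /* flush pending TX descriptors */
	example_stop_rx(sc);               /* halt RX DMA, drop queued frames */

	r = example_hw_reset(sc);          /* full chip reset, still locked */
	if (r == 0)
		r = example_start_rx(sc);  /* re-arm RX before dropping the lock */

	spin_unlock_bh(&sc->pcu_lock);
	return r;
}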
index b5b651413e77104bdd4bffe3171354287ed0a828..6605bc2c2036ad83591780b811659de1add95c92 100644 (file)
@@ -247,34 +247,25 @@ static void ath_pci_remove(struct pci_dev *pdev)
 
 #ifdef CONFIG_PM
 
-static int ath_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int ath_pci_suspend(struct device *device)
 {
+       struct pci_dev *pdev = to_pci_dev(device);
        struct ieee80211_hw *hw = pci_get_drvdata(pdev);
        struct ath_wiphy *aphy = hw->priv;
        struct ath_softc *sc = aphy->sc;
 
        ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
 
-       pci_save_state(pdev);
-       pci_disable_device(pdev);
-       pci_set_power_state(pdev, PCI_D3hot);
-
        return 0;
 }
 
-static int ath_pci_resume(struct pci_dev *pdev)
+static int ath_pci_resume(struct device *device)
 {
+       struct pci_dev *pdev = to_pci_dev(device);
        struct ieee80211_hw *hw = pci_get_drvdata(pdev);
        struct ath_wiphy *aphy = hw->priv;
        struct ath_softc *sc = aphy->sc;
        u32 val;
-       int err;
-
-       pci_restore_state(pdev);
-
-       err = pci_enable_device(pdev);
-       if (err)
-               return err;
 
        /*
         * Suspend/Resume resets the PCI configuration space, so we have to
@@ -293,7 +284,23 @@ static int ath_pci_resume(struct pci_dev *pdev)
        return 0;
 }
 
-#endif /* CONFIG_PM */
+static const struct dev_pm_ops ath9k_pm_ops = {
+       .suspend = ath_pci_suspend,
+       .resume = ath_pci_resume,
+       .freeze = ath_pci_suspend,
+       .thaw = ath_pci_resume,
+       .poweroff = ath_pci_suspend,
+       .restore = ath_pci_resume,
+};
+
+#define ATH9K_PM_OPS   (&ath9k_pm_ops)
+
+#else /* !CONFIG_PM */
+
+#define ATH9K_PM_OPS   NULL
+
+#endif /* !CONFIG_PM */
+
 
 MODULE_DEVICE_TABLE(pci, ath_pci_id_table);
 
@@ -302,10 +309,7 @@ static struct pci_driver ath_pci_driver = {
        .id_table   = ath_pci_id_table,
        .probe      = ath_pci_probe,
        .remove     = ath_pci_remove,
-#ifdef CONFIG_PM
-       .suspend    = ath_pci_suspend,
-       .resume     = ath_pci_resume,
-#endif /* CONFIG_PM */
+       .driver.pm  = ATH9K_PM_OPS,
 };
 
 int ath_pci_init(void)
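
The pci.c hunk above moves from the legacy pci_driver .suspend/.resume hooks to a struct dev_pm_ops wired in through driver.pm, so the PCI core performs the config-space save/restore and power-state transitions itself. A minimal sketch of that shape, with hypothetical names:

#include <linux/pci.h>

/* Hypothetical callbacks: a real driver reprograms only its own hardware
 * here; the PCI core now handles pci_save_state()/pci_set_power_state(). */
static int example_suspend(struct device *dev)
{
	return 0;
}

static int example_resume(struct device *dev)
{
	return 0;
}

#ifdef CONFIG_PM
static const struct dev_pm_ops example_pm_ops = {
	.suspend  = example_suspend,
	.resume   = example_resume,
	.freeze   = example_suspend,
	.thaw     = example_resume,
	.poweroff = example_suspend,
	.restore  = example_resume,
};
#define EXAMPLE_PM_OPS (&example_pm_ops)
#else
#define EXAMPLE_PM_OPS NULL
#endif

static struct pci_driver example_pci_driver = {
	.name      = "example",
	/* .id_table, .probe and .remove omitted in this sketch */
	.driver.pm = EXAMPLE_PM_OPS,
};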
index 89978d71617fdaf89a70be70f28a77b199b18d98..85c8e9310cae3461fc12ad99d0b73d48db452768 100644 (file)
@@ -381,25 +381,6 @@ static const struct ath_rate_table ar5416_11g_ratetable = {
 static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table,
                                struct ieee80211_tx_rate *rate);
 
-static inline int8_t median(int8_t a, int8_t b, int8_t c)
-{
-       if (a >= b) {
-               if (b >= c)
-                       return b;
-               else if (a > c)
-                       return c;
-               else
-                       return a;
-       } else {
-               if (a >= c)
-                       return a;
-               else if (b >= c)
-                       return c;
-               else
-                       return b;
-       }
-}
-
 static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table,
                                   struct ath_rate_priv *ath_rc_priv)
 {
@@ -1444,12 +1425,12 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
                ath_rc_priv->neg_ht_rates.rs_nrates = j;
        }
 
-       is_cw40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+       is_cw40 = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40);
 
        if (is_cw40)
-               is_sgi = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40;
+               is_sgi = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
        else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
-               is_sgi = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20;
+               is_sgi = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
 
        /* Choose rate table first */
 
@@ -1468,10 +1449,8 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
        struct ath_rate_priv *ath_rc_priv = priv_sta;
        const struct ath_rate_table *rate_table = NULL;
        bool oper_cw40 = false, oper_sgi;
-       bool local_cw40 = (ath_rc_priv->ht_cap & WLAN_RC_40_FLAG) ?
-               true : false;
-       bool local_sgi = (ath_rc_priv->ht_cap & WLAN_RC_SGI_FLAG) ?
-               true : false;
+       bool local_cw40 = !!(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG);
+       bool local_sgi = !!(ath_rc_priv->ht_cap & WLAN_RC_SGI_FLAG);
 
        /* FIXME: Handle AP mode later when we support CWM */
 
index c76ea53c20ce7dbe41863c00930853a59ccc6cf8..c5c80764a94a4f1a6b8e67a9b78e16fcb1ff303d 100644 (file)
@@ -317,7 +317,7 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
        struct ath_buf *bf;
        int error = 0;
 
-       spin_lock_init(&sc->rx.pcu_lock);
+       spin_lock_init(&sc->sc_pcu_lock);
        sc->sc_flags &= ~SC_OP_RXFLUSH;
        spin_lock_init(&sc->rx.rxbuflock);
 
@@ -528,6 +528,8 @@ bool ath_stoprecv(struct ath_softc *sc)
                sc->rx.rxlink = NULL;
        spin_unlock_bh(&sc->rx.rxbuflock);
 
+       ATH_DBG_WARN(!stopped, "Could not stop RX, we could be "
+                    "confusing the DMA engine when we start RX up\n");
        return stopped;
 }
 
@@ -962,36 +964,23 @@ static void ath9k_process_rssi(struct ath_common *common,
                               struct ieee80211_hdr *hdr,
                               struct ath_rx_status *rx_stats)
 {
+       struct ath_wiphy *aphy = hw->priv;
        struct ath_hw *ah = common->ah;
-       struct ieee80211_sta *sta;
-       struct ath_node *an;
-       int last_rssi = ATH_RSSI_DUMMY_MARKER;
+       int last_rssi;
        __le16 fc;
 
+       if (ah->opmode != NL80211_IFTYPE_STATION)
+               return;
+
        fc = hdr->frame_control;
+       if (!ieee80211_is_beacon(fc) ||
+           compare_ether_addr(hdr->addr3, common->curbssid))
+               return;
 
-       rcu_read_lock();
-       /*
-        * XXX: use ieee80211_find_sta! This requires quite a bit of work
-        * under the current ath9k virtual wiphy implementation as we have
-        * no way of tying a vif to wiphy. Typically vifs are attached to
-        * at least one sdata of a wiphy on mac80211 but with ath9k virtual
-        * wiphy you'd have to iterate over every wiphy and each sdata.
-        */
-       if (is_multicast_ether_addr(hdr->addr1))
-               sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
-       else
-               sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, hdr->addr1);
-
-       if (sta) {
-               an = (struct ath_node *) sta->drv_priv;
-               if (rx_stats->rs_rssi != ATH9K_RSSI_BAD &&
-                  !rx_stats->rs_moreaggr)
-                       ATH_RSSI_LPF(an->last_rssi, rx_stats->rs_rssi);
-               last_rssi = an->last_rssi;
-       }
-       rcu_read_unlock();
+       if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
+               ATH_RSSI_LPF(aphy->last_rssi, rx_stats->rs_rssi);
 
+       last_rssi = aphy->last_rssi;
        if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
                rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
                                              ATH_RSSI_EP_MULTIPLIER);
@@ -999,8 +988,7 @@ static void ath9k_process_rssi(struct ath_common *common,
                rx_stats->rs_rssi = 0;
 
        /* Update Beacon RSSI, this is used by ANI. */
-       if (ieee80211_is_beacon(fc))
-               ah->stats.avgbrssi = rx_stats->rs_rssi;
+       ah->stats.avgbrssi = rx_stats->rs_rssi;
 }
 
 /*
index ec7cf5ee56bc3ac9a850f7ee56fab95251b88d6d..4008f51d34c8cb0ad908ce9dd71be27332a098e3 100644 (file)
@@ -107,6 +107,7 @@ int ath9k_wiphy_add(struct ath_softc *sc)
        aphy->sc = sc;
        aphy->hw = hw;
        sc->sec_wiphy[i] = aphy;
+       aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
        spin_unlock_bh(&sc->wiphy_lock);
 
        memcpy(addr, common->macaddr, ETH_ALEN);
@@ -186,7 +187,7 @@ static int ath9k_send_nullfunc(struct ath_wiphy *aphy,
        info->control.rates[1].idx = -1;
 
        memset(&txctl, 0, sizeof(struct ath_tx_control));
-       txctl.txq = &sc->tx.txq[sc->tx.hwq_map[WME_AC_VO]];
+       txctl.txq = sc->tx.txq_map[WME_AC_VO];
        txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE;
 
        if (ath_tx_start(aphy->hw, skb, &txctl) != 0)
index f2ade2402ce27709e1da385941b9adbaece96b1e..6380bbd82d49b7153d2c2fc48b607c874c312f40 100644 (file)
@@ -124,7 +124,7 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
 
 static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
-       struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
+       struct ath_txq *txq = tid->ac->txq;
 
        WARN_ON(!tid->paused);
 
@@ -142,7 +142,7 @@ unlock:
 
 static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
-       struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
+       struct ath_txq *txq = tid->ac->txq;
        struct ath_buf *bf;
        struct list_head bf_head;
        struct ath_tx_status ts;
@@ -817,7 +817,7 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
 {
        struct ath_node *an = (struct ath_node *)sta->drv_priv;
        struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
-       struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
+       struct ath_txq *txq = txtid->ac->txq;
 
        if (txtid->state & AGGR_CLEANUP)
                return;
@@ -888,10 +888,16 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_tx_queue_info qi;
+       static const int subtype_txq_to_hwq[] = {
+               [WME_AC_BE] = ATH_TXQ_AC_BE,
+               [WME_AC_BK] = ATH_TXQ_AC_BK,
+               [WME_AC_VI] = ATH_TXQ_AC_VI,
+               [WME_AC_VO] = ATH_TXQ_AC_VO,
+       };
        int qnum, i;
 
        memset(&qi, 0, sizeof(qi));
-       qi.tqi_subtype = subtype;
+       qi.tqi_subtype = subtype_txq_to_hwq[subtype];
        qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
@@ -940,7 +946,6 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
        if (!ATH_TXQ_SETUP(sc, qnum)) {
                struct ath_txq *txq = &sc->tx.txq[qnum];
 
-               txq->axq_class = subtype;
                txq->axq_qnum = qnum;
                txq->axq_link = NULL;
                INIT_LIST_HEAD(&txq->axq_q);
@@ -1148,13 +1153,11 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
                ath_print(common, ATH_DBG_FATAL,
                          "Failed to stop TX DMA. Resetting hardware!\n");
 
-               spin_lock_bh(&sc->sc_resetlock);
                r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
                if (r)
                        ath_print(common, ATH_DBG_FATAL,
                                  "Unable to reset hardware; reset status %d\n",
                                  r);
-               spin_unlock_bh(&sc->sc_resetlock);
        }
 
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
@@ -1212,24 +1215,6 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
        }
 }
 
-int ath_tx_setup(struct ath_softc *sc, int haltype)
-{
-       struct ath_txq *txq;
-
-       if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
-               ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
-                         "HAL AC %u out of range, max %zu!\n",
-                        haltype, ARRAY_SIZE(sc->tx.hwq_map));
-               return 0;
-       }
-       txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
-       if (txq != NULL) {
-               sc->tx.hwq_map[haltype] = txq->axq_qnum;
-               return 1;
-       } else
-               return 0;
-}
-
 /***********/
 /* TX, DMA */
 /***********/
@@ -1710,6 +1695,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
                        goto tx_done;
                }
 
+               WARN_ON(tid->ac->txq != txctl->txq);
                if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
                        /*
                         * Try aggregation if it's a unicast data frame
@@ -1749,6 +1735,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
                return -1;
        }
 
+       q = skb_get_queue_mapping(skb);
        r = ath_tx_setup_buffer(hw, bf, skb, txctl);
        if (unlikely(r)) {
                ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
@@ -1758,8 +1745,9 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
                 * we will at least have to run TX completionon one buffer
                 * on the queue */
                spin_lock_bh(&txq->axq_lock);
-               if (!txq->stopped && txq->axq_depth > 1) {
-                       ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
+               if (txq == sc->tx.txq_map[q] && !txq->stopped &&
+                   txq->axq_depth > 1) {
+                       ath_mac80211_stop_queue(sc, q);
                        txq->stopped = 1;
                }
                spin_unlock_bh(&txq->axq_lock);
@@ -1769,13 +1757,10 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
                return r;
        }
 
-       q = skb_get_queue_mapping(skb);
-       if (q >= 4)
-               q = 0;
-
        spin_lock_bh(&txq->axq_lock);
-       if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
-               ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
+       if (txq == sc->tx.txq_map[q] &&
+           ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
+               ath_mac80211_stop_queue(sc, q);
                txq->stopped = 1;
        }
        spin_unlock_bh(&txq->axq_lock);
@@ -1843,7 +1828,8 @@ exit:
 /*****************/
 
 static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
-                           struct ath_wiphy *aphy, int tx_flags)
+                           struct ath_wiphy *aphy, int tx_flags,
+                           struct ath_txq *txq)
 {
        struct ieee80211_hw *hw = sc->hw;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -1890,11 +1876,12 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
                ath9k_tx_status(hw, skb);
        else {
                q = skb_get_queue_mapping(skb);
-               if (q >= 4)
-                       q = 0;
-
-               if (--sc->tx.pending_frames[q] < 0)
-                       sc->tx.pending_frames[q] = 0;
+               if (txq == sc->tx.txq_map[q]) {
+                       spin_lock_bh(&txq->axq_lock);
+                       if (WARN_ON(--txq->pending_frames < 0))
+                               txq->pending_frames = 0;
+                       spin_unlock_bh(&txq->axq_lock);
+               }
 
                ieee80211_tx_status(hw, skb);
        }
@@ -1929,8 +1916,8 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
                else
                        complete(&sc->paprd_complete);
        } else {
-               ath_debug_stat_tx(sc, txq, bf, ts);
-               ath_tx_complete(sc, skb, bf->aphy, tx_flags);
+               ath_debug_stat_tx(sc, bf, ts);
+               ath_tx_complete(sc, skb, bf->aphy, tx_flags, txq);
        }
        /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
         * accidentally reference it later.
@@ -2020,16 +2007,13 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
        tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
 }
 
-static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
+static void ath_wake_mac80211_queue(struct ath_softc *sc, int qnum)
 {
-       int qnum;
-
-       qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
-       if (qnum == -1)
-               return;
+       struct ath_txq *txq;
 
+       txq = sc->tx.txq_map[qnum];
        spin_lock_bh(&txq->axq_lock);
-       if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) {
+       if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
                if (ath_mac80211_start_queue(sc, qnum))
                        txq->stopped = 0;
        }
@@ -2046,6 +2030,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
        struct ath_tx_status ts;
        int txok;
        int status;
+       int qnum;
 
        ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
                  txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
@@ -2121,12 +2106,15 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
                        ath_tx_rc_status(bf, &ts, txok ? 0 : 1, txok, true);
                }
 
+               qnum = skb_get_queue_mapping(bf->bf_mpdu);
+
                if (bf_isampdu(bf))
                        ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
                else
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
 
-               ath_wake_mac80211_queue(sc, txq);
+               if (txq == sc->tx.txq_map[qnum])
+                       ath_wake_mac80211_queue(sc, qnum);
 
                spin_lock_bh(&txq->axq_lock);
                if (sc->sc_flags & SC_OP_TXAGGR)
@@ -2196,6 +2184,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
        struct list_head bf_head;
        int status;
        int txok;
+       int qnum;
 
        for (;;) {
                status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
@@ -2239,13 +2228,16 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
                        ath_tx_rc_status(bf, &txs, txok ? 0 : 1, txok, true);
                }
 
+               qnum = skb_get_queue_mapping(bf->bf_mpdu);
+
                if (bf_isampdu(bf))
                        ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
                else
                        ath_tx_complete_buf(sc, bf, txq, &bf_head,
                                            &txs, txok, 0);
 
-               ath_wake_mac80211_queue(sc, txq);
+               if (txq == sc->tx.txq_map[qnum])
+                       ath_wake_mac80211_queue(sc, qnum);
 
                spin_lock_bh(&txq->axq_lock);
                if (!list_empty(&txq->txq_fifo_pending)) {
@@ -2377,7 +2369,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
        for (acno = 0, ac = &an->ac[acno];
             acno < WME_NUM_AC; acno++, ac++) {
                ac->sched    = false;
-               ac->qnum = sc->tx.hwq_map[acno];
+               ac->txq = sc->tx.txq_map[acno];
                INIT_LIST_HEAD(&ac->tid_q);
        }
 }
@@ -2387,17 +2379,13 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
        struct ath_atx_ac *ac;
        struct ath_atx_tid *tid;
        struct ath_txq *txq;
-       int i, tidno;
+       int tidno;
 
        for (tidno = 0, tid = &an->tid[tidno];
             tidno < WME_NUM_TID; tidno++, tid++) {
-               i = tid->ac->qnum;
-
-               if (!ATH_TXQ_SETUP(sc, i))
-                       continue;
 
-               txq = &sc->tx.txq[i];
                ac = tid->ac;
+               txq = ac->txq;
 
                spin_lock_bh(&txq->axq_lock);
 
index 6cf0c9ef47aa4e7ae005abc034edca2fb40d30e6..d07ff7f2fd92d653103538b86353c6c986f9aca9 100644 (file)
@@ -48,7 +48,7 @@
 #include <linux/usb.h>
 #ifdef CONFIG_CARL9170_LEDS
 #include <linux/leds.h>
-#endif /* CONFIG_CARL170_LEDS */
+#endif /* CONFIG_CARL9170_LEDS */
 #ifdef CONFIG_CARL9170_WPC
 #include <linux/input.h>
 #endif /* CONFIG_CARL9170_WPC */
@@ -215,7 +215,7 @@ enum carl9170_restart_reasons {
        CARL9170_RR_TOO_MANY_FIRMWARE_ERRORS,
        CARL9170_RR_WATCHDOG,
        CARL9170_RR_STUCK_TX,
-       CARL9170_RR_SLOW_SYSTEM,
+       CARL9170_RR_UNRESPONSIVE_DEVICE,
        CARL9170_RR_COMMAND_TIMEOUT,
        CARL9170_RR_TOO_MANY_PHY_ERRORS,
        CARL9170_RR_LOST_RSP,
@@ -287,6 +287,7 @@ struct ar9170 {
 
        /* reset / stuck frames/queue detection */
        struct work_struct restart_work;
+       struct work_struct ping_work;
        unsigned int restart_counter;
        unsigned long queue_stop_timeout[__AR9170_NUM_TXQ];
        unsigned long max_queue_stop_timeout[__AR9170_NUM_TXQ];
index d552166db5059149cd8e653fb2e1154dfba85173..3680dfc70f4659179977620a753dcfac05e987ce 100644 (file)
@@ -97,13 +97,13 @@ struct carl9170_set_key_cmd {
        __le16          type;
        u8              macAddr[6];
        u32             key[4];
-} __packed;
+} __packed __aligned(4);
 #define CARL9170_SET_KEY_CMD_SIZE              28
 
 struct carl9170_disable_key_cmd {
        __le16          user;
        __le16          padding;
-} __packed;
+} __packed __aligned(4);
 #define CARL9170_DISABLE_KEY_CMD_SIZE          4
 
 struct carl9170_u32_list {
@@ -206,7 +206,7 @@ struct carl9170_cmd {
                struct carl9170_rx_filter_cmd   rx_filter;
                u8 data[CARL9170_MAX_CMD_PAYLOAD_LEN];
        } __packed;
-} __packed;
+} __packed __aligned(4);
 
 #define        CARL9170_TX_STATUS_QUEUE        3
 #define        CARL9170_TX_STATUS_QUEUE_S      0
@@ -216,6 +216,7 @@ struct carl9170_cmd {
 #define        CARL9170_TX_STATUS_TRIES        (7 << CARL9170_TX_STATUS_TRIES_S)
 #define        CARL9170_TX_STATUS_SUCCESS      0x80
 
+#ifdef __CARL9170FW__
 /*
  * NOTE:
  * Both structs [carl9170_tx_status and _carl9170_tx_status]
@@ -232,6 +233,8 @@ struct carl9170_tx_status {
        u8 tries:3;
        u8 success:1;
 } __packed;
+#endif /* __CARL9170FW__ */
+
 struct _carl9170_tx_status {
        /*
         * This version should be immune to all alignment bugs.
@@ -272,13 +275,15 @@ struct carl9170_rsp {
                struct carl9170_rf_init_result  rf_init_res;
                struct carl9170_u32_list        rreg_res;
                struct carl9170_u32_list        echo;
+#ifdef __CARL9170FW__
                struct carl9170_tx_status       tx_status[0];
+#endif /* __CARL9170FW__ */
                struct _carl9170_tx_status      _tx_status[0];
                struct carl9170_gpio            gpio;
                struct carl9170_tsf_rsp         tsf;
                struct carl9170_psm             psm;
                u8 data[CARL9170_MAX_CMD_PAYLOAD_LEN];
        } __packed;
-} __packed;
+} __packed __aligned(4);
 
 #endif /* __CARL9170_SHARED_FWCMD_H */
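
Several of the command/response structures above gain a __packed __aligned(4) annotation: __packed strips compiler padding so the in-memory layout matches the firmware wire format exactly, while __aligned(4) keeps the structure (and arrays of it) on a 32-bit boundary so word-sized accesses remain aligned. A small illustration, not taken from the driver:

#include <linux/types.h>
#include <linux/compiler.h>

/* Hypothetical firmware command header: no padding between fields,
 * but the struct as a whole still starts on a 4-byte boundary. */
struct example_fw_cmd {
	__le16	len;		/* payload length in bytes */
	u8	cmd;		/* command opcode */
	u8	seq;		/* sequence number for matching responses */
	u8	payload[0];	/* variable-length body follows */
} __packed __aligned(4);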
index 2f471b3f05afdec737ab6e1d293ac40bb7c0db5c..e85df6edfed32ed6d58888b7fb18476aab9d40cd 100644 (file)
@@ -712,7 +712,8 @@ struct ar9170_stream {
        __le16 tag;
 
        u8 payload[0];
-};
+} __packed __aligned(4);
+#define AR9170_STREAM_LEN                              4
 
 #define AR9170_MAX_ACKTABLE_ENTRIES                    8
 #define AR9170_MAX_VIRTUAL_MAC                         7
@@ -736,4 +737,8 @@ struct ar9170_stream {
 
 #define MOD_VAL(reg, value, newvalue)                                  \
        (((value) & ~reg) | (((newvalue) << reg##_S) & reg))
+
+#define GET_VAL(reg, value)                                            \
+       (((value) & reg) >> reg##_S)
+
 #endif /* __CARL9170_SHARED_HW_H */
index 2305bc27151c0a327def9ab3d8d9e196af98cd34..385cf508479b447b903f799acdfd0be111241a5b 100644 (file)
@@ -205,8 +205,8 @@ int carl9170_init_mac(struct ar9170 *ar)
        carl9170_regwrite(AR9170_MAC_REG_BACKOFF_PROTECT, 0x105);
 
        /* Aggregation MAX number and timeout */
-       carl9170_regwrite(AR9170_MAC_REG_AMPDU_FACTOR, 0xa);
-       carl9170_regwrite(AR9170_MAC_REG_AMPDU_DENSITY, 0x140a00);
+       carl9170_regwrite(AR9170_MAC_REG_AMPDU_FACTOR, 0x8000a);
+       carl9170_regwrite(AR9170_MAC_REG_AMPDU_DENSITY, 0x140a07);
 
        carl9170_regwrite(AR9170_MAC_REG_FRAMETYPE_FILTER,
                          AR9170_MAC_FTF_DEFAULTS);
@@ -457,8 +457,9 @@ int carl9170_set_beacon_timers(struct ar9170 *ar)
 
 int carl9170_update_beacon(struct ar9170 *ar, const bool submit)
 {
-       struct sk_buff *skb;
+       struct sk_buff *skb = NULL;
        struct carl9170_vif_info *cvif;
+       struct ieee80211_tx_info *txinfo;
        __le32 *data, *old = NULL;
        u32 word, off, addr, len;
        int i = 0, err = 0;
@@ -487,7 +488,13 @@ found:
 
        if (!skb) {
                err = -ENOMEM;
-               goto out_unlock;
+               goto err_free;
+       }
+
+       txinfo = IEEE80211_SKB_CB(skb);
+       if (txinfo->control.rates[0].flags & IEEE80211_TX_RC_MCS) {
+               err = -EINVAL;
+               goto err_free;
        }
 
        spin_lock_bh(&ar->beacon_lock);
@@ -504,11 +511,8 @@ found:
                        wiphy_err(ar->hw->wiphy, "beacon does not "
                                  "fit into device memory!\n");
                }
-
-               spin_unlock_bh(&ar->beacon_lock);
-               dev_kfree_skb_any(skb);
                err = -EINVAL;
-               goto out_unlock;
+               goto err_unlock;
        }
 
        if (len > AR9170_MAC_BCN_LENGTH_MAX) {
@@ -518,22 +522,22 @@ found:
                                 AR9170_MAC_BCN_LENGTH_MAX, len);
                }
 
-               spin_unlock_bh(&ar->beacon_lock);
-               dev_kfree_skb_any(skb);
                err = -EMSGSIZE;
-               goto out_unlock;
+               goto err_unlock;
        }
 
-       carl9170_async_regwrite_begin(ar);
+       i = txinfo->control.rates[0].idx;
+       if (txinfo->band != IEEE80211_BAND_2GHZ)
+               i += 4;
 
-       /* XXX: use skb->cb info */
-       if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ) {
-               carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP,
-                               ((skb->len + FCS_LEN) << (3 + 16)) + 0x0400);
-       } else {
-               carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP,
-                               ((skb->len + FCS_LEN) << 16) + 0x001b);
-       }
+       word = __carl9170_ratetable[i].hw_value & 0xf;
+       if (i < 4)
+               word |= ((skb->len + FCS_LEN) << (3 + 16)) + 0x0400;
+       else
+               word |= ((skb->len + FCS_LEN) << 16) + 0x0010;
+
+       carl9170_async_regwrite_begin(ar);
+       carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP, word);
 
        for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) {
                /*
@@ -557,7 +561,7 @@ found:
                cvif->beacon = skb;
        spin_unlock_bh(&ar->beacon_lock);
        if (err)
-               goto out_unlock;
+               goto err_free;
 
        if (submit) {
                err = carl9170_bcn_ctrl(ar, cvif->id,
@@ -565,10 +569,18 @@ found:
                                        addr, skb->len + FCS_LEN);
 
                if (err)
-                       goto out_unlock;
+                       goto err_free;
        }
 out_unlock:
        rcu_read_unlock();
+       return 0;
+
+err_unlock:
+       spin_unlock_bh(&ar->beacon_lock);
+
+err_free:
+       rcu_read_unlock();
+       dev_kfree_skb_any(skb);
        return err;
 }
 
index 980ae70ea424f6dc50a93734446536588bea9734..4ae6a584907618d4b45b0273615c606c33600267 100644 (file)
@@ -428,6 +428,7 @@ static void carl9170_cancel_worker(struct ar9170 *ar)
        cancel_delayed_work_sync(&ar->led_work);
 #endif /* CONFIG_CARL9170_LEDS */
        cancel_work_sync(&ar->ps_work);
+       cancel_work_sync(&ar->ping_work);
        cancel_work_sync(&ar->ampdu_work);
 }
 
@@ -533,6 +534,21 @@ void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
         */
 }
 
+static void carl9170_ping_work(struct work_struct *work)
+{
+       struct ar9170 *ar = container_of(work, struct ar9170, ping_work);
+       int err;
+
+       if (!IS_STARTED(ar))
+               return;
+
+       mutex_lock(&ar->mutex);
+       err = carl9170_echo_test(ar, 0xdeadbeef);
+       if (err)
+               carl9170_restart(ar, CARL9170_RR_UNRESPONSIVE_DEVICE);
+       mutex_unlock(&ar->mutex);
+}
+
 static int carl9170_init_interface(struct ar9170 *ar,
                                   struct ieee80211_vif *vif)
 {
@@ -1614,6 +1630,7 @@ void *carl9170_alloc(size_t priv_size)
                skb_queue_head_init(&ar->tx_pending[i]);
        }
        INIT_WORK(&ar->ps_work, carl9170_ps_work);
+       INIT_WORK(&ar->ping_work, carl9170_ping_work);
        INIT_WORK(&ar->restart_work, carl9170_restart_work);
        INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
        INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor);
@@ -1828,7 +1845,7 @@ int carl9170_register(struct ar9170 *ar)
        err = carl9170_led_register(ar);
        if (err)
                goto err_unreg;
-#endif /* CONFIG_CAR9L170_LEDS */
+#endif /* CONFIG_CARL9170_LEDS */
 
 #ifdef CONFIG_CARL9170_WPC
        err = carl9170_register_wps_button(ar);
index 89deca37a9888963d4e9cca97b5173c84a528946..82bc81c4c9304253805ce43ad9c935121962dc0c 100644 (file)
@@ -1554,15 +1554,6 @@ static int carl9170_set_power_cal(struct ar9170 *ar, u32 freq,
        return carl9170_regwrite_result();
 }
 
-/* TODO: replace this with sign_extend32(noise, 8) */
-static int carl9170_calc_noise_dbm(u32 raw_noise)
-{
-       if (raw_noise & 0x100)
-               return ~0x1ff | raw_noise;
-       else
-               return raw_noise;
-}
-
 int carl9170_get_noisefloor(struct ar9170 *ar)
 {
        static const u32 phy_regs[] = {
@@ -1578,11 +1569,11 @@ int carl9170_get_noisefloor(struct ar9170 *ar)
                return err;
 
        for (i = 0; i < 2; i++) {
-               ar->noise[i] = carl9170_calc_noise_dbm(
-                       (phy_res[i] >> 19) & 0x1ff);
+               ar->noise[i] = sign_extend32(GET_VAL(
+                       AR9170_PHY_CCA_MIN_PWR, phy_res[i]), 8);
 
-               ar->noise[i + 2] = carl9170_calc_noise_dbm(
-                       (phy_res[i + 2] >> 23) & 0x1ff);
+               ar->noise[i + 2] = sign_extend32(GET_VAL(
+                       AR9170_PHY_EXT_CCA_MIN_PWR, phy_res[i + 2]), 8);
        }
 
        return 0;
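
The phy.c hunk above replaces an open-coded helper with sign_extend32(), which treats a given bit (here bit 8 of the 9-bit minimum-CCA-power field extracted by GET_VAL) as the sign bit of a two's-complement value. A stand-alone equivalent of what that call computes, for illustration only:

#include <linux/types.h>

/* Illustrative reimplementation: extend a 9-bit two's-complement field
 * (sign bit at position 8) to a signed 32-bit dBm value.  For values that
 * fit in 9 bits this matches sign_extend32(raw, 8). */
static inline s32 example_noise_to_dbm(u32 raw)
{
	if (raw & 0x100)		/* negative: set the upper bits */
		return (s32)(raw | ~0x1ffU);
	return (s32)raw;
}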
index 02c34eb4ebdec5fe64eb0730e24e89393c8f800f..024fb42bc7877bc61eb4d7ce8e0505c6af5f5854 100644 (file)
 #define                AR9170_PHY_AGC_CONTROL_NO_UPDATE_NF     0x00020000
 
 #define        AR9170_PHY_REG_CCA                      (AR9170_PHY_REG_BASE + 0x0064)
-#define                AR9170_PHY_CCA_MINCCA_PWR               0x0ff80000
-#define                AR9170_PHY_CCA_MINCCA_PWR_S             19
+#define                AR9170_PHY_CCA_MIN_PWR                  0x0ff80000
+#define                AR9170_PHY_CCA_MIN_PWR_S                19
 #define                AR9170_PHY_CCA_THRESH62                 0x0007f000
 #define                AR9170_PHY_CCA_THRESH62_S               12
 
 #define                AR9170_PHY_EXT_CCA_CYCPWR_THR1_S        9
 #define                AR9170_PHY_EXT_CCA_THRESH62             0x007f0000
 #define                AR9170_PHY_EXT_CCA_THRESH62_S           16
-#define                AR9170_PHY_EXT_MINCCA_PWR               0xff800000
-#define                AR9170_PHY_EXT_MINCCA_PWR_S             23
+#define                AR9170_PHY_EXT_CCA_MIN_PWR              0xff800000
+#define                AR9170_PHY_EXT_CCA_MIN_PWR_S            23
 
 #define        AR9170_PHY_REG_SFCORR_EXT               (AR9170_PHY_REG_BASE + 0x01c0)
 #define                AR9170_PHY_SFCORR_EXT_M1_THRESH         0x0000007f
 #define                AR9170_PHY_FORCE_XPA_CFG_S              0
 
 #define        AR9170_PHY_REG_CH1_CCA                  (AR9170_PHY_REG_BASE + 0x1064)
-#define                AR9170_PHY_CH1_MINCCA_PWR               0x0ff80000
-#define                AR9170_PHY_CH1_MINCCA_PWR_S             19
+#define                AR9170_PHY_CH1_CCA_MIN_PWR              0x0ff80000
+#define                AR9170_PHY_CH1_CCA_MIN_PWR_S            19
 
 #define        AR9170_PHY_REG_CH2_CCA                  (AR9170_PHY_REG_BASE + 0x2064)
-#define                AR9170_PHY_CH2_MINCCA_PWR               0x0ff80000
-#define                AR9170_PHY_CH2_MINCCA_PWR_S             19
+#define                AR9170_PHY_CH2_CCA_MIN_PWR              0x0ff80000
+#define                AR9170_PHY_CH2_CCA_MIN_PWR_S            19
 
 #define        AR9170_PHY_REG_CH1_EXT_CCA              (AR9170_PHY_REG_BASE + 0x11bc)
-#define                AR9170_PHY_CH1_EXT_MINCCA_PWR           0xff800000
-#define                AR9170_PHY_CH1_EXT_MINCCA_PWR_S         23
+#define                AR9170_PHY_CH1_EXT_CCA_MIN_PWR          0xff800000
+#define                AR9170_PHY_CH1_EXT_CCA_MIN_PWR_S        23
 
 #define        AR9170_PHY_REG_CH2_EXT_CCA              (AR9170_PHY_REG_BASE + 0x21bc)
-#define                AR9170_PHY_CH2_EXT_MINCCA_PWR           0xff800000
-#define                AR9170_PHY_CH2_EXT_MINCCA_PWR_S         23
+#define                AR9170_PHY_CH2_EXT_CCA_MIN_PWR          0xff800000
+#define                AR9170_PHY_CH2_EXT_CCA_MIN_PWR_S        23
 
 #endif /* __CARL9170_SHARED_PHY_H */
index b575c865142d64f82f8e072518c0de44e46039cc..688eede48516a56c19dd72687103feee95e8746b 100644 (file)
@@ -242,9 +242,11 @@ static void carl9170_tx_release(struct kref *ref)
                        ar->tx_ampdu_schedule = true;
 
                if (txinfo->flags & IEEE80211_TX_STAT_AMPDU) {
-                       txinfo->status.ampdu_len = txinfo->pad[0];
-                       txinfo->status.ampdu_ack_len = txinfo->pad[1];
-                       txinfo->pad[0] = txinfo->pad[1] = 0;
+                       struct _carl9170_tx_superframe *super;
+
+                       super = (void *)skb->data;
+                       txinfo->status.ampdu_len = super->s.rix;
+                       txinfo->status.ampdu_ack_len = super->s.cnt;
                } else if (txinfo->flags & IEEE80211_TX_STAT_ACK) {
                        /*
                         * drop redundant tx_status reports:
@@ -337,7 +339,8 @@ static void carl9170_tx_status_process_ampdu(struct ar9170 *ar,
        u8 tid;
 
        if (!(txinfo->flags & IEEE80211_TX_CTL_AMPDU) ||
-           txinfo->flags & IEEE80211_TX_CTL_INJECTED)
+           txinfo->flags & IEEE80211_TX_CTL_INJECTED ||
+          (!(super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_AGGR))))
                return;
 
        tx_info = IEEE80211_SKB_CB(skb);
@@ -389,8 +392,8 @@ static void carl9170_tx_status_process_ampdu(struct ar9170 *ar,
                sta_info->stats[tid].ampdu_ack_len++;
 
        if (super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_IMM_BA)) {
-               txinfo->pad[0] = sta_info->stats[tid].ampdu_len;
-               txinfo->pad[1] = sta_info->stats[tid].ampdu_ack_len;
+               super->s.rix = sta_info->stats[tid].ampdu_len;
+               super->s.cnt = sta_info->stats[tid].ampdu_ack_len;
                txinfo->flags |= IEEE80211_TX_STAT_AMPDU;
                sta_info->stats[tid].clear = true;
        }
@@ -524,6 +527,59 @@ next:
        }
 }
 
+static void carl9170_tx_ampdu_timeout(struct ar9170 *ar)
+{
+       struct carl9170_sta_tid *iter;
+       struct sk_buff *skb;
+       struct ieee80211_tx_info *txinfo;
+       struct carl9170_tx_info *arinfo;
+       struct _carl9170_tx_superframe *super;
+       struct ieee80211_sta *sta;
+       struct ieee80211_vif *vif;
+       struct ieee80211_hdr *hdr;
+       unsigned int vif_id;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) {
+               if (iter->state < CARL9170_TID_STATE_IDLE)
+                       continue;
+
+               spin_lock_bh(&iter->lock);
+               skb = skb_peek(&iter->queue);
+               if (!skb)
+                       goto unlock;
+
+               txinfo = IEEE80211_SKB_CB(skb);
+               arinfo = (void *)txinfo->rate_driver_data;
+               if (time_is_after_jiffies(arinfo->timeout +
+                   msecs_to_jiffies(CARL9170_QUEUE_TIMEOUT)))
+                       goto unlock;
+
+               super = (void *) skb->data;
+               hdr = (void *) super->frame_data;
+
+               vif_id = (super->s.misc & CARL9170_TX_SUPER_MISC_VIF_ID) >>
+                        CARL9170_TX_SUPER_MISC_VIF_ID_S;
+
+               if (WARN_ON(vif_id >= AR9170_MAX_VIRTUAL_MAC))
+                       goto unlock;
+
+               vif = rcu_dereference(ar->vif_priv[vif_id].vif);
+               if (WARN_ON(!vif))
+                       goto unlock;
+
+               sta = ieee80211_find_sta(vif, hdr->addr1);
+               if (WARN_ON(!sta))
+                       goto unlock;
+
+               ieee80211_stop_tx_ba_session(sta, iter->tid);
+unlock:
+               spin_unlock_bh(&iter->lock);
+
+       }
+       rcu_read_unlock();
+}
+
 void carl9170_tx_janitor(struct work_struct *work)
 {
        struct ar9170 *ar = container_of(work, struct ar9170,
@@ -534,6 +590,7 @@ void carl9170_tx_janitor(struct work_struct *work)
        ar->tx_janitor_last_run = jiffies;
 
        carl9170_check_queue_stop_timeout(ar);
+       carl9170_tx_ampdu_timeout(ar);
 
        if (!atomic_read(&ar->tx_total_queued))
                return;
@@ -842,10 +899,8 @@ static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
                if (unlikely(!sta || !cvif))
                        goto err_out;
 
-               factor = min_t(unsigned int, 1u,
-                        info->control.sta->ht_cap.ampdu_factor);
-
-               density = info->control.sta->ht_cap.ampdu_density;
+               factor = min_t(unsigned int, 1u, sta->ht_cap.ampdu_factor);
+               density = sta->ht_cap.ampdu_density;
 
                if (density) {
                        /*
@@ -1206,6 +1261,7 @@ static void carl9170_tx(struct ar9170 *ar)
 static bool carl9170_tx_ampdu_queue(struct ar9170 *ar,
        struct ieee80211_sta *sta, struct sk_buff *skb)
 {
+       struct _carl9170_tx_superframe *super = (void *) super;
        struct carl9170_sta_info *sta_info;
        struct carl9170_sta_tid *agg;
        struct sk_buff *iter;
@@ -1274,6 +1330,7 @@ err_unlock:
 
 err_unlock_rcu:
        rcu_read_unlock();
+       super->f.mac_control &= ~cpu_to_le16(AR9170_TX_MAC_AGGR);
        carl9170_tx_status(ar, skb, false);
        ar->tx_dropped++;
        return false;
@@ -1302,9 +1359,6 @@ int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
         */
 
        if (info->flags & IEEE80211_TX_CTL_AMPDU) {
-               if (WARN_ON_ONCE(!sta))
-                       goto err_free;
-
                run = carl9170_tx_ampdu_queue(ar, sta, skb);
                if (run)
                        carl9170_tx_ampdu(ar);
index 7504ed14c72575a7befb91b0c0d9dd2684660af9..a268053e18e5e1e2f430deb19d2edc1ab158fd1d 100644 (file)
@@ -433,7 +433,7 @@ static void carl9170_usb_rx_complete(struct urb *urb)
                         * device.
                         */
 
-                       carl9170_restart(ar, CARL9170_RR_SLOW_SYSTEM);
+                       ieee80211_queue_work(ar->hw, &ar->ping_work);
                }
        } else {
                /*
index ff53f078a0b5eaff1080bd7e27840940ae1cc0d1..ee0f84f2a2f6e791b64ac627e1934597d19b2d53 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef __CARL9170_SHARED_VERSION_H
 #define __CARL9170_SHARED_VERSION_H
 #define CARL9170FW_VERSION_YEAR 10
-#define CARL9170FW_VERSION_MONTH 9
-#define CARL9170FW_VERSION_DAY 28
-#define CARL9170FW_VERSION_GIT "1.8.8.3"
+#define CARL9170FW_VERSION_MONTH 10
+#define CARL9170FW_VERSION_DAY 29
+#define CARL9170FW_VERSION_GIT "1.9.0"
 #endif /* __CARL9170_SHARED_VERSION_H */
index 64e4af2c2887338b597412421f717099ff5bc078..f207007ee391d31baaf040a35d1623cd3ec8a22e 100644 (file)
@@ -70,11 +70,13 @@ enum ATH_DEBUG {
 #ifdef CONFIG_ATH_DEBUG
 void ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...)
        __attribute__ ((format (printf, 3, 4)));
+#define ATH_DBG_WARN(foo, arg...) WARN(foo, arg)
 #else
 static inline void __attribute__ ((format (printf, 3, 4)))
 ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...)
 {
 }
+#define ATH_DBG_WARN(foo, arg)
 #endif /* CONFIG_ATH_DEBUG */
 
 /** Returns string describing opmode, or NULL if unknown mode. */
index bd21a4d82085b5131cb89497bff4d939275c3748..62e3dac8f92a688e38f53d110a0a1478afe33899 100644 (file)
@@ -67,7 +67,8 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry)
 }
 EXPORT_SYMBOL(ath_hw_keyreset);
 
-bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac)
+static bool ath_hw_keysetmac(struct ath_common *common,
+                            u16 entry, const u8 *mac)
 {
        u32 macHi, macLo;
        u32 unicast_flag = AR_KEYTABLE_VALID;
@@ -107,9 +108,9 @@ bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac)
        return true;
 }
 
-bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
-                                const struct ath_keyval *k,
-                                const u8 *mac)
+static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
+                                     const struct ath_keyval *k,
+                                     const u8 *mac)
 {
        void *ah = common->ah;
        u32 key0, key1, key2, key3, key4;
index 72821c456b02fcf480525846ffce55e185486c73..9aad2ca3c112461fa886d1c339fb67d79c521df8 100644 (file)
 #define B43_BFH_FEM_BT                 0x0040  /* has FEM and switch to share antenna
                                                 * with bluetooth */
 
+/* SPROM boardflags2_lo values */
+#define B43_BFL2_RXBB_INT_REG_DIS      0x0001  /* external RX BB regulator present */
+#define B43_BFL2_APLL_WAR              0x0002  /* alternative A-band PLL settings implemented */
+#define B43_BFL2_TXPWRCTRL_EN          0x0004  /* permits enabling TX Power Control */
+#define B43_BFL2_2X4_DIV               0x0008  /* 2x4 diversity switch */
+#define B43_BFL2_5G_PWRGAIN            0x0010  /* supports 5G band power gain */
+#define B43_BFL2_PCIEWAR_OVR           0x0020  /* overrides ASPM and Clkreq settings */
+#define B43_BFL2_CAESERS_BRD           0x0040  /* is Caesers board (unused) */
+#define B43_BFL2_BTC3WIRE              0x0080  /* used 3-wire bluetooth coexist */
+#define B43_BFL2_SKWRKFEM_BRD          0x0100  /* 4321mcm93 uses Skyworks FEM */
+#define B43_BFL2_SPUR_WAR              0x0200  /* has a workaround for clock-harmonic spurs */
+#define B43_BFL2_GPLL_WAR              0x0400  /* altenative G-band PLL settings implemented */
+
 /* GPIO register offset, in both ChipCommon and PCI core. */
 #define B43_GPIO_CONTROL               0x6c
 
index 10d0aaf754c5ab22bca4709e0fcc6813ac71fc0a..3d5566e7af0ad51ea08088c8adaa03f1c2a1e02b 100644 (file)
@@ -415,11 +415,6 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
 
 static void free_ringmemory(struct b43_dmaring *ring)
 {
-       gfp_t flags = GFP_KERNEL;
-
-       if (ring->type == B43_DMA_64BIT)
-               flags |= GFP_DMA;
-
        dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE,
                          ring->descbase, ring->dmabase);
 }
index e0f2d122e124486b7e1405eabd48ce80cdde54bb..6facb8ab05d1a475db48dcbd8c688ab64e59c0f2 100644 (file)
@@ -191,7 +191,8 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
                                binfo->type != 0x46D ||
                                binfo->rev < 0x41);
        else
-               workaround = ((sprom->boardflags_hi & B43_BFH_NOPA) == 0);
+               workaround =
+                       !(sprom->boardflags2_lo & B43_BFL2_RXBB_INT_REG_DIS);
 
        b43_radio_mask(dev, B2055_MASTER1, 0xFFF3);
        if (workaround) {
@@ -240,10 +241,13 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
 static void b43_radio_init2055(struct b43_wldev *dev)
 {
        b43_radio_init2055_pre(dev);
-       if (b43_status(dev) < B43_STAT_INITIALIZED)
-               b2055_upload_inittab(dev, 0, 1);
-       else
-               b2055_upload_inittab(dev, 0/*FIXME on 5ghz band*/, 0);
+       if (b43_status(dev) < B43_STAT_INITIALIZED) {
+               /* Follow wl, not specs. Do not force uploading all regs */
+               b2055_upload_inittab(dev, 0, 0);
+       } else {
+               bool ghz5 = b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ;
+               b2055_upload_inittab(dev, ghz5, 0);
+       }
        b43_radio_init2055_post(dev);
 }
 
index 1b5316586cbffd6134909b919c1da3db28d8ebfd..0d6771515bce6dea10734dbe695fcbb09411ca91 100644 (file)
@@ -244,7 +244,7 @@ static const struct b2055_inittab_entry b2055_inittab [] = {
   [0xCB]                       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
   [0xCC]                       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
   [B2055_C1_LNA_GAINBST]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
-  [0xCE]                       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+  [0xCE]                       = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
   [0xCF]                       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
   [0xD0]                       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
   [0xD1]                       = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
@@ -256,7 +256,7 @@ static const struct b2055_inittab_entry b2055_inittab [] = {
   [0xD7]                       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
   [0xD8]                       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
   [B2055_C2_LNA_GAINBST]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
-  [0xDA]                       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+  [0xDA]                       = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
   [0xDB]                       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
   [0xDC]                       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
   [0xDD]                       = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
@@ -1299,7 +1299,7 @@ void b2055_upload_inittab(struct b43_wldev *dev,
                          bool ghz5, bool ignore_uploadflag)
 {
        const struct b2055_inittab_entry *e;
-       unsigned int i;
+       unsigned int i, writes = 0;
        u16 value;
 
        for (i = 0; i < ARRAY_SIZE(b2055_inittab); i++) {
@@ -1312,6 +1312,8 @@ void b2055_upload_inittab(struct b43_wldev *dev,
                        else
                                value = e->ghz2;
                        b43_radio_write16(dev, i, value);
+                       if (++writes % 4 == 0)
+                               b43_read32(dev, B43_MMIO_MACCTL); /* flush */
                }
        }
 }
index d8563192ce56cc538086e3171fd68fed6472c24f..f710c01f2cc4b9d46fe3c763eaf31a2b92eff636 100644 (file)
 #include "radio_2056.h"
 #include "phy_common.h"
 
+#define RADIOREGS3(r00, r01, r02, r03, r04, r05, r06, r07, r08, r09, \
+                  r10, r11, r12, r13, r14, r15, r16, r17, r18, r19, \
+                  r20, r21, r22, r23, r24, r25, r26, r27, r28, r29, \
+                  r30, r31, r32, r33, r34, r35, r36) \
+       .radio_syn_pll_vcocal1          = r00,  \
+       .radio_syn_pll_vcocal2          = r01,  \
+       .radio_syn_pll_refdiv           = r02,  \
+       .radio_syn_pll_mmd2             = r03,  \
+       .radio_syn_pll_mmd1             = r04,  \
+       .radio_syn_pll_loopfilter1      = r05,  \
+       .radio_syn_pll_loopfilter2      = r06,  \
+       .radio_syn_pll_loopfilter3      = r07,  \
+       .radio_syn_pll_loopfilter4      = r08,  \
+       .radio_syn_pll_loopfilter5      = r09,  \
+       .radio_syn_reserved_addr27      = r10,  \
+       .radio_syn_reserved_addr28      = r11,  \
+       .radio_syn_reserved_addr29      = r12,  \
+       .radio_syn_logen_vcobuf1        = r13,  \
+       .radio_syn_logen_mixer2         = r14,  \
+       .radio_syn_logen_buf3           = r15,  \
+       .radio_syn_logen_buf4           = r16,  \
+       .radio_rx0_lnaa_tune            = r17,  \
+       .radio_rx0_lnag_tune            = r18,  \
+       .radio_tx0_intpaa_boost_tune    = r19,  \
+       .radio_tx0_intpag_boost_tune    = r20,  \
+       .radio_tx0_pada_boost_tune      = r21,  \
+       .radio_tx0_padg_boost_tune      = r22,  \
+       .radio_tx0_pgaa_boost_tune      = r23,  \
+       .radio_tx0_pgag_boost_tune      = r24,  \
+       .radio_tx0_mixa_boost_tune      = r25,  \
+       .radio_tx0_mixg_boost_tune      = r26,  \
+       .radio_rx1_lnaa_tune            = r27,  \
+       .radio_rx1_lnag_tune            = r28,  \
+       .radio_tx1_intpaa_boost_tune    = r29,  \
+       .radio_tx1_intpag_boost_tune    = r30,  \
+       .radio_tx1_pada_boost_tune      = r31,  \
+       .radio_tx1_padg_boost_tune      = r32,  \
+       .radio_tx1_pgaa_boost_tune      = r33,  \
+       .radio_tx1_pgag_boost_tune      = r34,  \
+       .radio_tx1_mixa_boost_tune      = r35,  \
+       .radio_tx1_mixg_boost_tune      = r36
+
+#define PHYREGS(r0, r1, r2, r3, r4, r5)        \
+       .phy_regs.phy_bw1a      = r0,   \
+       .phy_regs.phy_bw2       = r1,   \
+       .phy_regs.phy_bw3       = r2,   \
+       .phy_regs.phy_bw4       = r3,   \
+       .phy_regs.phy_bw5       = r4,   \
+       .phy_regs.phy_bw6       = r5
+
 static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev3[] = {
 };
 
+/* TODO: add support for rev4+ devices by searching in rev4+ tables */
 const struct b43_nphy_channeltab_entry_rev3 *
 b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq)
 {
index fda6dafecb8cb683faef59118a38661e04e340c9..302600c0afa49e7206886a5e0bd021df1439dfd4 100644 (file)
@@ -4,6 +4,9 @@
 
  Copyright (c) 2010 Rafał Miłecki <zajec5@gmail.com>
 
+  Some parts of the code in this file are derived from the brcm80211
+  driver  Copyright (c) 2010 Broadcom Corporation
+
   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
 
 #include "tables_nphy.h"
 
+#define B2056_SYN                      (0x0 << 12)
+#define B2056_TX0                      (0x2 << 12)
+#define B2056_TX1                      (0x3 << 12)
+#define B2056_RX0                      (0x6 << 12)
+#define B2056_RX1                      (0x7 << 12)
+#define B2056_ALLTX                    (0xE << 12)
+#define B2056_ALLRX                    (0xF << 12)
+
+#define B2056_SYN_RESERVED_ADDR0       0x00
+#define B2056_SYN_IDCODE               0x01
+#define B2056_SYN_RESERVED_ADDR2       0x02
+#define B2056_SYN_RESERVED_ADDR3       0x03
+#define B2056_SYN_RESERVED_ADDR4       0x04
+#define B2056_SYN_RESERVED_ADDR5       0x05
+#define B2056_SYN_RESERVED_ADDR6       0x06
+#define B2056_SYN_RESERVED_ADDR7       0x07
+#define B2056_SYN_COM_CTRL             0x08
+#define B2056_SYN_COM_PU               0x09
+#define B2056_SYN_COM_OVR              0x0A
+#define B2056_SYN_COM_RESET            0x0B
+#define B2056_SYN_COM_RCAL             0x0C
+#define B2056_SYN_COM_RC_RXLPF         0x0D
+#define B2056_SYN_COM_RC_TXLPF         0x0E
+#define B2056_SYN_COM_RC_RXHPF         0x0F
+#define B2056_SYN_RESERVED_ADDR16      0x10
+#define B2056_SYN_RESERVED_ADDR17      0x11
+#define B2056_SYN_RESERVED_ADDR18      0x12
+#define B2056_SYN_RESERVED_ADDR19      0x13
+#define B2056_SYN_RESERVED_ADDR20      0x14
+#define B2056_SYN_RESERVED_ADDR21      0x15
+#define B2056_SYN_RESERVED_ADDR22      0x16
+#define B2056_SYN_RESERVED_ADDR23      0x17
+#define B2056_SYN_RESERVED_ADDR24      0x18
+#define B2056_SYN_RESERVED_ADDR25      0x19
+#define B2056_SYN_RESERVED_ADDR26      0x1A
+#define B2056_SYN_RESERVED_ADDR27      0x1B
+#define B2056_SYN_RESERVED_ADDR28      0x1C
+#define B2056_SYN_RESERVED_ADDR29      0x1D
+#define B2056_SYN_RESERVED_ADDR30      0x1E
+#define B2056_SYN_RESERVED_ADDR31      0x1F
+#define B2056_SYN_GPIO_MASTER1         0x20
+#define B2056_SYN_GPIO_MASTER2         0x21
+#define B2056_SYN_TOPBIAS_MASTER       0x22
+#define B2056_SYN_TOPBIAS_RCAL         0x23
+#define B2056_SYN_AFEREG               0x24
+#define B2056_SYN_TEMPPROCSENSE                0x25
+#define B2056_SYN_TEMPPROCSENSEIDAC    0x26
+#define B2056_SYN_TEMPPROCSENSERCAL    0x27
+#define B2056_SYN_LPO                  0x28
+#define B2056_SYN_VDDCAL_MASTER                0x29
+#define B2056_SYN_VDDCAL_IDAC          0x2A
+#define B2056_SYN_VDDCAL_STATUS                0x2B
+#define B2056_SYN_RCAL_MASTER          0x2C
+#define B2056_SYN_RCAL_CODE_OUT                0x2D
+#define B2056_SYN_RCCAL_CTRL0          0x2E
+#define B2056_SYN_RCCAL_CTRL1          0x2F
+#define B2056_SYN_RCCAL_CTRL2          0x30
+#define B2056_SYN_RCCAL_CTRL3          0x31
+#define B2056_SYN_RCCAL_CTRL4          0x32
+#define B2056_SYN_RCCAL_CTRL5          0x33
+#define B2056_SYN_RCCAL_CTRL6          0x34
+#define B2056_SYN_RCCAL_CTRL7          0x35
+#define B2056_SYN_RCCAL_CTRL8          0x36
+#define B2056_SYN_RCCAL_CTRL9          0x37
+#define B2056_SYN_RCCAL_CTRL10         0x38
+#define B2056_SYN_RCCAL_CTRL11         0x39
+#define B2056_SYN_ZCAL_SPARE1          0x3A
+#define B2056_SYN_ZCAL_SPARE2          0x3B
+#define B2056_SYN_PLL_MAST1            0x3C
+#define B2056_SYN_PLL_MAST2            0x3D
+#define B2056_SYN_PLL_MAST3            0x3E
+#define B2056_SYN_PLL_BIAS_RESET       0x3F
+#define B2056_SYN_PLL_XTAL0            0x40
+#define B2056_SYN_PLL_XTAL1            0x41
+#define B2056_SYN_PLL_XTAL3            0x42
+#define B2056_SYN_PLL_XTAL4            0x43
+#define B2056_SYN_PLL_XTAL5            0x44
+#define B2056_SYN_PLL_XTAL6            0x45
+#define B2056_SYN_PLL_REFDIV           0x46
+#define B2056_SYN_PLL_PFD              0x47
+#define B2056_SYN_PLL_CP1              0x48
+#define B2056_SYN_PLL_CP2              0x49
+#define B2056_SYN_PLL_CP3              0x4A
+#define B2056_SYN_PLL_LOOPFILTER1      0x4B
+#define B2056_SYN_PLL_LOOPFILTER2      0x4C
+#define B2056_SYN_PLL_LOOPFILTER3      0x4D
+#define B2056_SYN_PLL_LOOPFILTER4      0x4E
+#define B2056_SYN_PLL_LOOPFILTER5      0x4F
+#define B2056_SYN_PLL_MMD1             0x50
+#define B2056_SYN_PLL_MMD2             0x51
+#define B2056_SYN_PLL_VCO1             0x52
+#define B2056_SYN_PLL_VCO2             0x53
+#define B2056_SYN_PLL_MONITOR1         0x54
+#define B2056_SYN_PLL_MONITOR2         0x55
+#define B2056_SYN_PLL_VCOCAL1          0x56
+#define B2056_SYN_PLL_VCOCAL2          0x57
+#define B2056_SYN_PLL_VCOCAL4          0x58
+#define B2056_SYN_PLL_VCOCAL5          0x59
+#define B2056_SYN_PLL_VCOCAL6          0x5A
+#define B2056_SYN_PLL_VCOCAL7          0x5B
+#define B2056_SYN_PLL_VCOCAL8          0x5C
+#define B2056_SYN_PLL_VCOCAL9          0x5D
+#define B2056_SYN_PLL_VCOCAL10         0x5E
+#define B2056_SYN_PLL_VCOCAL11         0x5F
+#define B2056_SYN_PLL_VCOCAL12         0x60
+#define B2056_SYN_PLL_VCOCAL13         0x61
+#define B2056_SYN_PLL_VREG             0x62
+#define B2056_SYN_PLL_STATUS1          0x63
+#define B2056_SYN_PLL_STATUS2          0x64
+#define B2056_SYN_PLL_STATUS3          0x65
+#define B2056_SYN_LOGEN_PU0            0x66
+#define B2056_SYN_LOGEN_PU1            0x67
+#define B2056_SYN_LOGEN_PU2            0x68
+#define B2056_SYN_LOGEN_PU3            0x69
+#define B2056_SYN_LOGEN_PU5            0x6A
+#define B2056_SYN_LOGEN_PU6            0x6B
+#define B2056_SYN_LOGEN_PU7            0x6C
+#define B2056_SYN_LOGEN_PU8            0x6D
+#define B2056_SYN_LOGEN_BIAS_RESET     0x6E
+#define B2056_SYN_LOGEN_RCCR1          0x6F
+#define B2056_SYN_LOGEN_VCOBUF1                0x70
+#define B2056_SYN_LOGEN_MIXER1         0x71
+#define B2056_SYN_LOGEN_MIXER2         0x72
+#define B2056_SYN_LOGEN_BUF1           0x73
+#define B2056_SYN_LOGENBUF2            0x74
+#define B2056_SYN_LOGEN_BUF3           0x75
+#define B2056_SYN_LOGEN_BUF4           0x76
+#define B2056_SYN_LOGEN_DIV1           0x77
+#define B2056_SYN_LOGEN_DIV2           0x78
+#define B2056_SYN_LOGEN_DIV3           0x79
+#define B2056_SYN_LOGEN_ACL1           0x7A
+#define B2056_SYN_LOGEN_ACL2           0x7B
+#define B2056_SYN_LOGEN_ACL3           0x7C
+#define B2056_SYN_LOGEN_ACL4           0x7D
+#define B2056_SYN_LOGEN_ACL5           0x7E
+#define B2056_SYN_LOGEN_ACL6           0x7F
+#define B2056_SYN_LOGEN_ACLOUT         0x80
+#define B2056_SYN_LOGEN_ACLCAL1                0x81
+#define B2056_SYN_LOGEN_ACLCAL2                0x82
+#define B2056_SYN_LOGEN_ACLCAL3                0x83
+#define B2056_SYN_CALEN                        0x84
+#define B2056_SYN_LOGEN_PEAKDET1       0x85
+#define B2056_SYN_LOGEN_CORE_ACL_OVR   0x86
+#define B2056_SYN_LOGEN_RX_DIFF_ACL_OVR        0x87
+#define B2056_SYN_LOGEN_TX_DIFF_ACL_OVR        0x88
+#define B2056_SYN_LOGEN_RX_CMOS_ACL_OVR        0x89
+#define B2056_SYN_LOGEN_TX_CMOS_ACL_OVR        0x8A
+#define B2056_SYN_LOGEN_VCOBUF2                0x8B
+#define B2056_SYN_LOGEN_MIXER3         0x8C
+#define B2056_SYN_LOGEN_BUF5           0x8D
+#define B2056_SYN_LOGEN_BUF6           0x8E
+#define B2056_SYN_LOGEN_CBUFRX1                0x8F
+#define B2056_SYN_LOGEN_CBUFRX2                0x90
+#define B2056_SYN_LOGEN_CBUFRX3                0x91
+#define B2056_SYN_LOGEN_CBUFRX4                0x92
+#define B2056_SYN_LOGEN_CBUFTX1                0x93
+#define B2056_SYN_LOGEN_CBUFTX2                0x94
+#define B2056_SYN_LOGEN_CBUFTX3                0x95
+#define B2056_SYN_LOGEN_CBUFTX4                0x96
+#define B2056_SYN_LOGEN_CMOSRX1                0x97
+#define B2056_SYN_LOGEN_CMOSRX2                0x98
+#define B2056_SYN_LOGEN_CMOSRX3                0x99
+#define B2056_SYN_LOGEN_CMOSRX4                0x9A
+#define B2056_SYN_LOGEN_CMOSTX1                0x9B
+#define B2056_SYN_LOGEN_CMOSTX2                0x9C
+#define B2056_SYN_LOGEN_CMOSTX3                0x9D
+#define B2056_SYN_LOGEN_CMOSTX4                0x9E
+#define B2056_SYN_LOGEN_VCOBUF2_OVRVAL 0x9F
+#define B2056_SYN_LOGEN_MIXER3_OVRVAL  0xA0
+#define B2056_SYN_LOGEN_BUF5_OVRVAL    0xA1
+#define B2056_SYN_LOGEN_BUF6_OVRVAL    0xA2
+#define B2056_SYN_LOGEN_CBUFRX1_OVRVAL 0xA3
+#define B2056_SYN_LOGEN_CBUFRX2_OVRVAL 0xA4
+#define B2056_SYN_LOGEN_CBUFRX3_OVRVAL 0xA5
+#define B2056_SYN_LOGEN_CBUFRX4_OVRVAL 0xA6
+#define B2056_SYN_LOGEN_CBUFTX1_OVRVAL 0xA7
+#define B2056_SYN_LOGEN_CBUFTX2_OVRVAL 0xA8
+#define B2056_SYN_LOGEN_CBUFTX3_OVRVAL 0xA9
+#define B2056_SYN_LOGEN_CBUFTX4_OVRVAL 0xAA
+#define B2056_SYN_LOGEN_CMOSRX1_OVRVAL 0xAB
+#define B2056_SYN_LOGEN_CMOSRX2_OVRVAL 0xAC
+#define B2056_SYN_LOGEN_CMOSRX3_OVRVAL 0xAD
+#define B2056_SYN_LOGEN_CMOSRX4_OVRVAL 0xAE
+#define B2056_SYN_LOGEN_CMOSTX1_OVRVAL 0xAF
+#define B2056_SYN_LOGEN_CMOSTX2_OVRVAL 0xB0
+#define B2056_SYN_LOGEN_CMOSTX3_OVRVAL 0xB1
+#define B2056_SYN_LOGEN_CMOSTX4_OVRVAL 0xB2
+#define B2056_SYN_LOGEN_ACL_WAITCNT    0xB3
+#define B2056_SYN_LOGEN_CORE_CALVALID  0xB4
+#define B2056_SYN_LOGEN_RX_CMOS_CALVALID       0xB5
+#define B2056_SYN_LOGEN_TX_CMOS_VALID  0xB6
+
+#define B2056_TX_RESERVED_ADDR0                0x00
+#define B2056_TX_IDCODE                        0x01
+#define B2056_TX_RESERVED_ADDR2                0x02
+#define B2056_TX_RESERVED_ADDR3                0x03
+#define B2056_TX_RESERVED_ADDR4                0x04
+#define B2056_TX_RESERVED_ADDR5                0x05
+#define B2056_TX_RESERVED_ADDR6                0x06
+#define B2056_TX_RESERVED_ADDR7                0x07
+#define B2056_TX_COM_CTRL              0x08
+#define B2056_TX_COM_PU                        0x09
+#define B2056_TX_COM_OVR               0x0A
+#define B2056_TX_COM_RESET             0x0B
+#define B2056_TX_COM_RCAL              0x0C
+#define B2056_TX_COM_RC_RXLPF          0x0D
+#define B2056_TX_COM_RC_TXLPF          0x0E
+#define B2056_TX_COM_RC_RXHPF          0x0F
+#define B2056_TX_RESERVED_ADDR16       0x10
+#define B2056_TX_RESERVED_ADDR17       0x11
+#define B2056_TX_RESERVED_ADDR18       0x12
+#define B2056_TX_RESERVED_ADDR19       0x13
+#define B2056_TX_RESERVED_ADDR20       0x14
+#define B2056_TX_RESERVED_ADDR21       0x15
+#define B2056_TX_RESERVED_ADDR22       0x16
+#define B2056_TX_RESERVED_ADDR23       0x17
+#define B2056_TX_RESERVED_ADDR24       0x18
+#define B2056_TX_RESERVED_ADDR25       0x19
+#define B2056_TX_RESERVED_ADDR26       0x1A
+#define B2056_TX_RESERVED_ADDR27       0x1B
+#define B2056_TX_RESERVED_ADDR28       0x1C
+#define B2056_TX_RESERVED_ADDR29       0x1D
+#define B2056_TX_RESERVED_ADDR30       0x1E
+#define B2056_TX_RESERVED_ADDR31       0x1F
+#define B2056_TX_IQCAL_GAIN_BW         0x20
+#define B2056_TX_LOFT_FINE_I           0x21
+#define B2056_TX_LOFT_FINE_Q           0x22
+#define B2056_TX_LOFT_COARSE_I         0x23
+#define B2056_TX_LOFT_COARSE_Q         0x24
+#define B2056_TX_TX_COM_MASTER1                0x25
+#define B2056_TX_TX_COM_MASTER2                0x26
+#define B2056_TX_RXIQCAL_TXMUX         0x27
+#define B2056_TX_TX_SSI_MASTER         0x28
+#define B2056_TX_IQCAL_VCM_HG          0x29
+#define B2056_TX_IQCAL_IDAC            0x2A
+#define B2056_TX_TSSI_VCM              0x2B
+#define B2056_TX_TX_AMP_DET            0x2C
+#define B2056_TX_TX_SSI_MUX            0x2D
+#define B2056_TX_TSSIA                 0x2E
+#define B2056_TX_TSSIG                 0x2F
+#define B2056_TX_TSSI_MISC1            0x30
+#define B2056_TX_TSSI_MISC2            0x31
+#define B2056_TX_TSSI_MISC3            0x32
+#define B2056_TX_PA_SPARE1             0x33
+#define B2056_TX_PA_SPARE2             0x34
+#define B2056_TX_INTPAA_MASTER         0x35
+#define B2056_TX_INTPAA_GAIN           0x36
+#define B2056_TX_INTPAA_BOOST_TUNE     0x37
+#define B2056_TX_INTPAA_IAUX_STAT      0x38
+#define B2056_TX_INTPAA_IAUX_DYN       0x39
+#define B2056_TX_INTPAA_IMAIN_STAT     0x3A
+#define B2056_TX_INTPAA_IMAIN_DYN      0x3B
+#define B2056_TX_INTPAA_CASCBIAS       0x3C
+#define B2056_TX_INTPAA_PASLOPE                0x3D
+#define B2056_TX_INTPAA_PA_MISC                0x3E
+#define B2056_TX_INTPAG_MASTER         0x3F
+#define B2056_TX_INTPAG_GAIN           0x40
+#define B2056_TX_INTPAG_BOOST_TUNE     0x41
+#define B2056_TX_INTPAG_IAUX_STAT      0x42
+#define B2056_TX_INTPAG_IAUX_DYN       0x43
+#define B2056_TX_INTPAG_IMAIN_STAT     0x44
+#define B2056_TX_INTPAG_IMAIN_DYN      0x45
+#define B2056_TX_INTPAG_CASCBIAS       0x46
+#define B2056_TX_INTPAG_PASLOPE                0x47
+#define B2056_TX_INTPAG_PA_MISC                0x48
+#define B2056_TX_PADA_MASTER           0x49
+#define B2056_TX_PADA_IDAC             0x4A
+#define B2056_TX_PADA_CASCBIAS         0x4B
+#define B2056_TX_PADA_GAIN             0x4C
+#define B2056_TX_PADA_BOOST_TUNE       0x4D
+#define B2056_TX_PADA_SLOPE            0x4E
+#define B2056_TX_PADG_MASTER           0x4F
+#define B2056_TX_PADG_IDAC             0x50
+#define B2056_TX_PADG_CASCBIAS         0x51
+#define B2056_TX_PADG_GAIN             0x52
+#define B2056_TX_PADG_BOOST_TUNE       0x53
+#define B2056_TX_PADG_SLOPE            0x54
+#define B2056_TX_PGAA_MASTER           0x55
+#define B2056_TX_PGAA_IDAC             0x56
+#define B2056_TX_PGAA_GAIN             0x57
+#define B2056_TX_PGAA_BOOST_TUNE       0x58
+#define B2056_TX_PGAA_SLOPE            0x59
+#define B2056_TX_PGAA_MISC             0x5A
+#define B2056_TX_PGAG_MASTER           0x5B
+#define B2056_TX_PGAG_IDAC             0x5C
+#define B2056_TX_PGAG_GAIN             0x5D
+#define B2056_TX_PGAG_BOOST_TUNE       0x5E
+#define B2056_TX_PGAG_SLOPE            0x5F
+#define B2056_TX_PGAG_MISC             0x60
+#define B2056_TX_MIXA_MASTER           0x61
+#define B2056_TX_MIXA_BOOST_TUNE       0x62
+#define B2056_TX_MIXG                  0x63
+#define B2056_TX_MIXG_BOOST_TUNE       0x64
+#define B2056_TX_BB_GM_MASTER          0x65
+#define B2056_TX_GMBB_GM               0x66
+#define B2056_TX_GMBB_IDAC             0x67
+#define B2056_TX_TXLPF_MASTER          0x68
+#define B2056_TX_TXLPF_RCCAL           0x69
+#define B2056_TX_TXLPF_RCCAL_OFF0      0x6A
+#define B2056_TX_TXLPF_RCCAL_OFF1      0x6B
+#define B2056_TX_TXLPF_RCCAL_OFF2      0x6C
+#define B2056_TX_TXLPF_RCCAL_OFF3      0x6D
+#define B2056_TX_TXLPF_RCCAL_OFF4      0x6E
+#define B2056_TX_TXLPF_RCCAL_OFF5      0x6F
+#define B2056_TX_TXLPF_RCCAL_OFF6      0x70
+#define B2056_TX_TXLPF_BW              0x71
+#define B2056_TX_TXLPF_GAIN            0x72
+#define B2056_TX_TXLPF_IDAC            0x73
+#define B2056_TX_TXLPF_IDAC_0          0x74
+#define B2056_TX_TXLPF_IDAC_1          0x75
+#define B2056_TX_TXLPF_IDAC_2          0x76
+#define B2056_TX_TXLPF_IDAC_3          0x77
+#define B2056_TX_TXLPF_IDAC_4          0x78
+#define B2056_TX_TXLPF_IDAC_5          0x79
+#define B2056_TX_TXLPF_IDAC_6          0x7A
+#define B2056_TX_TXLPF_OPAMP_IDAC      0x7B
+#define B2056_TX_TXLPF_MISC            0x7C
+#define B2056_TX_TXSPARE1              0x7D
+#define B2056_TX_TXSPARE2              0x7E
+#define B2056_TX_TXSPARE3              0x7F
+#define B2056_TX_TXSPARE4              0x80
+#define B2056_TX_TXSPARE5              0x81
+#define B2056_TX_TXSPARE6              0x82
+#define B2056_TX_TXSPARE7              0x83
+#define B2056_TX_TXSPARE8              0x84
+#define B2056_TX_TXSPARE9              0x85
+#define B2056_TX_TXSPARE10             0x86
+#define B2056_TX_TXSPARE11             0x87
+#define B2056_TX_TXSPARE12             0x88
+#define B2056_TX_TXSPARE13             0x89
+#define B2056_TX_TXSPARE14             0x8A
+#define B2056_TX_TXSPARE15             0x8B
+#define B2056_TX_TXSPARE16             0x8C
+#define B2056_TX_STATUS_INTPA_GAIN     0x8D
+#define B2056_TX_STATUS_PAD_GAIN       0x8E
+#define B2056_TX_STATUS_PGA_GAIN       0x8F
+#define B2056_TX_STATUS_GM_TXLPF_GAIN  0x90
+#define B2056_TX_STATUS_TXLPF_BW       0x91
+#define B2056_TX_STATUS_TXLPF_RC       0x92
+#define B2056_TX_GMBB_IDAC0            0x93
+#define B2056_TX_GMBB_IDAC1            0x94
+#define B2056_TX_GMBB_IDAC2            0x95
+#define B2056_TX_GMBB_IDAC3            0x96
+#define B2056_TX_GMBB_IDAC4            0x97
+#define B2056_TX_GMBB_IDAC5            0x98
+#define B2056_TX_GMBB_IDAC6            0x99
+#define B2056_TX_GMBB_IDAC7            0x9A
+
+#define B2056_RX_RESERVED_ADDR0                0x00
+#define B2056_RX_IDCODE                        0x01
+#define B2056_RX_RESERVED_ADDR2                0x02
+#define B2056_RX_RESERVED_ADDR3                0x03
+#define B2056_RX_RESERVED_ADDR4                0x04
+#define B2056_RX_RESERVED_ADDR5                0x05
+#define B2056_RX_RESERVED_ADDR6                0x06
+#define B2056_RX_RESERVED_ADDR7                0x07
+#define B2056_RX_COM_CTRL              0x08
+#define B2056_RX_COM_PU                        0x09
+#define B2056_RX_COM_OVR               0x0A
+#define B2056_RX_COM_RESET             0x0B
+#define B2056_RX_COM_RCAL              0x0C
+#define B2056_RX_COM_RC_RXLPF          0x0D
+#define B2056_RX_COM_RC_TXLPF          0x0E
+#define B2056_RX_COM_RC_RXHPF          0x0F
+#define B2056_RX_RESERVED_ADDR16       0x10
+#define B2056_RX_RESERVED_ADDR17       0x11
+#define B2056_RX_RESERVED_ADDR18       0x12
+#define B2056_RX_RESERVED_ADDR19       0x13
+#define B2056_RX_RESERVED_ADDR20       0x14
+#define B2056_RX_RESERVED_ADDR21       0x15
+#define B2056_RX_RESERVED_ADDR22       0x16
+#define B2056_RX_RESERVED_ADDR23       0x17
+#define B2056_RX_RESERVED_ADDR24       0x18
+#define B2056_RX_RESERVED_ADDR25       0x19
+#define B2056_RX_RESERVED_ADDR26       0x1A
+#define B2056_RX_RESERVED_ADDR27       0x1B
+#define B2056_RX_RESERVED_ADDR28       0x1C
+#define B2056_RX_RESERVED_ADDR29       0x1D
+#define B2056_RX_RESERVED_ADDR30       0x1E
+#define B2056_RX_RESERVED_ADDR31       0x1F
+#define B2056_RX_RXIQCAL_RXMUX         0x20
+#define B2056_RX_RSSI_PU               0x21
+#define B2056_RX_RSSI_SEL              0x22
+#define B2056_RX_RSSI_GAIN             0x23
+#define B2056_RX_RSSI_NB_IDAC          0x24
+#define B2056_RX_RSSI_WB2I_IDAC_1      0x25
+#define B2056_RX_RSSI_WB2I_IDAC_2      0x26
+#define B2056_RX_RSSI_WB2Q_IDAC_1      0x27
+#define B2056_RX_RSSI_WB2Q_IDAC_2      0x28
+#define B2056_RX_RSSI_POLE             0x29
+#define B2056_RX_RSSI_WB1_IDAC         0x2A
+#define B2056_RX_RSSI_MISC             0x2B
+#define B2056_RX_LNAA_MASTER           0x2C
+#define B2056_RX_LNAA_TUNE             0x2D
+#define B2056_RX_LNAA_GAIN             0x2E
+#define B2056_RX_LNA_A_SLOPE           0x2F
+#define B2056_RX_BIASPOLE_LNAA1_IDAC   0x30
+#define B2056_RX_LNAA2_IDAC            0x31
+#define B2056_RX_LNA1A_MISC            0x32
+#define B2056_RX_LNAG_MASTER           0x33
+#define B2056_RX_LNAG_TUNE             0x34
+#define B2056_RX_LNAG_GAIN             0x35
+#define B2056_RX_LNA_G_SLOPE           0x36
+#define B2056_RX_BIASPOLE_LNAG1_IDAC   0x37
+#define B2056_RX_LNAG2_IDAC            0x38
+#define B2056_RX_LNA1G_MISC            0x39
+#define B2056_RX_MIXA_MASTER           0x3A
+#define B2056_RX_MIXA_VCM              0x3B
+#define B2056_RX_MIXA_CTRLPTAT         0x3C
+#define B2056_RX_MIXA_LOB_BIAS         0x3D
+#define B2056_RX_MIXA_CORE_IDAC                0x3E
+#define B2056_RX_MIXA_CMFB_IDAC                0x3F
+#define B2056_RX_MIXA_BIAS_AUX         0x40
+#define B2056_RX_MIXA_BIAS_MAIN                0x41
+#define B2056_RX_MIXA_BIAS_MISC                0x42
+#define B2056_RX_MIXA_MAST_BIAS                0x43
+#define B2056_RX_MIXG_MASTER           0x44
+#define B2056_RX_MIXG_VCM              0x45
+#define B2056_RX_MIXG_CTRLPTAT         0x46
+#define B2056_RX_MIXG_LOB_BIAS         0x47
+#define B2056_RX_MIXG_CORE_IDAC                0x48
+#define B2056_RX_MIXG_CMFB_IDAC                0x49
+#define B2056_RX_MIXG_BIAS_AUX         0x4A
+#define B2056_RX_MIXG_BIAS_MAIN                0x4B
+#define B2056_RX_MIXG_BIAS_MISC                0x4C
+#define B2056_RX_MIXG_MAST_BIAS                0x4D
+#define B2056_RX_TIA_MASTER            0x4E
+#define B2056_RX_TIA_IOPAMP            0x4F
+#define B2056_RX_TIA_QOPAMP            0x50
+#define B2056_RX_TIA_IMISC             0x51
+#define B2056_RX_TIA_QMISC             0x52
+#define B2056_RX_TIA_GAIN              0x53
+#define B2056_RX_TIA_SPARE1            0x54
+#define B2056_RX_TIA_SPARE2            0x55
+#define B2056_RX_BB_LPF_MASTER         0x56
+#define B2056_RX_AACI_MASTER           0x57
+#define B2056_RX_RXLPF_IDAC            0x58
+#define B2056_RX_RXLPF_OPAMPBIAS_LOWQ  0x59
+#define B2056_RX_RXLPF_OPAMPBIAS_HIGHQ 0x5A
+#define B2056_RX_RXLPF_BIAS_DCCANCEL   0x5B
+#define B2056_RX_RXLPF_OUTVCM          0x5C
+#define B2056_RX_RXLPF_INVCM_BODY      0x5D
+#define B2056_RX_RXLPF_CC_OP           0x5E
+#define B2056_RX_RXLPF_GAIN            0x5F
+#define B2056_RX_RXLPF_Q_BW            0x60
+#define B2056_RX_RXLPF_HP_CORNER_BW    0x61
+#define B2056_RX_RXLPF_RCCAL_HPC       0x62
+#define B2056_RX_RXHPF_OFF0            0x63
+#define B2056_RX_RXHPF_OFF1            0x64
+#define B2056_RX_RXHPF_OFF2            0x65
+#define B2056_RX_RXHPF_OFF3            0x66
+#define B2056_RX_RXHPF_OFF4            0x67
+#define B2056_RX_RXHPF_OFF5            0x68
+#define B2056_RX_RXHPF_OFF6            0x69
+#define B2056_RX_RXHPF_OFF7            0x6A
+#define B2056_RX_RXLPF_RCCAL_LPC       0x6B
+#define B2056_RX_RXLPF_OFF_0           0x6C
+#define B2056_RX_RXLPF_OFF_1           0x6D
+#define B2056_RX_RXLPF_OFF_2           0x6E
+#define B2056_RX_RXLPF_OFF_3           0x6F
+#define B2056_RX_RXLPF_OFF_4           0x70
+#define B2056_RX_UNUSED                        0x71
+#define B2056_RX_VGA_MASTER            0x72
+#define B2056_RX_VGA_BIAS              0x73
+#define B2056_RX_VGA_BIAS_DCCANCEL     0x74
+#define B2056_RX_VGA_GAIN              0x75
+#define B2056_RX_VGA_HP_CORNER_BW      0x76
+#define B2056_RX_VGABUF_BIAS           0x77
+#define B2056_RX_VGABUF_GAIN_BW                0x78
+#define B2056_RX_TXFBMIX_A             0x79
+#define B2056_RX_TXFBMIX_G             0x7A
+#define B2056_RX_RXSPARE1              0x7B
+#define B2056_RX_RXSPARE2              0x7C
+#define B2056_RX_RXSPARE3              0x7D
+#define B2056_RX_RXSPARE4              0x7E
+#define B2056_RX_RXSPARE5              0x7F
+#define B2056_RX_RXSPARE6              0x80
+#define B2056_RX_RXSPARE7              0x81
+#define B2056_RX_RXSPARE8              0x82
+#define B2056_RX_RXSPARE9              0x83
+#define B2056_RX_RXSPARE10             0x84
+#define B2056_RX_RXSPARE11             0x85
+#define B2056_RX_RXSPARE12             0x86
+#define B2056_RX_RXSPARE13             0x87
+#define B2056_RX_RXSPARE14             0x88
+#define B2056_RX_RXSPARE15             0x89
+#define B2056_RX_RXSPARE16             0x8A
+#define B2056_RX_STATUS_LNAA_GAIN      0x8B
+#define B2056_RX_STATUS_LNAG_GAIN      0x8C
+#define B2056_RX_STATUS_MIXTIA_GAIN    0x8D
+#define B2056_RX_STATUS_RXLPF_GAIN     0x8E
+#define B2056_RX_STATUS_VGA_BUF_GAIN   0x8F
+#define B2056_RX_STATUS_RXLPF_Q                0x90
+#define B2056_RX_STATUS_RXLPF_BUF_BW   0x91
+#define B2056_RX_STATUS_RXLPF_VGA_HPC  0x92
+#define B2056_RX_STATUS_RXLPF_RC       0x93
+#define B2056_RX_STATUS_HPC_RC         0x94
+
+#define B2056_LNA1_A_PU                        0x01
+#define B2056_LNA2_A_PU                        0x02
+#define B2056_LNA1_G_PU                        0x01
+#define B2056_LNA2_G_PU                        0x02
+#define B2056_MIXA_PU_I                        0x01
+#define B2056_MIXA_PU_Q                        0x02
+#define B2056_MIXA_PU_GM               0x10
+#define B2056_MIXG_PU_I                        0x01
+#define B2056_MIXG_PU_Q                        0x02
+#define B2056_MIXG_PU_GM               0x10
+#define B2056_TIA_PU                   0x01
+#define B2056_BB_LPF_PU                        0x20
+#define B2056_W1_PU                    0x02
+#define B2056_W2_PU                    0x04
+#define B2056_NB_PU                    0x08
+#define B2056_RSSI_W1_SEL              0x02
+#define B2056_RSSI_W2_SEL              0x04
+#define B2056_RSSI_NB_SEL              0x08
+#define B2056_VCM_MASK                 0x1C
+#define B2056_RSSI_VCM_SHIFT           0x02
+
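A note on the layout above: B2056_SYN, B2056_TX0/TX1 and B2056_RX0/RX1 place a block selector in the upper bits of the 16-bit radio address, while the per-block #defines are offsets inside that block. How the driver combines the two is not shown in this hunk; as a hedged illustration only, addressing one concrete register could look like:

	/* Illustration only: assumes the block base and the per-block offset
	 * are simply OR'd together to form the radio register address. */
	u16 rx0_lnaa_tune_addr = B2056_RX0 | B2056_RX_LNAA_TUNE;
	/* = (0x6 << 12) | 0x2D = 0x602D */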
 struct b43_nphy_channeltab_entry_rev3 {
-       /* The channel number */
-       u8 channel;
        /* The channel frequency in MHz */
        u16 freq;
        /* Radio register values on channelswitch */
-       /* TODO */
+       u8 radio_syn_pll_vcocal1;
+       u8 radio_syn_pll_vcocal2;
+       u8 radio_syn_pll_refdiv;
+       u8 radio_syn_pll_mmd2;
+       u8 radio_syn_pll_mmd1;
+       u8 radio_syn_pll_loopfilter1;
+       u8 radio_syn_pll_loopfilter2;
+       u8 radio_syn_pll_loopfilter3;
+       u8 radio_syn_pll_loopfilter4;
+       u8 radio_syn_pll_loopfilter5;
+       u8 radio_syn_reserved_addr27;
+       u8 radio_syn_reserved_addr28;
+       u8 radio_syn_reserved_addr29;
+       u8 radio_syn_logen_vcobuf1;
+       u8 radio_syn_logen_mixer2;
+       u8 radio_syn_logen_buf3;
+       u8 radio_syn_logen_buf4;
+       u8 radio_rx0_lnaa_tune;
+       u8 radio_rx0_lnag_tune;
+       u8 radio_tx0_intpaa_boost_tune;
+       u8 radio_tx0_intpag_boost_tune;
+       u8 radio_tx0_pada_boost_tune;
+       u8 radio_tx0_padg_boost_tune;
+       u8 radio_tx0_pgaa_boost_tune;
+       u8 radio_tx0_pgag_boost_tune;
+       u8 radio_tx0_mixa_boost_tune;
+       u8 radio_tx0_mixg_boost_tune;
+       u8 radio_rx1_lnaa_tune;
+       u8 radio_rx1_lnag_tune;
+       u8 radio_tx1_intpaa_boost_tune;
+       u8 radio_tx1_intpag_boost_tune;
+       u8 radio_tx1_pada_boost_tune;
+       u8 radio_tx1_padg_boost_tune;
+       u8 radio_tx1_pgaa_boost_tune;
+       u8 radio_tx1_pgag_boost_tune;
+       u8 radio_tx1_mixa_boost_tune;
+       u8 radio_tx1_mixg_boost_tune;
        /* PHY register values on channelswitch */
        struct b43_phy_n_sfo_cfg phy_regs;
 };
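Once real calibration data lands, each b43_nphy_channeltab_rev3[] entry is expected to set freq, the radio_* fields and the phy_regs values together. A hedged sketch of one populated entry follows; every value is a placeholder, only PHYREGS() is used by name (the radio-register helper macro's own name is outside this excerpt), and the remaining radio_* members would normally be filled through that macro.

	/* Sketch only: placeholder values, not calibration data for any
	 * real channel. */
	static const struct b43_nphy_channeltab_entry_rev3 example_entry = {
		.freq			= 5180,
		.radio_syn_pll_vcocal1	= 0x0F,
		.radio_syn_pll_vcocal2	= 0x00,
		.radio_syn_pll_refdiv	= 0x0B,
		/* ... the other radio_* members are filled the same way,
		 * typically via the radio-register helper macro above ... */
		PHYREGS(0x03C9, 0x03C5, 0x03C1, 0x043A, 0x043F, 0x0443),
	};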
index d579df72b783566b557f60d66c87ed167bc46c82..b90f223fb31cb87463263ae6ffad5e71b367525c 100644 (file)
@@ -29,7 +29,7 @@
 /* Returns TRUE, if the radio is enabled in hardware. */
 bool b43legacy_is_hw_radio_enabled(struct b43legacy_wldev *dev)
 {
-       if (dev->phy.rev >= 3) {
+       if (dev->dev->id.revision >= 3) {
                if (!(b43legacy_read32(dev, B43legacy_MMIO_RADIO_HWENABLED_HI)
                      & B43legacy_MMIO_RADIO_HWENABLED_HI_MASK))
                        return 1;
index b82364258dc5e180133985df9a2d1a48a8a2ba95..ed424574160ebd834f005203f49815ace833695b 100644 (file)
@@ -106,6 +106,9 @@ config IWL5000
                Intel WiFi Link 1000BGN
                Intel Wireless WiFi 5150AGN
                Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN
+               Intel 6000 Gen 2 Series Wi-Fi Adapters (6000G2A and 6000G2B)
+               Intel Wireless WiFi Link 6050BGN Gen 2 Adapter
+               Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN)
 
 config IWL3945
        tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
index 63edbe2e557f7544163597741ef44484f23a06bf..01aa2468bd6946a5fc263d468bcbbbffb654a711 100644 (file)
@@ -2,6 +2,8 @@ obj-$(CONFIG_IWLWIFI)   += iwlcore.o
 iwlcore-objs           := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
 iwlcore-objs           += iwl-rx.o iwl-tx.o iwl-sta.o
 iwlcore-objs           += iwl-scan.o iwl-led.o
+iwlcore-$(CONFIG_IWL3945) += iwl-legacy.o
+iwlcore-$(CONFIG_IWL4965) += iwl-legacy.o
 iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
 iwlcore-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
 
@@ -9,13 +11,14 @@ CFLAGS_iwl-devtrace.o := -I$(src)
 
 # AGN
 obj-$(CONFIG_IWLAGN)   += iwlagn.o
-iwlagn-objs            := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o iwl-agn-ict.o
-iwlagn-objs            += iwl-agn-ucode.o iwl-agn-hcmd.o iwl-agn-tx.o
+iwlagn-objs            := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o
+iwlagn-objs            += iwl-agn-ucode.o iwl-agn-tx.o
 iwlagn-objs            += iwl-agn-lib.o iwl-agn-rx.o iwl-agn-calib.o
 iwlagn-objs            += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-eeprom.o
 iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o
 
 iwlagn-$(CONFIG_IWL4965) += iwl-4965.o
+iwlagn-$(CONFIG_IWL5000) += iwl-agn-rxon.o iwl-agn-hcmd.o iwl-agn-ict.o
 iwlagn-$(CONFIG_IWL5000) += iwl-5000.o
 iwlagn-$(CONFIG_IWL5000) += iwl-6000.o
 iwlagn-$(CONFIG_IWL5000) += iwl-1000.o
index db540910b1104d42b5d20d15e542c3fc5da7649c..068f1e1e3297afc4b3ea4ddf98e24ef55992bafc 100644 (file)
@@ -211,14 +211,16 @@ static struct iwl_lib_ops iwl1000_lib = {
                .calib_version  = iwlagn_eeprom_calib_version,
                .query_addr = iwlagn_eeprom_query_addr,
        },
-       .post_associate = iwl_post_associate,
-       .isr = iwl_isr_ict,
-       .config_ap = iwl_config_ap,
+       .isr_ops = {
+               .isr = iwl_isr_ict,
+               .free = iwl_free_isr_ict,
+               .alloc = iwl_alloc_isr_ict,
+               .reset = iwl_reset_ict,
+               .disable = iwl_disable_ict,
+       },
        .temp_ops = {
                .temperature = iwlagn_temperature,
         },
-       .manage_ibss_station = iwlagn_manage_ibss_station,
-       .update_bcast_stations = iwl_update_bcast_stations,
        .debugfs_ops = {
                .rx_stats_read = iwl_ucode_rx_stats_read,
                .tx_stats_read = iwl_ucode_tx_stats_read,
@@ -243,6 +245,7 @@ static const struct iwl_ops iwl1000_ops = {
        .hcmd = &iwlagn_hcmd,
        .utils = &iwlagn_hcmd_utils,
        .led = &iwlagn_led_ops,
+       .ieee80211_ops = &iwlagn_hw_ops,
 };
 
 static struct iwl_base_params iwl1000_base_params = {
index 176e525776734bbabbdcf3f0801b3720779688b6..ebac04b7887c47f78a7704e75646cc27b07909ea 100644 (file)
@@ -51,6 +51,7 @@
 #include "iwl-led.h"
 #include "iwl-3945-led.h"
 #include "iwl-3945-debugfs.h"
+#include "iwl-legacy.h"
 
 #define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np)    \
        [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,   \
@@ -1451,6 +1452,10 @@ static int iwl3945_send_tx_power(struct iwl_priv *priv)
        };
        u16 chan;
 
+       if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
+                     "TX Power requested while scanning!\n"))
+               return -EAGAIN;
+
        chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel);
 
        txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
@@ -2722,10 +2727,9 @@ static struct iwl_lib_ops iwl3945_lib = {
        },
        .send_tx_power  = iwl3945_send_tx_power,
        .is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr,
-       .post_associate = iwl3945_post_associate,
-       .isr = iwl_isr_legacy,
-       .config_ap = iwl3945_config_ap,
-       .manage_ibss_station = iwl3945_manage_ibss_station,
+       .isr_ops = {
+               .isr = iwl_isr_legacy,
+       },
        .recover_from_tx_stall = iwl_bg_monitor_recover,
        .check_plcp_health = iwl3945_good_plcp_health,
 
@@ -2736,10 +2740,16 @@ static struct iwl_lib_ops iwl3945_lib = {
        },
 };
 
+static const struct iwl_legacy_ops iwl3945_legacy_ops = {
+       .post_associate = iwl3945_post_associate,
+       .config_ap = iwl3945_config_ap,
+       .manage_ibss_station = iwl3945_manage_ibss_station,
+};
+
 static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
        .get_hcmd_size = iwl3945_get_hcmd_size,
        .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
-       .tx_cmd_protection = iwlcore_tx_cmd_protection,
+       .tx_cmd_protection = iwl_legacy_tx_cmd_protection,
        .request_scan = iwl3945_request_scan,
        .post_scan = iwl3945_post_scan,
 };
@@ -2749,6 +2759,8 @@ static const struct iwl_ops iwl3945_ops = {
        .hcmd = &iwl3945_hcmd,
        .utils = &iwl3945_hcmd_utils,
        .led = &iwl3945_led_ops,
+       .legacy = &iwl3945_legacy_ops,
+       .ieee80211_ops = &iwl3945_hw_ops,
 };
 
 static struct iwl_base_params iwl3945_base_params = {
index 09391f0ee61f03c9d486a75f5f3c195b04381e24..3eef1eb74a78f8b9f825d132e50a95c4b587d662 100644 (file)
@@ -264,10 +264,8 @@ void iwl3945_reply_statistics(struct iwl_priv *priv,
                              struct iwl_rx_mem_buffer *rxb);
 extern void iwl3945_disable_events(struct iwl_priv *priv);
 extern int iwl4965_get_temperature(const struct iwl_priv *priv);
-extern void iwl3945_post_associate(struct iwl_priv *priv,
-                                  struct ieee80211_vif *vif);
-extern void iwl3945_config_ap(struct iwl_priv *priv,
-                             struct ieee80211_vif *vif);
+extern void iwl3945_post_associate(struct iwl_priv *priv);
+extern void iwl3945_config_ap(struct iwl_priv *priv);
 
 extern int iwl3945_commit_rxon(struct iwl_priv *priv,
                               struct iwl_rxon_context *ctx);
@@ -282,6 +280,8 @@ extern int iwl3945_commit_rxon(struct iwl_priv *priv,
  */
 extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
 
+extern struct ieee80211_ops iwl3945_hw_ops;
+
 /*
  * Forward declare iwl-3945.c functions for iwl-base.c
  */
index b207e3e9299f11dd6b8d02edf7e4b148b07c98d4..4748d067eb1dd577759b9061c128d521bfb0b2ce 100644 (file)
@@ -48,6 +48,7 @@
 #include "iwl-agn-led.h"
 #include "iwl-agn.h"
 #include "iwl-agn-debugfs.h"
+#include "iwl-legacy.h"
 
 static int iwl4965_send_tx_power(struct iwl_priv *priv);
 static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
@@ -1377,13 +1378,9 @@ static int iwl4965_send_tx_power(struct iwl_priv *priv)
        u8 ctrl_chan_high = 0;
        struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
 
-       if (test_bit(STATUS_SCANNING, &priv->status)) {
-               /* If this gets hit a lot, switch it to a BUG() and catch
-                * the stack trace to find out who is calling this during
-                * a scan. */
-               IWL_WARN(priv, "TX Power requested while scanning!\n");
+       if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
+                     "TX Power requested while scanning!\n"))
                return -EAGAIN;
-       }
 
        band = priv->band == IEEE80211_BAND_2GHZ;
 
@@ -1447,6 +1444,142 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
        return ret;
 }
 
+static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
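+       /*
+        * 4965 keeps its own copy of the pre-refactor RXON commit flow;
+        * the rewritten iwlagn_commit_rxon() now lives in iwl-agn-rxon.c.
+        */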
+       /* cast away the const for active_rxon in this function */
+       struct iwl_rxon_cmd *active_rxon = (void *)&ctx->active;
+       int ret;
+       bool new_assoc =
+               !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
+
+       if (!iwl_is_alive(priv))
+               return -EBUSY;
+
+       if (!ctx->is_active)
+               return 0;
+
+       /* always get timestamp with Rx frame */
+       ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
+
+       ret = iwl_check_rxon_cmd(priv, ctx);
+       if (ret) {
+               IWL_ERR(priv, "Invalid RXON configuration.  Not committing.\n");
+               return -EINVAL;
+       }
+
+       /*
+        * receive commit_rxon request
+        * abort any previous channel switch if still in process
+        */
+       if (priv->switch_rxon.switch_in_progress &&
+           (priv->switch_rxon.channel != ctx->staging.channel)) {
+               IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
+                     le16_to_cpu(priv->switch_rxon.channel));
+               iwl_chswitch_done(priv, false);
+       }
+
+       /* If we don't need to send a full RXON, we can use
+        * iwl_rxon_assoc_cmd which is used to reconfigure filter
+        * and other flags for the current radio configuration. */
+       if (!iwl_full_rxon_required(priv, ctx)) {
+               ret = iwl_send_rxon_assoc(priv, ctx);
+               if (ret) {
+                       IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
+                       return ret;
+               }
+
+               memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
+               iwl_print_rx_config_cmd(priv, ctx);
+               return 0;
+       }
+
+       /* If we are currently associated and the new config also wants the
+        * association bit set, we must clear the association bit from the
+        * active configuration (i.e. send an unassoc RXON) before we apply
+        * the new config */
+       if (iwl_is_associated_ctx(ctx) && new_assoc) {
+               IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
+               active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+
+               ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
+                                      sizeof(struct iwl_rxon_cmd),
+                                      active_rxon);
+
+               /* If the mask clearing failed then we set
+                * active_rxon back to what it was previously */
+               if (ret) {
+                       active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
+                       IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
+                       return ret;
+               }
+               iwl_clear_ucode_stations(priv, ctx);
+               iwl_restore_stations(priv, ctx);
+               ret = iwl_restore_default_wep_keys(priv, ctx);
+               if (ret) {
+                       IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
+                       return ret;
+               }
+       }
+
+       IWL_DEBUG_INFO(priv, "Sending RXON\n"
+                      "* with%s RXON_FILTER_ASSOC_MSK\n"
+                      "* channel = %d\n"
+                      "* bssid = %pM\n",
+                      (new_assoc ? "" : "out"),
+                      le16_to_cpu(ctx->staging.channel),
+                      ctx->staging.bssid_addr);
+
+       iwl_set_rxon_hwcrypto(priv, ctx, !priv->cfg->mod_params->sw_crypto);
+
+       /* Apply the new configuration
+        * RXON unassoc clears the station table in uCode so restoration of
+        * stations is needed after it (the RXON command) completes
+        */
+       if (!new_assoc) {
+               ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
+                             sizeof(struct iwl_rxon_cmd), &ctx->staging);
+               if (ret) {
+                       IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
+                       return ret;
+               }
+               IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
+               memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
+               iwl_clear_ucode_stations(priv, ctx);
+               iwl_restore_stations(priv, ctx);
+               ret = iwl_restore_default_wep_keys(priv, ctx);
+               if (ret) {
+                       IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
+                       return ret;
+               }
+       }
+       if (new_assoc) {
+               priv->start_calib = 0;
+               /* Apply the new configuration.
+                * RXON assoc doesn't clear the station table in uCode, so
+                * there is no need to restore stations afterwards. */
+               ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
+                             sizeof(struct iwl_rxon_cmd), &ctx->staging);
+               if (ret) {
+                       IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
+                       return ret;
+               }
+               memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
+       }
+       iwl_print_rx_config_cmd(priv, ctx);
+
+       iwl_init_sensitivity(priv);
+
+       /* If we issue a new RXON command which required a tune then we must
+        * send a new TXPOWER command or we won't be able to Tx any frames */
+       ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
+       if (ret) {
+               IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
 static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
                                     struct ieee80211_channel_switch *ch_switch)
 {
@@ -1553,22 +1686,6 @@ static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
                        tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
 }
 
-/**
- * sign_extend - Sign extend a value using specified bit as sign-bit
- *
- * Example: sign_extend(9, 3) would return -7 as bit3 of 1001b is 1
- * and bit0..2 is 001b which when sign extended to 1111111111111001b is -7.
- *
- * @param oper value to sign extend
- * @param index 0 based bit index (0<=index<32) to sign bit
- */
-static s32 sign_extend(u32 oper, int index)
-{
-       u8 shift = 31 - index;
-
-       return (s32)(oper << shift) >> shift;
-}
-
 /**
  * iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
  * @statistics: Provides the temperature reading from the uCode
@@ -1606,9 +1723,9 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
         * "initialize" ALIVE response.
         */
        if (!test_bit(STATUS_TEMPERATURE, &priv->status))
-               vt = sign_extend(R4, 23);
+               vt = sign_extend32(R4, 23);
        else
-               vt = sign_extend(le32_to_cpu(priv->_agn.statistics.
+               vt = sign_extend32(le32_to_cpu(priv->_agn.statistics.
                                 general.common.temperature), 23);
 
        IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
@@ -2216,7 +2333,7 @@ static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
 
 static struct iwl_hcmd_ops iwl4965_hcmd = {
        .rxon_assoc = iwl4965_send_rxon_assoc,
-       .commit_rxon = iwlagn_commit_rxon,
+       .commit_rxon = iwl4965_commit_rxon,
        .set_rxon_chain = iwlagn_set_rxon_chain,
        .send_bt_config = iwl_send_bt_config,
 };
@@ -2233,12 +2350,155 @@ static void iwl4965_post_scan(struct iwl_priv *priv)
                iwlcore_commit_rxon(priv, ctx);
 }
 
+static void iwl4965_post_associate(struct iwl_priv *priv)
+{
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+       struct ieee80211_vif *vif = ctx->vif;
+       struct ieee80211_conf *conf = NULL;
+       int ret = 0;
+
+       if (!vif || !priv->is_open)
+               return;
+
+       if (vif->type == NL80211_IFTYPE_AP) {
+               IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
+               return;
+       }
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       iwl_scan_cancel_timeout(priv, 200);
+
+       conf = ieee80211_get_hw_conf(priv->hw);
+
+       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+       iwlcore_commit_rxon(priv, ctx);
+
+       ret = iwl_send_rxon_timing(priv, ctx);
+       if (ret)
+               IWL_WARN(priv, "RXON timing failed - "
+                           "Attempting to continue.\n");
+
+       ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+
+       iwl_set_rxon_ht(priv, &priv->current_ht_config);
+
+       if (priv->cfg->ops->hcmd->set_rxon_chain)
+               priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+
+       ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
+
+       IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
+                       vif->bss_conf.aid, vif->bss_conf.beacon_int);
+
+       if (vif->bss_conf.use_short_preamble)
+               ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+       else
+               ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+       if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+               if (vif->bss_conf.use_short_slot)
+                       ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+               else
+                       ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+       }
+
+       iwlcore_commit_rxon(priv, ctx);
+
+       IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
+                       vif->bss_conf.aid, ctx->active.bssid_addr);
+
+       switch (vif->type) {
+       case NL80211_IFTYPE_STATION:
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               iwlagn_send_beacon_cmd(priv);
+               break;
+       default:
+               IWL_ERR(priv, "%s Should not be called in %d mode\n",
+                         __func__, vif->type);
+               break;
+       }
+
+       /* The chain noise calibration will enable PM upon completion.
+        * If chain noise has already been run, then we need to enable
+        * power management here */
+       if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
+               iwl_power_update_mode(priv, false);
+
+       /* Enable Rx differential gain and sensitivity calibrations */
+       iwl_chain_noise_reset(priv);
+       priv->start_calib = 1;
+}
+
+static void iwl4965_config_ap(struct iwl_priv *priv)
+{
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+       struct ieee80211_vif *vif = ctx->vif;
+       int ret = 0;
+
+       lockdep_assert_held(&priv->mutex);
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       /* The following should be done only at AP bring up */
+       if (!iwl_is_associated_ctx(ctx)) {
+
+               /* RXON - unassoc (to set timing command) */
+               ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+               iwlcore_commit_rxon(priv, ctx);
+
+               /* RXON Timing */
+               ret = iwl_send_rxon_timing(priv, ctx);
+               if (ret)
+                       IWL_WARN(priv, "RXON timing failed - "
+                                       "Attempting to continue.\n");
+
+               /* AP has all antennas */
+               priv->chain_noise_data.active_chains =
+                       priv->hw_params.valid_rx_ant;
+               iwl_set_rxon_ht(priv, &priv->current_ht_config);
+               if (priv->cfg->ops->hcmd->set_rxon_chain)
+                       priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+
+               ctx->staging.assoc_id = 0;
+
+               if (vif->bss_conf.use_short_preamble)
+                       ctx->staging.flags |=
+                               RXON_FLG_SHORT_PREAMBLE_MSK;
+               else
+                       ctx->staging.flags &=
+                               ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+               if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+                       if (vif->bss_conf.use_short_slot)
+                               ctx->staging.flags |=
+                                       RXON_FLG_SHORT_SLOT_MSK;
+                       else
+                               ctx->staging.flags &=
+                                       ~RXON_FLG_SHORT_SLOT_MSK;
+               }
+               /* need to send beacon cmd before committing assoc RXON! */
+               iwlagn_send_beacon_cmd(priv);
+               /* restore RXON assoc */
+               ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+               iwlcore_commit_rxon(priv, ctx);
+       }
+       iwlagn_send_beacon_cmd(priv);
+
+       /* FIXME - we need to add code here to detect a totally new
+        * configuration, reset the AP, unassoc, rxon timing, assoc,
+        * clear sta table, add BCAST sta... */
+}
+
 static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
        .get_hcmd_size = iwl4965_get_hcmd_size,
        .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
        .chain_noise_reset = iwl4965_chain_noise_reset,
        .gain_computation = iwl4965_gain_computation,
-       .tx_cmd_protection = iwlcore_tx_cmd_protection,
+       .tx_cmd_protection = iwl_legacy_tx_cmd_protection,
        .calc_rssi = iwl4965_calc_rssi,
        .request_scan = iwlagn_request_scan,
        .post_scan = iwl4965_post_scan,
@@ -2285,14 +2545,12 @@ static struct iwl_lib_ops iwl4965_lib = {
        },
        .send_tx_power  = iwl4965_send_tx_power,
        .update_chain_flags = iwl_update_chain_flags,
-       .post_associate = iwl_post_associate,
-       .config_ap = iwl_config_ap,
-       .isr = iwl_isr_legacy,
+       .isr_ops = {
+               .isr = iwl_isr_legacy,
+       },
        .temp_ops = {
                .temperature = iwl4965_temperature_calib,
        },
-       .manage_ibss_station = iwlagn_manage_ibss_station,
-       .update_bcast_stations = iwl_update_bcast_stations,
        .debugfs_ops = {
                .rx_stats_read = iwl_ucode_rx_stats_read,
                .tx_stats_read = iwl_ucode_tx_stats_read,
@@ -2304,11 +2562,43 @@ static struct iwl_lib_ops iwl4965_lib = {
        .check_plcp_health = iwl_good_plcp_health,
 };
 
+static const struct iwl_legacy_ops iwl4965_legacy_ops = {
+       .post_associate = iwl4965_post_associate,
+       .config_ap = iwl4965_config_ap,
+       .manage_ibss_station = iwlagn_manage_ibss_station,
+       .update_bcast_stations = iwl_update_bcast_stations,
+};
+
+struct ieee80211_ops iwl4965_hw_ops = {
+       .tx = iwlagn_mac_tx,
+       .start = iwlagn_mac_start,
+       .stop = iwlagn_mac_stop,
+       .add_interface = iwl_mac_add_interface,
+       .remove_interface = iwl_mac_remove_interface,
+       .change_interface = iwl_mac_change_interface,
+       .config = iwl_legacy_mac_config,
+       .configure_filter = iwlagn_configure_filter,
+       .set_key = iwlagn_mac_set_key,
+       .update_tkip_key = iwlagn_mac_update_tkip_key,
+       .conf_tx = iwl_mac_conf_tx,
+       .reset_tsf = iwl_legacy_mac_reset_tsf,
+       .bss_info_changed = iwl_legacy_mac_bss_info_changed,
+       .ampdu_action = iwlagn_mac_ampdu_action,
+       .hw_scan = iwl_mac_hw_scan,
+       .sta_add = iwlagn_mac_sta_add,
+       .sta_remove = iwl_mac_sta_remove,
+       .channel_switch = iwlagn_mac_channel_switch,
+       .flush = iwlagn_mac_flush,
+       .tx_last_beacon = iwl_mac_tx_last_beacon,
+};
+
 static const struct iwl_ops iwl4965_ops = {
        .lib = &iwl4965_lib,
        .hcmd = &iwl4965_hcmd,
        .utils = &iwl4965_hcmd_utils,
        .led = &iwlagn_led_ops,
+       .legacy = &iwl4965_legacy_ops,
+       .ieee80211_ops = &iwl4965_hw_ops,
 };
 
 static struct iwl_base_params iwl4965_base_params = {
index fd9fbc93ea1b21d20b9d494f7f7a6a04422e679b..ad43f0fdf91998fc601e14fb615d03784a339a97 100644 (file)
@@ -385,14 +385,16 @@ static struct iwl_lib_ops iwl5000_lib = {
                .calib_version  = iwlagn_eeprom_calib_version,
                .query_addr = iwlagn_eeprom_query_addr,
        },
-       .post_associate = iwl_post_associate,
-       .isr = iwl_isr_ict,
-       .config_ap = iwl_config_ap,
+       .isr_ops = {
+               .isr = iwl_isr_ict,
+               .free = iwl_free_isr_ict,
+               .alloc = iwl_alloc_isr_ict,
+               .reset = iwl_reset_ict,
+               .disable = iwl_disable_ict,
+       },
        .temp_ops = {
                .temperature = iwlagn_temperature,
         },
-       .manage_ibss_station = iwlagn_manage_ibss_station,
-       .update_bcast_stations = iwl_update_bcast_stations,
        .debugfs_ops = {
                .rx_stats_read = iwl_ucode_rx_stats_read,
                .tx_stats_read = iwl_ucode_tx_stats_read,
@@ -453,14 +455,16 @@ static struct iwl_lib_ops iwl5150_lib = {
                .calib_version  = iwlagn_eeprom_calib_version,
                .query_addr = iwlagn_eeprom_query_addr,
        },
-       .post_associate = iwl_post_associate,
-       .isr = iwl_isr_ict,
-       .config_ap = iwl_config_ap,
+       .isr_ops = {
+               .isr = iwl_isr_ict,
+               .free = iwl_free_isr_ict,
+               .alloc = iwl_alloc_isr_ict,
+               .reset = iwl_reset_ict,
+               .disable = iwl_disable_ict,
+       },
        .temp_ops = {
                .temperature = iwl5150_temperature,
         },
-       .manage_ibss_station = iwlagn_manage_ibss_station,
-       .update_bcast_stations = iwl_update_bcast_stations,
        .debugfs_ops = {
                .rx_stats_read = iwl_ucode_rx_stats_read,
                .tx_stats_read = iwl_ucode_tx_stats_read,
@@ -485,6 +489,7 @@ static const struct iwl_ops iwl5000_ops = {
        .hcmd = &iwlagn_hcmd,
        .utils = &iwlagn_hcmd_utils,
        .led = &iwlagn_led_ops,
+       .ieee80211_ops = &iwlagn_hw_ops,
 };
 
 static const struct iwl_ops iwl5150_ops = {
@@ -492,6 +497,7 @@ static const struct iwl_ops iwl5150_ops = {
        .hcmd = &iwlagn_hcmd,
        .utils = &iwlagn_hcmd_utils,
        .led = &iwlagn_led_ops,
+       .ieee80211_ops = &iwlagn_hw_ops,
 };
 
 static struct iwl_base_params iwl5000_base_params = {
index 11e6532fc573d1ed93d5ec4aaf886342d0fe8073..c7ff1bdf42cd02b59ce5f0a8655bcb6867161b5c 100644 (file)
 #define IWL6000_UCODE_API_MAX 4
 #define IWL6050_UCODE_API_MAX 5
 #define IWL6000G2_UCODE_API_MAX 5
-#define IWL130_UCODE_API_MAX 5
 
 /* Lowest firmware API version supported */
 #define IWL6000_UCODE_API_MIN 4
 #define IWL6050_UCODE_API_MIN 4
 #define IWL6000G2_UCODE_API_MIN 4
-#define IWL130_UCODE_API_MIN 5
 
 #define IWL6000_FW_PRE "iwlwifi-6000-"
 #define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode"
 #define _IWL6000G2B_MODULE_FIRMWARE(api) IWL6000G2B_FW_PRE #api ".ucode"
 #define IWL6000G2B_MODULE_FIRMWARE(api) _IWL6000G2B_MODULE_FIRMWARE(api)
 
-#define IWL130_FW_PRE "iwlwifi-130-"
-#define _IWL130_MODULE_FIRMWARE(api) IWL130_FW_PRE #api ".ucode"
-#define IWL130_MODULE_FIRMWARE(api) _IWL130_MODULE_FIRMWARE(api)
-
 static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
 {
        /* want Celsius */
@@ -328,14 +322,16 @@ static struct iwl_lib_ops iwl6000_lib = {
                .query_addr = iwlagn_eeprom_query_addr,
                .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
        },
-       .post_associate = iwl_post_associate,
-       .isr = iwl_isr_ict,
-       .config_ap = iwl_config_ap,
+       .isr_ops = {
+               .isr = iwl_isr_ict,
+               .free = iwl_free_isr_ict,
+               .alloc = iwl_alloc_isr_ict,
+               .reset = iwl_reset_ict,
+               .disable = iwl_disable_ict,
+       },
        .temp_ops = {
                .temperature = iwlagn_temperature,
         },
-       .manage_ibss_station = iwlagn_manage_ibss_station,
-       .update_bcast_stations = iwl_update_bcast_stations,
        .debugfs_ops = {
                .rx_stats_read = iwl_ucode_rx_stats_read,
                .tx_stats_read = iwl_ucode_tx_stats_read,
@@ -399,14 +395,16 @@ static struct iwl_lib_ops iwl6000g2b_lib = {
                .query_addr = iwlagn_eeprom_query_addr,
                .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
        },
-       .post_associate = iwl_post_associate,
-       .isr = iwl_isr_ict,
-       .config_ap = iwl_config_ap,
+       .isr_ops = {
+               .isr = iwl_isr_ict,
+               .free = iwl_free_isr_ict,
+               .alloc = iwl_alloc_isr_ict,
+               .reset = iwl_reset_ict,
+               .disable = iwl_disable_ict,
+       },
        .temp_ops = {
                .temperature = iwlagn_temperature,
         },
-       .manage_ibss_station = iwlagn_manage_ibss_station,
-       .update_bcast_stations = iwl_update_bcast_stations,
        .debugfs_ops = {
                .rx_stats_read = iwl_ucode_rx_stats_read,
                .tx_stats_read = iwl_ucode_tx_stats_read,
@@ -439,6 +437,7 @@ static const struct iwl_ops iwl6000_ops = {
        .hcmd = &iwlagn_hcmd,
        .utils = &iwlagn_hcmd_utils,
        .led = &iwlagn_led_ops,
+       .ieee80211_ops = &iwlagn_hw_ops,
 };
 
 static const struct iwl_ops iwl6050_ops = {
@@ -447,6 +446,7 @@ static const struct iwl_ops iwl6050_ops = {
        .utils = &iwlagn_hcmd_utils,
        .led = &iwlagn_led_ops,
        .nic = &iwl6050_nic_ops,
+       .ieee80211_ops = &iwlagn_hw_ops,
 };
 
 static const struct iwl_ops iwl6050g2_ops = {
@@ -455,6 +455,7 @@ static const struct iwl_ops iwl6050g2_ops = {
        .utils = &iwlagn_hcmd_utils,
        .led = &iwlagn_led_ops,
        .nic = &iwl6050g2_nic_ops,
+       .ieee80211_ops = &iwlagn_hw_ops,
 };
 
 static const struct iwl_ops iwl6000g2b_ops = {
@@ -462,6 +463,7 @@ static const struct iwl_ops iwl6000g2b_ops = {
        .hcmd = &iwlagn_bt_hcmd,
        .utils = &iwlagn_hcmd_utils,
        .led = &iwlagn_led_ops,
+       .ieee80211_ops = &iwlagn_hw_ops,
 };
 
 static struct iwl_base_params iwl6000_base_params = {
@@ -485,6 +487,7 @@ static struct iwl_base_params iwl6000_base_params = {
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
        .chain_noise_calib_by_driver = true,
+       .shadow_reg_enable = true,
 };
 
 static struct iwl_base_params iwl6050_base_params = {
@@ -508,6 +511,7 @@ static struct iwl_base_params iwl6050_base_params = {
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
        .chain_noise_calib_by_driver = true,
+       .shadow_reg_enable = true,
 };
 static struct iwl_base_params iwl6000_coex_base_params = {
        .eeprom_size = OTP_LOW_IMAGE_SIZE,
@@ -530,6 +534,7 @@ static struct iwl_base_params iwl6000_coex_base_params = {
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
        .chain_noise_calib_by_driver = true,
+       .shadow_reg_enable = true,
 };
 
 static struct iwl_ht_params iwl6000_ht_params = {
@@ -842,8 +847,8 @@ struct iwl_cfg iwl6000_3agn_cfg = {
 struct iwl_cfg iwl130_bgn_cfg = {
        .name = "Intel(R) 130 Series 1x1 BGN",
        .fw_name_pre = IWL6000G2B_FW_PRE,
-       .ucode_api_max = IWL130_UCODE_API_MAX,
-       .ucode_api_min = IWL130_UCODE_API_MIN,
+       .ucode_api_max = IWL6000G2_UCODE_API_MAX,
+       .ucode_api_min = IWL6000G2_UCODE_API_MIN,
        .sku = IWL_SKU_G|IWL_SKU_N,
        .valid_tx_ant = ANT_A,
        .valid_rx_ant = ANT_A,
@@ -862,8 +867,8 @@ struct iwl_cfg iwl130_bgn_cfg = {
 struct iwl_cfg iwl130_bg_cfg = {
        .name = "Intel(R) 130 Series 1x2 BG",
        .fw_name_pre = IWL6000G2B_FW_PRE,
-       .ucode_api_max = IWL130_UCODE_API_MAX,
-       .ucode_api_min = IWL130_UCODE_API_MIN,
+       .ucode_api_max = IWL6000G2_UCODE_API_MAX,
+       .ucode_api_min = IWL6000G2_UCODE_API_MIN,
        .sku = IWL_SKU_G,
        .valid_tx_ant = ANT_A,
        .valid_rx_ant = ANT_A,
@@ -882,4 +887,3 @@ MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL6000G2A_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL6000G2B_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL130_MODULE_FIRMWARE(IWL130_UCODE_API_MAX));
index e2019e756936e2280fa8f1365fe31ca0bb295a51..d16bb5ede01474d9b30631b1ee769890aa257a8f 100644 (file)
@@ -732,8 +732,122 @@ static inline u8 find_first_chain(u8 mask)
        return CHAIN_C;
 }
 
+/**
+ * iwl_find_disconn_antenna - run the disconnected antenna algorithm to
+ * find out which antennas are disconnected.
+ */
+static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
+                                    struct iwl_chain_noise_data *data)
+{
+       u32 active_chains = 0;
+       u32 max_average_sig;
+       u16 max_average_sig_antenna_i;
+       u8 num_tx_chains;
+       u8 first_chain;
+       u16 i = 0;
+
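+       /* average the per-chain signal accumulated over chain_noise_num_beacons beacons */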
+       average_sig[0] = data->chain_signal_a /
+                        priv->cfg->base_params->chain_noise_num_beacons;
+       average_sig[1] = data->chain_signal_b /
+                        priv->cfg->base_params->chain_noise_num_beacons;
+       average_sig[2] = data->chain_signal_c /
+                        priv->cfg->base_params->chain_noise_num_beacons;
+
+       if (average_sig[0] >= average_sig[1]) {
+               max_average_sig = average_sig[0];
+               max_average_sig_antenna_i = 0;
+               active_chains = (1 << max_average_sig_antenna_i);
+       } else {
+               max_average_sig = average_sig[1];
+               max_average_sig_antenna_i = 1;
+               active_chains = (1 << max_average_sig_antenna_i);
+       }
+
+       if (average_sig[2] >= max_average_sig) {
+               max_average_sig = average_sig[2];
+               max_average_sig_antenna_i = 2;
+               active_chains = (1 << max_average_sig_antenna_i);
+       }
+
+       IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n",
+                    average_sig[0], average_sig[1], average_sig[2]);
+       IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n",
+                    max_average_sig, max_average_sig_antenna_i);
+
+       /* Compare signal strengths for all 3 receivers. */
+       for (i = 0; i < NUM_RX_CHAINS; i++) {
+               if (i != max_average_sig_antenna_i) {
+                       s32 rssi_delta = (max_average_sig - average_sig[i]);
+
+                       /* If signal is very weak, compared with
+                        * strongest, mark it as disconnected. */
+                       if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
+                               data->disconn_array[i] = 1;
+                       else
+                               active_chains |= (1 << i);
+                       IWL_DEBUG_CALIB(priv, "i = %d  rssiDelta = %d  "
+                            "disconn_array[i] = %d\n",
+                            i, rssi_delta, data->disconn_array[i]);
+               }
+       }
+
+       /*
+        * The above algorithm sometimes fails when the ucode
+        * reports 0 for all chains. It's not clear why that
+        * happens to start with, but it is then causing trouble
+        * because this can make us enable more chains than the
+        * hardware really has.
+        *
+        * To be safe, simply mask out any chains that we know
+        * are not on the device.
+        */
+       active_chains &= priv->hw_params.valid_rx_ant;
+
+       num_tx_chains = 0;
+       for (i = 0; i < NUM_RX_CHAINS; i++) {
+               /* loops on all the bits of
+                * priv->hw_setting.valid_tx_ant */
+               u8 ant_msk = (1 << i);
+               if (!(priv->hw_params.valid_tx_ant & ant_msk))
+                       continue;
+
+               num_tx_chains++;
+               if (data->disconn_array[i] == 0)
+                       /* there is a Tx antenna connected */
+                       break;
+               if (num_tx_chains == priv->hw_params.tx_chains_num &&
+                   data->disconn_array[i]) {
+                       /*
+                        * If all chains are disconnected
+                        * connect the first valid tx chain
+                        */
+                       first_chain =
+                               find_first_chain(priv->cfg->valid_tx_ant);
+                       data->disconn_array[first_chain] = 0;
+                       active_chains |= BIT(first_chain);
+                       IWL_DEBUG_CALIB(priv, "All Tx chains are disconnected "
+                                       "W/A - declare %d as connected\n",
+                                       first_chain);
+                       break;
+               }
+       }
+
+       if (active_chains != priv->hw_params.valid_rx_ant &&
+           active_chains != priv->chain_noise_data.active_chains)
+               IWL_DEBUG_CALIB(priv,
+                               "Detected that not all antennas are connected! "
+                               "Connected: %#x, valid: %#x.\n",
+                               active_chains, priv->hw_params.valid_rx_ant);
+
+       /* Save for use within RXON, TX, SCAN commands, etc. */
+       data->active_chains = active_chains;
+       IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
+                       active_chains);
+}
+
+
 /*
- * Accumulate 20 beacons of signal and noise statistics for each of
+ * Accumulate 16 beacons of signal and noise statistics for each of
+ * Accumulate 16 beacons of signal and noise statistics for each of
  *   3 receivers/antennas/rx-chains, then figure out:
  * 1)  Which antennas are connected.
  * 2)  Differential rx gain settings to balance the 3 receivers.
@@ -750,8 +864,6 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
        u32 chain_sig_c;
        u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
        u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
-       u32 max_average_sig;
-       u16 max_average_sig_antenna_i;
        u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
        u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
        u16 i = 0;
@@ -759,11 +871,9 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
        u16 stat_chnum = INITIALIZATION_VALUE;
        u8 rxon_band24;
        u8 stat_band24;
-       u32 active_chains = 0;
-       u8 num_tx_chains;
        unsigned long flags;
        struct statistics_rx_non_phy *rx_info;
-       u8 first_chain;
+
        /*
         * MULTI-FIXME:
         * When we support multiple interfaces on different channels,
@@ -869,108 +979,16 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
                return;
 
        /* Analyze signal for disconnected antenna */
-       average_sig[0] = data->chain_signal_a /
-                        priv->cfg->base_params->chain_noise_num_beacons;
-       average_sig[1] = data->chain_signal_b /
-                        priv->cfg->base_params->chain_noise_num_beacons;
-       average_sig[2] = data->chain_signal_c /
-                        priv->cfg->base_params->chain_noise_num_beacons;
-
-       if (average_sig[0] >= average_sig[1]) {
-               max_average_sig = average_sig[0];
-               max_average_sig_antenna_i = 0;
-               active_chains = (1 << max_average_sig_antenna_i);
-       } else {
-               max_average_sig = average_sig[1];
-               max_average_sig_antenna_i = 1;
-               active_chains = (1 << max_average_sig_antenna_i);
-       }
-
-       if (average_sig[2] >= max_average_sig) {
-               max_average_sig = average_sig[2];
-               max_average_sig_antenna_i = 2;
-               active_chains = (1 << max_average_sig_antenna_i);
-       }
-
-       IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n",
-                    average_sig[0], average_sig[1], average_sig[2]);
-       IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n",
-                    max_average_sig, max_average_sig_antenna_i);
-
-       /* Compare signal strengths for all 3 receivers. */
-       for (i = 0; i < NUM_RX_CHAINS; i++) {
-               if (i != max_average_sig_antenna_i) {
-                       s32 rssi_delta = (max_average_sig - average_sig[i]);
-
-                       /* If signal is very weak, compared with
-                        * strongest, mark it as disconnected. */
-                       if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
-                               data->disconn_array[i] = 1;
-                       else
-                               active_chains |= (1 << i);
-                       IWL_DEBUG_CALIB(priv, "i = %d  rssiDelta = %d  "
-                            "disconn_array[i] = %d\n",
-                            i, rssi_delta, data->disconn_array[i]);
-               }
-       }
-
-       /*
-        * The above algorithm sometimes fails when the ucode
-        * reports 0 for all chains. It's not clear why that
-        * happens to start with, but it is then causing trouble
-        * because this can make us enable more chains than the
-        * hardware really has.
-        *
-        * To be safe, simply mask out any chains that we know
-        * are not on the device.
-        */
        if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->advanced_bt_coexist &&
-           priv->bt_full_concurrent) {
-               /* operated as 1x1 in full concurrency mode */
-               active_chains &= first_antenna(priv->hw_params.valid_rx_ant);
+           priv->cfg->bt_params->advanced_bt_coexist) {
+               /* Disable the disconnected antenna algorithm for advanced
+                * BT coex; assume all valid antennas are connected. */
+               data->active_chains = priv->hw_params.valid_rx_ant;
+               for (i = 0; i < NUM_RX_CHAINS; i++)
+                       if (!(data->active_chains & (1<<i)))
+                               data->disconn_array[i] = 1;
        } else
-               active_chains &= priv->hw_params.valid_rx_ant;
-
-       num_tx_chains = 0;
-       for (i = 0; i < NUM_RX_CHAINS; i++) {
-               /* loops on all the bits of
-                * priv->hw_setting.valid_tx_ant */
-               u8 ant_msk = (1 << i);
-               if (!(priv->hw_params.valid_tx_ant & ant_msk))
-                       continue;
-
-               num_tx_chains++;
-               if (data->disconn_array[i] == 0)
-                       /* there is a Tx antenna connected */
-                       break;
-               if (num_tx_chains == priv->hw_params.tx_chains_num &&
-                   data->disconn_array[i]) {
-                       /*
-                        * If all chains are disconnected
-                        * connect the first valid tx chain
-                        */
-                       first_chain =
-                               find_first_chain(priv->cfg->valid_tx_ant);
-                       data->disconn_array[first_chain] = 0;
-                       active_chains |= BIT(first_chain);
-                       IWL_DEBUG_CALIB(priv, "All Tx chains are disconnected W/A - declare %d as connected\n",
-                                       first_chain);
-                       break;
-               }
-       }
-
-       if (active_chains != priv->hw_params.valid_rx_ant &&
-           active_chains != priv->chain_noise_data.active_chains)
-               IWL_DEBUG_CALIB(priv,
-                               "Detected that not all antennas are connected! "
-                               "Connected: %#x, valid: %#x.\n",
-                               active_chains, priv->hw_params.valid_rx_ant);
-
-       /* Save for use within RXON, TX, SCAN commands, etc. */
-       priv->chain_noise_data.active_chains = active_chains;
-       IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
-                       active_chains);
+               iwl_find_disconn_antenna(priv, average_sig, data);
 
        /* Analyze noise for rx balance */
        average_noise[0] = data->chain_noise_a /
index b555edd533547e3bf98eed8e948a276ef141aace..ca3530c4295a2c285118047512c2e743b633bf0f 100644 (file)
@@ -496,6 +496,10 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
        struct iwlagn_tx_power_dbm_cmd tx_power_cmd;
        u8 tx_ant_cfg_cmd;
 
+       if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
+                     "TX Power requested while scanning!\n"))
+               return -EAGAIN;
+
        /* half dBm need to multiply */
        tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
 
@@ -522,9 +526,8 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
        else
                tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
 
-       return  iwl_send_cmd_pdu_async(priv, tx_ant_cfg_cmd,
-                                      sizeof(tx_power_cmd), &tx_power_cmd,
-                                      NULL);
+       return iwl_send_cmd_pdu(priv, tx_ant_cfg_cmd, sizeof(tx_power_cmd),
+                               &tx_power_cmd);
 }
 
 void iwlagn_temperature(struct iwl_priv *priv)
@@ -750,6 +753,12 @@ int iwlagn_hw_nic_init(struct iwl_priv *priv)
        } else
                iwlagn_txq_ctx_reset(priv);
 
+       if (priv->cfg->base_params->shadow_reg_enable) {
+               /* enable shadow regs in HW */
+               iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
+                       0x800FFFFF);
+       }
+
        set_bit(STATUS_INIT, &priv->status);
 
        return 0;
@@ -1584,22 +1593,6 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
        return ret;
 }
 
-void iwlagn_post_scan(struct iwl_priv *priv)
-{
-       struct iwl_rxon_context *ctx;
-
-       /*
-        * Since setting the RXON may have been deferred while
-        * performing the scan, fire one off if needed
-        */
-       for_each_context(priv, ctx)
-               if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
-                       iwlagn_commit_rxon(priv, ctx);
-
-       if (priv->cfg->ops->hcmd->set_pan_params)
-               priv->cfg->ops->hcmd->set_pan_params(priv);
-}
-
 int iwlagn_manage_ibss_station(struct iwl_priv *priv,
                               struct ieee80211_vif *vif, bool add)
 {
@@ -1884,12 +1877,20 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
        struct iwl_rxon_context *ctx;
        int smps_request = -1;
 
+       /*
+        * Note: bt_traffic_load can be overridden by scan complete and
+        * coex profile notifications. Ignore that here, since the only bad
+        * consequence is that the debug print may not match the actual state.
+        */
        IWL_DEBUG_INFO(priv, "BT traffic load changes: %d\n",
                       priv->bt_traffic_load);
 
        switch (priv->bt_traffic_load) {
        case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
-               smps_request = IEEE80211_SMPS_AUTOMATIC;
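+               /* BT is on but carrying no traffic: still prefer dynamic SMPS */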
+               if (priv->bt_status)
+                       smps_request = IEEE80211_SMPS_DYNAMIC;
+               else
+                       smps_request = IEEE80211_SMPS_AUTOMATIC;
                break;
        case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
                smps_request = IEEE80211_SMPS_DYNAMIC;
@@ -1906,6 +1907,16 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
 
        mutex_lock(&priv->mutex);
 
+       /*
+        * We cannot send commands to the firmware while scanning. When the
+        * scan completes we will schedule this work again. We check with the
+        * mutex held to prevent a new scan request from arriving. We do not
+        * check STATUS_SCANNING, to avoid a race where the work is queued
+        * twice from different notifications but both runs quit without
+        * performing any work at all.
+        */
+       if (test_bit(STATUS_SCAN_HW, &priv->status))
+               goto out;
+
        if (priv->cfg->ops->lib->update_chain_flags)
                priv->cfg->ops->lib->update_chain_flags(priv);
 
@@ -1915,7 +1926,7 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
                                ieee80211_request_smps(ctx->vif, smps_request);
                }
        }
-
+out:
        mutex_unlock(&priv->mutex);
 }
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
new file mode 100644 (file)
index 0000000..2d927a9
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -0,0 +1,619 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include "iwl-dev.h"
+#include "iwl-agn.h"
+#include "iwl-sta.h"
+#include "iwl-core.h"
+#include "iwl-agn-calib.h"
+
+static int iwlagn_disable_bss(struct iwl_priv *priv,
+                             struct iwl_rxon_context *ctx,
+                             struct iwl_rxon_cmd *send)
+{
+       __le32 old_filter = send->filter_flags;
+       int ret;
+
+       send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+       ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, sizeof(*send), send);
+
+       send->filter_flags = old_filter;
+
+       if (ret)
+               IWL_ERR(priv, "Error clearing ASSOC_MSK on BSS (%d)\n", ret);
+
+       return ret;
+}
+
+static int iwlagn_disable_pan(struct iwl_priv *priv,
+                             struct iwl_rxon_context *ctx,
+                             struct iwl_rxon_cmd *send)
+{
+       __le32 old_filter = send->filter_flags;
+       u8 old_dev_type = send->dev_type;
+       int ret;
+
+       send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+       send->dev_type = RXON_DEV_TYPE_P2P;
+       ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, sizeof(*send), send);
+
+       send->filter_flags = old_filter;
+       send->dev_type = old_dev_type;
+
+       if (ret)
+               IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
+
+       /* FIXME: WAIT FOR PAN DISABLE */
+       msleep(300);
+
+       return ret;
+}
+
+static int iwlagn_update_beacon(struct iwl_priv *priv,
+                               struct ieee80211_vif *vif)
+{
+       lockdep_assert_held(&priv->mutex);
+
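+       /* drop any stale beacon and fetch a fresh one from mac80211 */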
+       dev_kfree_skb(priv->beacon_skb);
+       priv->beacon_skb = ieee80211_beacon_get(priv->hw, vif);
+       if (!priv->beacon_skb)
+               return -ENOMEM;
+       return iwlagn_send_beacon_cmd(priv);
+}
+
+/**
+ * iwlagn_commit_rxon - commit staging_rxon to hardware
+ *
+ * The RXON command in staging_rxon is committed to the hardware and
+ * the active_rxon structure is updated with the new data.  This
+ * function correctly transitions out of the RXON_ASSOC_MSK state if
+ * a HW tune is required based on the RXON structure changes.
+ */
+int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+       /* cast away the const for active_rxon in this function */
+       struct iwl_rxon_cmd *active = (void *)&ctx->active;
+       bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
+       int ret;
+
+       lockdep_assert_held(&priv->mutex);
+
+       if (!iwl_is_alive(priv))
+               return -EBUSY;
+
+       /* This function hardcodes a bunch of dual-mode assumptions */
+       BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
+
+       if (!ctx->is_active)
+               return 0;
+
+       /* always get timestamp with Rx frame */
+       ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
+
+       if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
+           !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
+               ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+       else
+               ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+
+       ret = iwl_check_rxon_cmd(priv, ctx);
+       if (ret) {
+               IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
+               return -EINVAL;
+       }
+
+       /*
+        * receive commit_rxon request
+        * abort any previous channel switch if still in process
+        */
+       if (priv->switch_rxon.switch_in_progress &&
+           (priv->switch_rxon.channel != ctx->staging.channel)) {
+               IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
+                     le16_to_cpu(priv->switch_rxon.channel));
+               iwl_chswitch_done(priv, false);
+       }
+
+       /*
+        * If we don't need to send a full RXON, we can use
+        * iwl_rxon_assoc_cmd which is used to reconfigure filter
+        * and other flags for the current radio configuration.
+        */
+       if (!iwl_full_rxon_required(priv, ctx)) {
+               ret = iwl_send_rxon_assoc(priv, ctx);
+               if (ret) {
+                       IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
+                       return ret;
+               }
+
+               memcpy(active, &ctx->staging, sizeof(*active));
+               iwl_print_rx_config_cmd(priv, ctx);
+               return 0;
+       }
+
+       if (priv->cfg->ops->hcmd->set_pan_params) {
+               ret = priv->cfg->ops->hcmd->set_pan_params(priv);
+               if (ret)
+                       return ret;
+       }
+
+       iwl_set_rxon_hwcrypto(priv, ctx, !priv->cfg->mod_params->sw_crypto);
+
+       IWL_DEBUG_INFO(priv,
+                      "Going to commit RXON\n"
+                      "  * with%s RXON_FILTER_ASSOC_MSK\n"
+                      "  * channel = %d\n"
+                      "  * bssid = %pM\n",
+                      (new_assoc ? "" : "out"),
+                      le16_to_cpu(ctx->staging.channel),
+                      ctx->staging.bssid_addr);
+
+       /*
+        * Always clear associated first, but with the correct config.
+        * This is required as for example station addition for the
+        * AP station must be done after the BSSID is set to correctly
+        * set up filters in the device.
+        */
+       if (ctx->ctxid == IWL_RXON_CTX_BSS)
+               ret = iwlagn_disable_bss(priv, ctx, &ctx->staging);
+       else
+               ret = iwlagn_disable_pan(priv, ctx, &ctx->staging);
+       if (ret)
+               return ret;
+
+       memcpy(active, &ctx->staging, sizeof(*active));
+
+       /*
+        * Un-assoc RXON clears the station table and WEP
+        * keys, so we have to restore those afterwards.
+        */
+       iwl_clear_ucode_stations(priv, ctx);
+       iwl_restore_stations(priv, ctx);
+       ret = iwl_restore_default_wep_keys(priv, ctx);
+       if (ret) {
+               IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
+               return ret;
+       }
+
+       /* RXON timing must be before associated RXON */
+       ret = iwl_send_rxon_timing(priv, ctx);
+       if (ret) {
+               IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
+               return ret;
+       }
+
+       if (new_assoc) {
+               /*
+                * We'll run into this code path when beaconing is
+                * enabled, but then we also need to send the beacon
+                * to the device.
+                */
+               if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_AP)) {
+                       ret = iwlagn_update_beacon(priv, ctx->vif);
+                       if (ret) {
+                               IWL_ERR(priv,
+                                       "Error sending required beacon (%d)!\n",
+                                       ret);
+                               return ret;
+                       }
+               }
+
+               priv->start_calib = 0;
+               /*
+                * Apply the new configuration.
+                *
+                * Associated RXON doesn't clear the station table in uCode,
+                * so we don't need to restore stations etc. after this.
+                */
+               ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
+                             sizeof(struct iwl_rxon_cmd), &ctx->staging);
+               if (ret) {
+                       IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
+                       return ret;
+               }
+               memcpy(active, &ctx->staging, sizeof(*active));
+
+               /* IBSS beacon needs to be sent after setting assoc */
+               if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_ADHOC))
+                       if (iwlagn_update_beacon(priv, ctx->vif))
+                               IWL_ERR(priv, "Error sending IBSS beacon\n");
+       }
+
+       iwl_print_rx_config_cmd(priv, ctx);
+
+       iwl_init_sensitivity(priv);
+
+       /*
+        * If we issue a new RXON command which required a tune then we must
+        * send a new TXPOWER command or we won't be able to Tx any frames.
+        *
+        * FIXME: which RXON requires a tune? Can we optimise this out in
+        *        some cases?
+        */
+       ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
+       if (ret) {
+               IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void iwlagn_update_qos(struct iwl_priv *priv,
+                             struct iwl_rxon_context *ctx)
+{
+       int ret;
+
+       if (!ctx->is_active)
+               return;
+
+       ctx->qos_data.def_qos_parm.qos_flags = 0;
+
+       if (ctx->qos_data.qos_active)
+               ctx->qos_data.def_qos_parm.qos_flags |=
+                       QOS_PARAM_FLG_UPDATE_EDCA_MSK;
+
+       if (ctx->ht.enabled)
+               ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
+
+       IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
+                     ctx->qos_data.qos_active,
+                     ctx->qos_data.def_qos_parm.qos_flags);
+
+       ret = iwl_send_cmd_pdu(priv, ctx->qos_cmd,
+                              sizeof(struct iwl_qosparam_cmd),
+                              &ctx->qos_data.def_qos_parm);
+       if (ret)
+               IWL_ERR(priv, "Failed to update QoS\n");
+}
+
+int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_rxon_context *ctx;
+       struct ieee80211_conf *conf = &hw->conf;
+       struct ieee80211_channel *channel = conf->channel;
+       const struct iwl_channel_info *ch_info;
+       int ret = 0;
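+       /* per-context flag: HT enable state changed, so QoS parameters need updating */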
+       bool ht_changed[NUM_IWL_RXON_CTX] = {};
+
+       IWL_DEBUG_MAC80211(priv, "changed %#x", changed);
+
+       mutex_lock(&priv->mutex);
+
+       if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
+               IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
+               goto out;
+       }
+
+       if (!iwl_is_ready(priv)) {
+               IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
+               goto out;
+       }
+
+       if (changed & (IEEE80211_CONF_CHANGE_SMPS |
+                      IEEE80211_CONF_CHANGE_CHANNEL)) {
+               /* mac80211 uses static for non-HT which is what we want */
+               priv->current_ht_config.smps = conf->smps_mode;
+
+               /*
+                * Recalculate chain counts.
+                *
+                * If monitor mode is enabled then mac80211 will
+                * set up the SM PS mode to OFF if an HT channel is
+                * configured.
+                */
+               if (priv->cfg->ops->hcmd->set_rxon_chain)
+                       for_each_context(priv, ctx)
+                               priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+       }
+
+       if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+               unsigned long flags;
+
+               ch_info = iwl_get_channel_info(priv, channel->band,
+                                              channel->hw_value);
+               if (!is_channel_valid(ch_info)) {
+                       IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               spin_lock_irqsave(&priv->lock, flags);
+
+               for_each_context(priv, ctx) {
+                       /* Configure HT40 channels */
+                       if (ctx->ht.enabled != conf_is_ht(conf)) {
+                               ctx->ht.enabled = conf_is_ht(conf);
+                               ht_changed[ctx->ctxid] = true;
+                       }
+
+                       if (ctx->ht.enabled) {
+                               if (conf_is_ht40_minus(conf)) {
+                                       ctx->ht.extension_chan_offset =
+                                               IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+                                       ctx->ht.is_40mhz = true;
+                               } else if (conf_is_ht40_plus(conf)) {
+                                       ctx->ht.extension_chan_offset =
+                                               IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+                                       ctx->ht.is_40mhz = true;
+                               } else {
+                                       ctx->ht.extension_chan_offset =
+                                               IEEE80211_HT_PARAM_CHA_SEC_NONE;
+                                       ctx->ht.is_40mhz = false;
+                               }
+                       } else
+                               ctx->ht.is_40mhz = false;
+
+                       /*
+                        * Default to no protection. Protection mode will
+                        * later be set from BSS config in iwl_ht_conf
+                        */
+                       ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
+
+                       /* if we are switching from ht to 2.4 clear flags
+                        * from any ht related info since 2.4 does not
+                        * support ht */
+                       if (le16_to_cpu(ctx->staging.channel) !=
+                           channel->hw_value)
+                               ctx->staging.flags = 0;
+
+                       iwl_set_rxon_channel(priv, channel, ctx);
+                       iwl_set_rxon_ht(priv, &priv->current_ht_config);
+
+                       iwl_set_flags_for_band(priv, ctx, channel->band,
+                                              ctx->vif);
+               }
+
+               spin_unlock_irqrestore(&priv->lock, flags);
+
+               iwl_update_bcast_stations(priv);
+
+               /*
+                * The list of supported rates and rate mask can be different
+                * for each band; since the band may have changed, reset
+                * the rate mask to what mac80211 lists.
+                */
+               iwl_set_rate(priv);
+       }
+
+       if (changed & (IEEE80211_CONF_CHANGE_PS |
+                       IEEE80211_CONF_CHANGE_IDLE)) {
+               ret = iwl_power_update_mode(priv, false);
+               if (ret)
+                       IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
+       }
+
+       if (changed & IEEE80211_CONF_CHANGE_POWER) {
+               IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
+                       priv->tx_power_user_lmt, conf->power_level);
+
+               iwl_set_tx_power(priv, conf->power_level, false);
+       }
+
+       for_each_context(priv, ctx) {
+               if (!memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
+                       continue;
+               iwlagn_commit_rxon(priv, ctx);
+               if (ht_changed[ctx->ctxid])
+                       iwlagn_update_qos(priv, ctx);
+       }
+ out:
+       mutex_unlock(&priv->mutex);
+       return ret;
+}
+
+static void iwlagn_check_needed_chains(struct iwl_priv *priv,
+                                      struct iwl_rxon_context *ctx,
+                                      struct ieee80211_bss_conf *bss_conf)
+{
+       struct ieee80211_vif *vif = ctx->vif;
+       struct iwl_rxon_context *tmp;
+       struct ieee80211_sta *sta;
+       struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+       bool need_multiple;
+
+       lockdep_assert_held(&priv->mutex);
+
+       switch (vif->type) {
+       case NL80211_IFTYPE_STATION:
+               rcu_read_lock();
+               sta = ieee80211_find_sta(vif, bss_conf->bssid);
+               if (sta) {
+                       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+                       int maxstreams;
+
+                       maxstreams = (ht_cap->mcs.tx_params &
+                                     IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
+                                       >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
+                       maxstreams += 1;
+
+                       need_multiple = true;
+
+                       if ((ht_cap->mcs.rx_mask[1] == 0) &&
+                           (ht_cap->mcs.rx_mask[2] == 0))
+                               need_multiple = false;
+                       if (maxstreams <= 1)
+                               need_multiple = false;
+               } else {
+                       /*
+                        * If this happens at all, it can only be through a
+                        * race when the AP disconnects us while we're still
+                        * setting up the connection; in that case mac80211
+                        * will soon tell us about it.
+                        */
+                       need_multiple = false;
+               }
+               rcu_read_unlock();
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               /* currently: a single chain is considered sufficient in IBSS */
+               need_multiple = false;
+               break;
+       default:
+               /* only AP really */
+               need_multiple = true;
+               break;
+       }
+
+       ctx->ht_need_multiple_chains = need_multiple;
+
+       if (!need_multiple) {
+               /* check all contexts */
+               for_each_context(priv, tmp) {
+                       if (!tmp->vif)
+                               continue;
+                       if (tmp->ht_need_multiple_chains) {
+                               need_multiple = true;
+                               break;
+                       }
+               }
+       }
+
+       ht_conf->single_chain_sufficient = !need_multiple;
+}
+
+void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
+                            struct ieee80211_vif *vif,
+                            struct ieee80211_bss_conf *bss_conf,
+                            u32 changes)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+       int ret;
+       bool force = false;
+
+       mutex_lock(&priv->mutex);
+
+       if (changes & BSS_CHANGED_BEACON_INT)
+               force = true;
+
+       if (changes & BSS_CHANGED_QOS) {
+               ctx->qos_data.qos_active = bss_conf->qos;
+               iwlagn_update_qos(priv, ctx);
+       }
+
+       ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
+       if (vif->bss_conf.use_short_preamble)
+               ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+       else
+               ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+       if (changes & BSS_CHANGED_ASSOC) {
+               if (bss_conf->assoc) {
+                       iwl_led_associate(priv);
+                       priv->timestamp = bss_conf->timestamp;
+                       ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+               } else {
+                       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+                       iwl_led_disassociate(priv);
+               }
+       }
+
+       if (ctx->ht.enabled) {
+               ctx->ht.protection = bss_conf->ht_operation_mode &
+                                       IEEE80211_HT_OP_MODE_PROTECTION;
+               ctx->ht.non_gf_sta_present = !!(bss_conf->ht_operation_mode &
+                                       IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+               iwlagn_check_needed_chains(priv, ctx, bss_conf);
+               iwl_set_rxon_ht(priv, &priv->current_ht_config);
+       }
+
+       if (priv->cfg->ops->hcmd->set_rxon_chain)
+               priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+
+       if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
+               ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
+       else
+               ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
+
+       if (bss_conf->use_cts_prot)
+               ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+       else
+               ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
+
+       memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
+
+       if (vif->type == NL80211_IFTYPE_AP ||
+           vif->type == NL80211_IFTYPE_ADHOC) {
+               if (vif->bss_conf.enable_beacon) {
+                       ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+                       priv->beacon_ctx = ctx;
+               } else {
+                       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+                       priv->beacon_ctx = NULL;
+               }
+       }
+
+       if (force || memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
+               iwlagn_commit_rxon(priv, ctx);
+
+       if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) {
+               /*
+                * The chain noise calibration will enable PM upon
+                * completion. If calibration has already been run
+                * then we need to enable power management here.
+                */
+               if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
+                       iwl_power_update_mode(priv, false);
+
+               /* Enable RX differential gain and sensitivity calibrations */
+               iwl_chain_noise_reset(priv);
+               priv->start_calib = 1;
+       }
+
+       if (changes & BSS_CHANGED_IBSS) {
+               ret = iwlagn_manage_ibss_station(priv, vif,
+                                                bss_conf->ibss_joined);
+               if (ret)
+                       IWL_ERR(priv, "failed to %s IBSS station %pM\n",
+                               bss_conf->ibss_joined ? "add" : "remove",
+                               bss_conf->bssid);
+       }
+
+       if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_ADHOC &&
+           priv->beacon_ctx) {
+               if (iwlagn_update_beacon(priv, vif))
+                       IWL_ERR(priv, "Error sending IBSS beacon\n");
+       }
+
+       mutex_unlock(&priv->mutex);
+}
+
+void iwlagn_post_scan(struct iwl_priv *priv)
+{
+       struct iwl_rxon_context *ctx;
+
+       /*
+        * Since setting the RXON may have been deferred while
+        * performing the scan, fire one off if needed
+        */
+       for_each_context(priv, ctx)
+               if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
+                       iwlagn_commit_rxon(priv, ctx);
+
+       if (priv->cfg->ops->hcmd->set_pan_params)
+               priv->cfg->ops->hcmd->set_pan_params(priv);
+}
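
Both iwlagn_mac_config (above) and iwlagn_post_scan rely on the same deferred-commit idiom: a new RXON command is only sent when the staging copy differs from the active one, and the active copy then mirrors what the hardware accepted. Below is a minimal, self-contained sketch of that idiom, not part of this patch; the rxon_cfg/rxon_ctx types and the commit callback are hypothetical stand-ins for the real iwl structures.

#include <stdbool.h>
#include <string.h>

/* Hypothetical stand-ins for the driver's RXON structures. */
struct rxon_cfg { unsigned short channel; unsigned int filter_flags; };
struct rxon_ctx { struct rxon_cfg staging, active; };

/*
 * Send the staging configuration only when it differs from the active
 * one, mirroring the memcmp() checks above.  On success the active copy
 * is updated so later calls become no-ops until staging changes again.
 */
static bool commit_if_dirty(struct rxon_ctx *ctx,
                            int (*commit)(const struct rxon_cfg *))
{
        if (!memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
                return false;                   /* nothing changed */
        if (commit(&ctx->staging) == 0)
                ctx->active = ctx->staging;     /* hardware accepted it */
        return true;
}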
index 35a30d2e07348e0740e5e538dc42bb3bd32ffefb..35f085ac336b02ca6014b56c3e7ca16678e33a9b 100644 (file)
@@ -684,7 +684,7 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
        return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
 }
 
-void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
+static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
 {
        unsigned long flags;
 
@@ -714,3 +714,33 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
        spin_unlock_irqrestore(&priv->sta_lock, flags);
 
 }
+
+void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
+                          struct ieee80211_vif *vif,
+                          enum sta_notify_cmd cmd,
+                          struct ieee80211_sta *sta)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+       int sta_id;
+
+       switch (cmd) {
+       case STA_NOTIFY_SLEEP:
+               WARN_ON(!sta_priv->client);
+               sta_priv->asleep = true;
+               if (atomic_read(&sta_priv->pending_frames) > 0)
+                       ieee80211_sta_block_awake(hw, sta, true);
+               break;
+       case STA_NOTIFY_AWAKE:
+               WARN_ON(!sta_priv->client);
+               if (!sta_priv->asleep)
+                       break;
+               sta_priv->asleep = false;
+               sta_id = iwl_sta_id(sta);
+               if (sta_id != IWL_INVALID_STATION)
+                       iwl_sta_modify_ps_wake(priv, sta_id);
+               break;
+       default:
+               break;
+       }
+}
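
For context: the STA_NOTIFY_SLEEP path above only blocks the station via ieee80211_sta_block_awake() while frames are still pending, and the matching unblock is normally issued from the TX completion path once that counter drains. A hedged sketch of that bookkeeping follows; only sta_priv->pending_frames, sta_priv->asleep and the mac80211 call are taken from the code above, while the example_* helpers are purely illustrative and do not exist in the driver.

/* Illustrative only: how the pending-frames counter consulted by
 * iwlagn_mac_sta_notify() is typically maintained. */
static void example_tx_queued(struct iwl_station_priv *sta_priv)
{
        atomic_inc(&sta_priv->pending_frames);
}

static void example_tx_completed(struct ieee80211_hw *hw,
                                 struct ieee80211_sta *sta)
{
        struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;

        /* last in-flight frame drained: let mac80211 wake the station */
        if (atomic_dec_and_test(&sta_priv->pending_frames) && sta_priv->asleep)
                ieee80211_sta_block_awake(hw, sta, false);
}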
index c2636a7ab9eed9840f1fe0ec0aa466ec610aee57..007fb20d78ab6c5beab4179713c2a699ea116208 100644 (file)
@@ -90,170 +90,6 @@ MODULE_ALIAS("iwl4965");
 static int iwlagn_ant_coupling;
 static bool iwlagn_bt_ch_announce = 1;
 
-/**
- * iwlagn_commit_rxon - commit staging_rxon to hardware
- *
- * The RXON command in staging_rxon is committed to the hardware and
- * the active_rxon structure is updated with the new data.  This
- * function correctly transitions out of the RXON_ASSOC_MSK state if
- * a HW tune is required based on the RXON structure changes.
- */
-int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
-       /* cast away the const for active_rxon in this function */
-       struct iwl_rxon_cmd *active_rxon = (void *)&ctx->active;
-       int ret;
-       bool new_assoc =
-               !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
-       bool old_assoc = !!(ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK);
-
-       if (!iwl_is_alive(priv))
-               return -EBUSY;
-
-       if (!ctx->is_active)
-               return 0;
-
-       /* always get timestamp with Rx frame */
-       ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
-
-       ret = iwl_check_rxon_cmd(priv, ctx);
-       if (ret) {
-               IWL_ERR(priv, "Invalid RXON configuration.  Not committing.\n");
-               return -EINVAL;
-       }
-
-       /*
-        * receive commit_rxon request
-        * abort any previous channel switch if still in process
-        */
-       if (priv->switch_rxon.switch_in_progress &&
-           (priv->switch_rxon.channel != ctx->staging.channel)) {
-               IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
-                     le16_to_cpu(priv->switch_rxon.channel));
-               iwl_chswitch_done(priv, false);
-       }
-
-       /* If we don't need to send a full RXON, we can use
-        * iwl_rxon_assoc_cmd which is used to reconfigure filter
-        * and other flags for the current radio configuration. */
-       if (!iwl_full_rxon_required(priv, ctx)) {
-               ret = iwl_send_rxon_assoc(priv, ctx);
-               if (ret) {
-                       IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
-                       return ret;
-               }
-
-               memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
-               iwl_print_rx_config_cmd(priv, ctx);
-               return 0;
-       }
-
-       /* If we are currently associated and the new config requires
-        * an RXON_ASSOC and the new config wants the associated mask enabled,
-        * we must clear the associated from the active configuration
-        * before we apply the new config */
-       if (iwl_is_associated_ctx(ctx) && new_assoc) {
-               IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
-               active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-
-               ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
-                                      sizeof(struct iwl_rxon_cmd),
-                                      active_rxon);
-
-               /* If the mask clearing failed then we set
-                * active_rxon back to what it was previously */
-               if (ret) {
-                       active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
-                       IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
-                       return ret;
-               }
-               iwl_clear_ucode_stations(priv, ctx);
-               iwl_restore_stations(priv, ctx);
-               ret = iwl_restore_default_wep_keys(priv, ctx);
-               if (ret) {
-                       IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
-                       return ret;
-               }
-       }
-
-       IWL_DEBUG_INFO(priv, "Sending RXON\n"
-                      "* with%s RXON_FILTER_ASSOC_MSK\n"
-                      "* channel = %d\n"
-                      "* bssid = %pM\n",
-                      (new_assoc ? "" : "out"),
-                      le16_to_cpu(ctx->staging.channel),
-                      ctx->staging.bssid_addr);
-
-       iwl_set_rxon_hwcrypto(priv, ctx, !priv->cfg->mod_params->sw_crypto);
-
-       if (!old_assoc) {
-               /*
-                * First of all, before setting associated, we need to
-                * send RXON timing so the device knows about the DTIM
-                * period and other timing values
-                */
-               ret = iwl_send_rxon_timing(priv, ctx);
-               if (ret) {
-                       IWL_ERR(priv, "Error setting RXON timing!\n");
-                       return ret;
-               }
-       }
-
-       if (priv->cfg->ops->hcmd->set_pan_params) {
-               ret = priv->cfg->ops->hcmd->set_pan_params(priv);
-               if (ret)
-                       return ret;
-       }
-
-       /* Apply the new configuration
-        * RXON unassoc clears the station table in uCode so restoration of
-        * stations is needed after it (the RXON command) completes
-        */
-       if (!new_assoc) {
-               ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
-                             sizeof(struct iwl_rxon_cmd), &ctx->staging);
-               if (ret) {
-                       IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
-                       return ret;
-               }
-               IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
-               memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
-               iwl_clear_ucode_stations(priv, ctx);
-               iwl_restore_stations(priv, ctx);
-               ret = iwl_restore_default_wep_keys(priv, ctx);
-               if (ret) {
-                       IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
-                       return ret;
-               }
-       }
-       if (new_assoc) {
-               priv->start_calib = 0;
-               /* Apply the new configuration
-                * RXON assoc doesn't clear the station table in uCode,
-                */
-               ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
-                             sizeof(struct iwl_rxon_cmd), &ctx->staging);
-               if (ret) {
-                       IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
-                       return ret;
-               }
-               memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
-       }
-       iwl_print_rx_config_cmd(priv, ctx);
-
-       iwl_init_sensitivity(priv);
-
-       /* If we issue a new RXON command which required a tune then we must
-        * send a new TXPOWER command or we won't be able to Tx any frames */
-       ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
-       if (ret) {
-               IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
-               return ret;
-       }
-
-       return 0;
-}
-
 void iwl_update_chain_flags(struct iwl_priv *priv)
 {
        struct iwl_rxon_context *ctx;
@@ -411,7 +247,8 @@ static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv,
 
        return sizeof(*tx_beacon_cmd) + frame_size;
 }
-static int iwl_send_beacon_cmd(struct iwl_priv *priv)
+
+int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
 {
        struct iwl_frame *frame;
        unsigned int frame_size;
@@ -661,7 +498,7 @@ static void iwl_bg_beacon_update(struct work_struct *work)
 
        priv->beacon_skb = beacon;
 
-       iwl_send_beacon_cmd(priv);
+       iwlagn_send_beacon_cmd(priv);
  out:
        mutex_unlock(&priv->mutex);
 }
@@ -2978,7 +2815,8 @@ static void __iwl_down(struct iwl_priv *priv)
                                STATUS_EXIT_PENDING;
 
        /* device going down, Stop using ICT table */
-       iwl_disable_ict(priv);
+       if (priv->cfg->ops->lib->isr_ops.disable)
+               priv->cfg->ops->lib->isr_ops.disable(priv);
 
        iwlagn_txq_ctx_stop(priv);
        iwlagn_rxq_stop(priv);
@@ -3201,7 +3039,8 @@ static void iwl_bg_alive_start(struct work_struct *data)
                return;
 
        /* enable dram interrupt */
-       iwl_reset_ict(priv);
+       if (priv->cfg->ops->lib->isr_ops.reset)
+               priv->cfg->ops->lib->isr_ops.reset(priv);
 
        mutex_lock(&priv->mutex);
        iwl_alive_start(priv);
@@ -3309,92 +3148,6 @@ static void iwl_bg_rx_replenish(struct work_struct *data)
        mutex_unlock(&priv->mutex);
 }
 
-#define IWL_DELAY_NEXT_SCAN (HZ*2)
-
-void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
-{
-       struct iwl_rxon_context *ctx;
-       struct ieee80211_conf *conf = NULL;
-       int ret = 0;
-
-       if (!vif || !priv->is_open)
-               return;
-
-       ctx = iwl_rxon_ctx_from_vif(vif);
-
-       if (vif->type == NL80211_IFTYPE_AP) {
-               IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
-               return;
-       }
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return;
-
-       iwl_scan_cancel_timeout(priv, 200);
-
-       conf = ieee80211_get_hw_conf(priv->hw);
-
-       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-       iwlcore_commit_rxon(priv, ctx);
-
-       ret = iwl_send_rxon_timing(priv, ctx);
-       if (ret)
-               IWL_WARN(priv, "RXON timing - "
-                           "Attempting to continue.\n");
-
-       ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
-
-       iwl_set_rxon_ht(priv, &priv->current_ht_config);
-
-       if (priv->cfg->ops->hcmd->set_rxon_chain)
-               priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
-       ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
-
-       IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
-                       vif->bss_conf.aid, vif->bss_conf.beacon_int);
-
-       if (vif->bss_conf.use_short_preamble)
-               ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
-       else
-               ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
-
-       if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
-               if (vif->bss_conf.use_short_slot)
-                       ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
-               else
-                       ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
-       }
-
-       iwlcore_commit_rxon(priv, ctx);
-
-       IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
-                       vif->bss_conf.aid, ctx->active.bssid_addr);
-
-       switch (vif->type) {
-       case NL80211_IFTYPE_STATION:
-               break;
-       case NL80211_IFTYPE_ADHOC:
-               iwl_send_beacon_cmd(priv);
-               break;
-       default:
-               IWL_ERR(priv, "%s Should not be called in %d mode\n",
-                         __func__, vif->type);
-               break;
-       }
-
-       /* the chain noise calibration will enabled PM upon completion
-        * If chain noise has already been run, then we need to enable
-        * power management here */
-       if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
-               iwl_power_update_mode(priv, false);
-
-       /* Enable Rx differential gain and sensitivity calibrations */
-       iwl_chain_noise_reset(priv);
-       priv->start_calib = 1;
-
-}
-
 /*****************************************************************************
  *
  * mac80211 entry point functions
@@ -3474,7 +3227,7 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
 }
 
 
-static int iwl_mac_start(struct ieee80211_hw *hw)
+int iwlagn_mac_start(struct ieee80211_hw *hw)
 {
        struct iwl_priv *priv = hw->priv;
        int ret;
@@ -3515,7 +3268,7 @@ out:
        return 0;
 }
 
-static void iwl_mac_stop(struct ieee80211_hw *hw)
+void iwlagn_mac_stop(struct ieee80211_hw *hw)
 {
        struct iwl_priv *priv = hw->priv;
 
@@ -3537,7 +3290,7 @@ static void iwl_mac_stop(struct ieee80211_hw *hw)
        IWL_DEBUG_MAC80211(priv, "leave\n");
 }
 
-static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+int iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct iwl_priv *priv = hw->priv;
 
@@ -3553,73 +3306,12 @@ static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        return NETDEV_TX_OK;
 }
 
-void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
-{
-       struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
-       int ret = 0;
-
-       lockdep_assert_held(&priv->mutex);
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return;
-
-       /* The following should be done only at AP bring up */
-       if (!iwl_is_associated_ctx(ctx)) {
-
-               /* RXON - unassoc (to set timing command) */
-               ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-               iwlcore_commit_rxon(priv, ctx);
-
-               /* RXON Timing */
-               ret = iwl_send_rxon_timing(priv, ctx);
-               if (ret)
-                       IWL_WARN(priv, "RXON timing failed - "
-                                       "Attempting to continue.\n");
-
-               /* AP has all antennas */
-               priv->chain_noise_data.active_chains =
-                       priv->hw_params.valid_rx_ant;
-               iwl_set_rxon_ht(priv, &priv->current_ht_config);
-               if (priv->cfg->ops->hcmd->set_rxon_chain)
-                       priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
-               ctx->staging.assoc_id = 0;
-
-               if (vif->bss_conf.use_short_preamble)
-                       ctx->staging.flags |=
-                               RXON_FLG_SHORT_PREAMBLE_MSK;
-               else
-                       ctx->staging.flags &=
-                               ~RXON_FLG_SHORT_PREAMBLE_MSK;
-
-               if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
-                       if (vif->bss_conf.use_short_slot)
-                               ctx->staging.flags |=
-                                       RXON_FLG_SHORT_SLOT_MSK;
-                       else
-                               ctx->staging.flags &=
-                                       ~RXON_FLG_SHORT_SLOT_MSK;
-               }
-               /* need to send beacon cmd before committing assoc RXON! */
-               iwl_send_beacon_cmd(priv);
-               /* restore RXON assoc */
-               ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
-               iwlcore_commit_rxon(priv, ctx);
-       }
-       iwl_send_beacon_cmd(priv);
-
-       /* FIXME - we need to add code here to detect a totally new
-        * configuration, reset the AP, unassoc, rxon timing, assoc,
-        * clear sta table, add BCAST sta... */
-}
-
-static void iwl_mac_update_tkip_key(struct ieee80211_hw *hw,
-                                   struct ieee80211_vif *vif,
-                                   struct ieee80211_key_conf *keyconf,
-                                   struct ieee80211_sta *sta,
-                                   u32 iv32, u16 *phase1key)
+void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
+                               struct ieee80211_vif *vif,
+                               struct ieee80211_key_conf *keyconf,
+                               struct ieee80211_sta *sta,
+                               u32 iv32, u16 *phase1key)
 {
-
        struct iwl_priv *priv = hw->priv;
        struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
 
@@ -3631,10 +3323,9 @@ static void iwl_mac_update_tkip_key(struct ieee80211_hw *hw,
        IWL_DEBUG_MAC80211(priv, "leave\n");
 }
 
-static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
-                          struct ieee80211_vif *vif,
-                          struct ieee80211_sta *sta,
-                          struct ieee80211_key_conf *key)
+int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+                      struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+                      struct ieee80211_key_conf *key)
 {
        struct iwl_priv *priv = hw->priv;
        struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -3701,10 +3392,10 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
        return ret;
 }
 
-static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
-                               struct ieee80211_vif *vif,
-                               enum ieee80211_ampdu_mlme_action action,
-                               struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
+                           struct ieee80211_vif *vif,
+                           enum ieee80211_ampdu_mlme_action action,
+                           struct ieee80211_sta *sta, u16 tid, u16 *ssn)
 {
        struct iwl_priv *priv = hw->priv;
        int ret = -EINVAL;
@@ -3785,39 +3476,9 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
        return ret;
 }
 
-static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
-                              struct ieee80211_vif *vif,
-                              enum sta_notify_cmd cmd,
-                              struct ieee80211_sta *sta)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
-       int sta_id;
-
-       switch (cmd) {
-       case STA_NOTIFY_SLEEP:
-               WARN_ON(!sta_priv->client);
-               sta_priv->asleep = true;
-               if (atomic_read(&sta_priv->pending_frames) > 0)
-                       ieee80211_sta_block_awake(hw, sta, true);
-               break;
-       case STA_NOTIFY_AWAKE:
-               WARN_ON(!sta_priv->client);
-               if (!sta_priv->asleep)
-                       break;
-               sta_priv->asleep = false;
-               sta_id = iwl_sta_id(sta);
-               if (sta_id != IWL_INVALID_STATION)
-                       iwl_sta_modify_ps_wake(priv, sta_id);
-               break;
-       default:
-               break;
-       }
-}
-
-static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
-                             struct ieee80211_vif *vif,
-                             struct ieee80211_sta *sta)
+int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
+                      struct ieee80211_vif *vif,
+                      struct ieee80211_sta *sta)
 {
        struct iwl_priv *priv = hw->priv;
        struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
@@ -3858,8 +3519,8 @@ static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
        return 0;
 }
 
-static void iwl_mac_channel_switch(struct ieee80211_hw *hw,
-                                  struct ieee80211_channel_switch *ch_switch)
+void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
+                              struct ieee80211_channel_switch *ch_switch)
 {
        struct iwl_priv *priv = hw->priv;
        const struct iwl_channel_info *ch_info;
@@ -3956,10 +3617,10 @@ out_exit:
        IWL_DEBUG_MAC80211(priv, "leave\n");
 }
 
-static void iwlagn_configure_filter(struct ieee80211_hw *hw,
-                                   unsigned int changed_flags,
-                                   unsigned int *total_flags,
-                                   u64 multicast)
+void iwlagn_configure_filter(struct ieee80211_hw *hw,
+                            unsigned int changed_flags,
+                            unsigned int *total_flags,
+                            u64 multicast)
 {
        struct iwl_priv *priv = hw->priv;
        __le32 filter_or = 0, filter_nand = 0;
@@ -3986,7 +3647,11 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
        for_each_context(priv, ctx) {
                ctx->staging.filter_flags &= ~filter_nand;
                ctx->staging.filter_flags |= filter_or;
-               iwlcore_commit_rxon(priv, ctx);
+
+               /*
+                * Not committing directly because the hardware may be in the
+                * middle of a scan; the filter flags change will be committed
+                * eventually anyway.
+                */
        }
 
        mutex_unlock(&priv->mutex);
@@ -4001,7 +3666,7 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
                        FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
 }
 
-static void iwl_mac_flush(struct ieee80211_hw *hw, bool drop)
+void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
 {
        struct iwl_priv *priv = hw->priv;
 
@@ -4179,6 +3844,7 @@ static int iwl_init_drv(struct iwl_priv *priv)
         * this value will get overwritten by channel max power avg
         * from eeprom */
        priv->tx_power_user_lmt = IWLAGN_TX_POWER_TARGET_POWER_MIN;
+       priv->tx_power_next = IWLAGN_TX_POWER_TARGET_POWER_MIN;
 
        ret = iwl_init_channel_map(priv);
        if (ret) {
@@ -4209,28 +3875,30 @@ static void iwl_uninit_drv(struct iwl_priv *priv)
        kfree(priv->scan_cmd);
 }
 
-static struct ieee80211_ops iwl_hw_ops = {
-       .tx = iwl_mac_tx,
-       .start = iwl_mac_start,
-       .stop = iwl_mac_stop,
+#ifdef CONFIG_IWL5000
+struct ieee80211_ops iwlagn_hw_ops = {
+       .tx = iwlagn_mac_tx,
+       .start = iwlagn_mac_start,
+       .stop = iwlagn_mac_stop,
        .add_interface = iwl_mac_add_interface,
        .remove_interface = iwl_mac_remove_interface,
-       .config = iwl_mac_config,
+       .change_interface = iwl_mac_change_interface,
+       .config = iwlagn_mac_config,
        .configure_filter = iwlagn_configure_filter,
-       .set_key = iwl_mac_set_key,
-       .update_tkip_key = iwl_mac_update_tkip_key,
+       .set_key = iwlagn_mac_set_key,
+       .update_tkip_key = iwlagn_mac_update_tkip_key,
        .conf_tx = iwl_mac_conf_tx,
-       .reset_tsf = iwl_mac_reset_tsf,
-       .bss_info_changed = iwl_bss_info_changed,
-       .ampdu_action = iwl_mac_ampdu_action,
+       .bss_info_changed = iwlagn_bss_info_changed,
+       .ampdu_action = iwlagn_mac_ampdu_action,
        .hw_scan = iwl_mac_hw_scan,
-       .sta_notify = iwl_mac_sta_notify,
+       .sta_notify = iwlagn_mac_sta_notify,
        .sta_add = iwlagn_mac_sta_add,
        .sta_remove = iwl_mac_sta_remove,
-       .channel_switch = iwl_mac_channel_switch,
-       .flush = iwl_mac_flush,
+       .channel_switch = iwlagn_mac_channel_switch,
+       .flush = iwlagn_mac_flush,
        .tx_last_beacon = iwl_mac_tx_last_beacon,
 };
+#endif
 
 static void iwl_hw_detect(struct iwl_priv *priv)
 {
@@ -4298,10 +3966,15 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (cfg->mod_params->disable_hw_scan) {
                dev_printk(KERN_DEBUG, &(pdev->dev),
                        "sw scan support is deprecated\n");
-               iwl_hw_ops.hw_scan = NULL;
+#ifdef CONFIG_IWL5000
+               iwlagn_hw_ops.hw_scan = NULL;
+#endif
+#ifdef CONFIG_IWL4965
+               iwl4965_hw_ops.hw_scan = NULL;
+#endif
        }
 
-       hw = iwl_alloc_all(cfg, &iwl_hw_ops);
+       hw = iwl_alloc_all(cfg);
        if (!hw) {
                err = -ENOMEM;
                goto out;
@@ -4333,6 +4006,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                BIT(NL80211_IFTYPE_ADHOC);
        priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
                BIT(NL80211_IFTYPE_STATION);
+       priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
        priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
        priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
        priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
@@ -4500,8 +4174,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        pci_enable_msi(priv->pci_dev);
 
-       iwl_alloc_isr_ict(priv);
-       err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr,
+       if (priv->cfg->ops->lib->isr_ops.alloc)
+               priv->cfg->ops->lib->isr_ops.alloc(priv);
+
+       err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr_ops.isr,
                          IRQF_SHARED, DRV_NAME, priv);
        if (err) {
                IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
@@ -4548,7 +4224,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        destroy_workqueue(priv->workqueue);
        priv->workqueue = NULL;
        free_irq(priv->pci_dev->irq, priv);
-       iwl_free_isr_ict(priv);
+       if (priv->cfg->ops->lib->isr_ops.free)
+               priv->cfg->ops->lib->isr_ops.free(priv);
  out_disable_msi:
        pci_disable_msi(priv->pci_dev);
        iwl_uninit_drv(priv);
@@ -4643,7 +4320,8 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
 
        iwl_uninit_drv(priv);
 
-       iwl_free_isr_ict(priv);
+       if (priv->cfg->ops->lib->isr_ops.free)
+               priv->cfg->ops->lib->isr_ops.free(priv);
 
        dev_kfree_skb(priv->beacon_skb);
 
@@ -4735,13 +4413,6 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
        {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
 
 /* 6x00 Series Gen2a */
-       {IWL_PCI_DEVICE(0x0082, 0x1201, iwl6000g2a_2agn_cfg)},
-       {IWL_PCI_DEVICE(0x0085, 0x1211, iwl6000g2a_2agn_cfg)},
-       {IWL_PCI_DEVICE(0x0082, 0x1221, iwl6000g2a_2agn_cfg)},
-       {IWL_PCI_DEVICE(0x0082, 0x1206, iwl6000g2a_2abg_cfg)},
-       {IWL_PCI_DEVICE(0x0085, 0x1216, iwl6000g2a_2abg_cfg)},
-       {IWL_PCI_DEVICE(0x0082, 0x1226, iwl6000g2a_2abg_cfg)},
-       {IWL_PCI_DEVICE(0x0082, 0x1207, iwl6000g2a_2bg_cfg)},
        {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6000g2a_2agn_cfg)},
        {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6000g2a_2abg_cfg)},
        {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6000g2a_2bg_cfg)},
@@ -4751,24 +4422,12 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
        {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6000g2a_2abg_cfg)},
 
 /* 6x00 Series Gen2b */
-       {IWL_PCI_DEVICE(0x008F, 0x5105, iwl6000g2b_bgn_cfg)},
-       {IWL_PCI_DEVICE(0x0090, 0x5115, iwl6000g2b_bgn_cfg)},
-       {IWL_PCI_DEVICE(0x008F, 0x5125, iwl6000g2b_bgn_cfg)},
-       {IWL_PCI_DEVICE(0x008F, 0x5107, iwl6000g2b_bg_cfg)},
-       {IWL_PCI_DEVICE(0x008F, 0x5201, iwl6000g2b_2agn_cfg)},
-       {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6000g2b_2agn_cfg)},
-       {IWL_PCI_DEVICE(0x008F, 0x5221, iwl6000g2b_2agn_cfg)},
-       {IWL_PCI_DEVICE(0x008F, 0x5206, iwl6000g2b_2abg_cfg)},
-       {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6000g2b_2abg_cfg)},
-       {IWL_PCI_DEVICE(0x008F, 0x5226, iwl6000g2b_2abg_cfg)},
-       {IWL_PCI_DEVICE(0x008F, 0x5207, iwl6000g2b_2bg_cfg)},
-       {IWL_PCI_DEVICE(0x008A, 0x5301, iwl6000g2b_bgn_cfg)},
        {IWL_PCI_DEVICE(0x008A, 0x5305, iwl6000g2b_bgn_cfg)},
        {IWL_PCI_DEVICE(0x008A, 0x5307, iwl6000g2b_bg_cfg)},
-       {IWL_PCI_DEVICE(0x008A, 0x5321, iwl6000g2b_bgn_cfg)},
        {IWL_PCI_DEVICE(0x008A, 0x5325, iwl6000g2b_bgn_cfg)},
-       {IWL_PCI_DEVICE(0x008B, 0x5311, iwl6000g2b_bgn_cfg)},
+       {IWL_PCI_DEVICE(0x008A, 0x5327, iwl6000g2b_bg_cfg)},
        {IWL_PCI_DEVICE(0x008B, 0x5315, iwl6000g2b_bgn_cfg)},
+       {IWL_PCI_DEVICE(0x008B, 0x5317, iwl6000g2b_bg_cfg)},
        {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6000g2b_2agn_cfg)},
        {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6000g2b_2bgn_cfg)},
        {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6000g2b_2abg_cfg)},
@@ -4812,10 +4471,11 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
 
 /* 100 Series WiFi */
        {IWL_PCI_DEVICE(0x08AE, 0x1005, iwl100_bgn_cfg)},
+       {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)},
        {IWL_PCI_DEVICE(0x08AF, 0x1015, iwl100_bgn_cfg)},
+       {IWL_PCI_DEVICE(0x08AF, 0x1017, iwl100_bg_cfg)},
        {IWL_PCI_DEVICE(0x08AE, 0x1025, iwl100_bgn_cfg)},
-       {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)},
-       {IWL_PCI_DEVICE(0x08AE, 0x1017, iwl100_bg_cfg)},
+       {IWL_PCI_DEVICE(0x08AE, 0x1027, iwl100_bg_cfg)},
 
 /* 130 Series WiFi */
        {IWL_PCI_DEVICE(0x0896, 0x5005, iwl130_bgn_cfg)},
@@ -4836,10 +4496,7 @@ static struct pci_driver iwl_driver = {
        .id_table = iwl_hw_card_ids,
        .probe = iwl_pci_probe,
        .remove = __devexit_p(iwl_pci_remove),
-#ifdef CONFIG_PM
-       .suspend = iwl_pci_suspend,
-       .resume = iwl_pci_resume,
-#endif
+       .driver.pm = IWL_PM_OPS,
 };
 
 static int __init iwl_init(void)
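
The switch from the legacy .suspend/.resume PCI hooks to .driver.pm = IWL_PM_OPS moves the driver onto the dev_pm_ops interface. The hunk above does not show how IWL_PM_OPS is defined; a plausible shape, given purely as an assumption (under dev_pm_ops the callbacks take a struct device * rather than a pci_dev/pm_message_t pair), would be:

#ifdef CONFIG_PM
static const struct dev_pm_ops iwl_pm_ops = {
        .suspend = iwl_pci_suspend,     /* assumed: int iwl_pci_suspend(struct device *) */
        .resume  = iwl_pci_resume,      /* assumed: int iwl_pci_resume(struct device *) */
};
#define IWL_PM_OPS      (&iwl_pm_ops)
#else
#define IWL_PM_OPS      NULL
#endif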
index f525d55f2c0fb208c92b5e60d56ab5386da4acc4..28837a185a28b677d4f7336d72a68024e4121f20 100644 (file)
@@ -102,6 +102,9 @@ extern struct iwl_hcmd_ops iwlagn_hcmd;
 extern struct iwl_hcmd_ops iwlagn_bt_hcmd;
 extern struct iwl_hcmd_utils_ops iwlagn_hcmd_utils;
 
+extern struct ieee80211_ops iwlagn_hw_ops;
+extern struct ieee80211_ops iwl4965_hw_ops;
+
 int iwl_reset_ict(struct iwl_priv *priv);
 void iwl_disable_ict(struct iwl_priv *priv);
 int iwl_alloc_isr_ict(struct iwl_priv *priv);
@@ -132,6 +135,11 @@ void iwl_free_tfds_in_queue(struct iwl_priv *priv,
 /* RXON */
 int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
 void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed);
+void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
+                            struct ieee80211_vif *vif,
+                            struct ieee80211_bss_conf *bss_conf,
+                            u32 changes);
 
 /* uCode */
 int iwlagn_load_ucode(struct iwl_priv *priv);
@@ -249,6 +257,7 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv,
 int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
                           struct iwl_rxon_context *ctx);
 int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant);
+int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
 
 /* bt coex */
 void iwlagn_send_advance_bt_config(struct iwl_priv *priv);
@@ -292,9 +301,12 @@ int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
                         int tid, u16 ssn);
 int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
                        int tid);
-void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id);
 void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt);
 int iwl_update_bcast_stations(struct iwl_priv *priv);
+void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
+                          struct ieee80211_vif *vif,
+                          enum sta_notify_cmd cmd,
+                          struct ieee80211_sta *sta);
 
 /* rate */
 static inline u32 iwl_ant_idx_to_flags(u8 ant_idx)
@@ -318,4 +330,31 @@ void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
 int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv);
 void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv);
 
+/* mac80211 handlers (for 4965) */
+int iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+int iwlagn_mac_start(struct ieee80211_hw *hw);
+void iwlagn_mac_stop(struct ieee80211_hw *hw);
+void iwlagn_configure_filter(struct ieee80211_hw *hw,
+                            unsigned int changed_flags,
+                            unsigned int *total_flags,
+                            u64 multicast);
+int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+                      struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+                      struct ieee80211_key_conf *key);
+void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
+                               struct ieee80211_vif *vif,
+                               struct ieee80211_key_conf *keyconf,
+                               struct ieee80211_sta *sta,
+                               u32 iv32, u16 *phase1key);
+int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
+                           struct ieee80211_vif *vif,
+                           enum ieee80211_ampdu_mlme_action action,
+                           struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
+                      struct ieee80211_vif *vif,
+                      struct ieee80211_sta *sta);
+void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
+                              struct ieee80211_channel_switch *ch_switch);
+void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop);
+
 #endif /* __iwl_agn_h__ */
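
Several hunks above replace direct calls to iwl_disable_ict(), iwl_reset_ict(), iwl_alloc_isr_ict() and iwl_free_isr_ict(), plus the ISR itself, with indirection through priv->cfg->ops->lib->isr_ops. Judging only from those call sites and the iwl_*_ict() declarations above, the ops bundle would have roughly the following shape; this is an inference for illustration, not the definition carried by the patch.

/* Assumed shape of the per-device interrupt ops, inferred from the call
 * sites above (request_irq() is handed .isr; the others take only priv). */
struct iwl_isr_ops {
        irqreturn_t (*isr)(int irq, void *data);
        int  (*alloc)(struct iwl_priv *priv);
        void (*free)(struct iwl_priv *priv);
        void (*disable)(struct iwl_priv *priv);
        int  (*reset)(struct iwl_priv *priv);
};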
index 25fb3912342ceff334f6c2d82cb61defc5b0ee75..c884ed385fcfc80e154c5b4520cdb65439639492 100644 (file)
@@ -77,15 +77,15 @@ EXPORT_SYMBOL(iwl_bcast_addr);
 
 
 /* This function both allocates and initializes hw and priv. */
-struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
-               struct ieee80211_ops *hw_ops)
+struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg)
 {
        struct iwl_priv *priv;
-
        /* mac80211 allocates memory for this device instance, including
         *   space for this driver's private structure */
-       struct ieee80211_hw *hw =
-               ieee80211_alloc_hw(sizeof(struct iwl_priv), hw_ops);
+       struct ieee80211_hw *hw;
+
+       hw = ieee80211_alloc_hw(sizeof(struct iwl_priv),
+                               cfg->ops->ieee80211_ops);
        if (hw == NULL) {
                pr_err("%s: Can not allocate network device\n",
                       cfg->name);
@@ -100,35 +100,6 @@ out:
 }
 EXPORT_SYMBOL(iwl_alloc_all);
 
-/*
- * QoS  support
-*/
-static void iwl_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return;
-
-       if (!ctx->is_active)
-               return;
-
-       ctx->qos_data.def_qos_parm.qos_flags = 0;
-
-       if (ctx->qos_data.qos_active)
-               ctx->qos_data.def_qos_parm.qos_flags |=
-                       QOS_PARAM_FLG_UPDATE_EDCA_MSK;
-
-       if (ctx->ht.enabled)
-               ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
-
-       IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
-                     ctx->qos_data.qos_active,
-                     ctx->qos_data.def_qos_parm.qos_flags);
-
-       iwl_send_cmd_pdu_async(priv, ctx->qos_cmd,
-                              sizeof(struct iwl_qosparam_cmd),
-                              &ctx->qos_data.def_qos_parm, NULL);
-}
-
 #define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
 #define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
 static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
@@ -317,40 +288,6 @@ void iwlcore_free_geos(struct iwl_priv *priv)
 }
 EXPORT_SYMBOL(iwlcore_free_geos);
 
-/*
- *  iwlcore_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this
- *  function.
- */
-void iwlcore_tx_cmd_protection(struct iwl_priv *priv,
-                              struct ieee80211_tx_info *info,
-                              __le16 fc, __le32 *tx_flags)
-{
-       if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
-               *tx_flags |= TX_CMD_FLG_RTS_MSK;
-               *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
-               *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
-
-               if (!ieee80211_is_mgmt(fc))
-                       return;
-
-               switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
-               case cpu_to_le16(IEEE80211_STYPE_AUTH):
-               case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
-               case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
-               case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
-                       *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
-                       *tx_flags |= TX_CMD_FLG_CTS_MSK;
-                       break;
-               }
-       } else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
-               *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
-               *tx_flags |= TX_CMD_FLG_CTS_MSK;
-               *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
-       }
-}
-EXPORT_SYMBOL(iwlcore_tx_cmd_protection);
-
-
 static bool iwl_is_channel_extension(struct iwl_priv *priv,
                                     enum ieee80211_band band,
                                     u16 channel, u8 extension_chan_offset)
@@ -1206,8 +1143,16 @@ EXPORT_SYMBOL(iwl_apm_init);
 
 int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
 {
-       int ret = 0;
-       s8 prev_tx_power = priv->tx_power_user_lmt;
+       int ret;
+       s8 prev_tx_power;
+
+       lockdep_assert_held(&priv->mutex);
+
+       if (priv->tx_power_user_lmt == tx_power && !force)
+               return 0;
+
+       if (!priv->cfg->ops->lib->send_tx_power)
+               return -EOPNOTSUPP;
 
        if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) {
                IWL_WARN(priv,
@@ -1224,93 +1169,29 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
                return -EINVAL;
        }
 
-       if (priv->tx_power_user_lmt != tx_power)
-               force = true;
+       if (!iwl_is_ready_rf(priv))
+               return -EIO;
 
-       /* if nic is not up don't send command */
-       if (iwl_is_ready_rf(priv)) {
-               priv->tx_power_user_lmt = tx_power;
-               if (force && priv->cfg->ops->lib->send_tx_power)
-                       ret = priv->cfg->ops->lib->send_tx_power(priv);
-               else if (!priv->cfg->ops->lib->send_tx_power)
-                       ret = -EOPNOTSUPP;
-               /*
-                * if fail to set tx_power, restore the orig. tx power
-                */
-               if (ret)
-                       priv->tx_power_user_lmt = prev_tx_power;
+       /* tx_power_next is applied on scan completion, so keep it updated */
+       priv->tx_power_next = tx_power;
+       if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
+               IWL_DEBUG_INFO(priv, "Deferring tx power set while scanning\n");
+               return 0;
        }
 
-       /*
-        * Even this is an async host command, the command
-        * will always report success from uCode
-        * So once driver can placing the command into the queue
-        * successfully, driver can use priv->tx_power_user_lmt
-        * to reflect the current tx power
-        */
-       return ret;
-}
-EXPORT_SYMBOL(iwl_set_tx_power);
-
-irqreturn_t iwl_isr_legacy(int irq, void *data)
-{
-       struct iwl_priv *priv = data;
-       u32 inta, inta_mask;
-       u32 inta_fh;
-       unsigned long flags;
-       if (!priv)
-               return IRQ_NONE;
+       prev_tx_power = priv->tx_power_user_lmt;
+       priv->tx_power_user_lmt = tx_power;
 
-       spin_lock_irqsave(&priv->lock, flags);
+       ret = priv->cfg->ops->lib->send_tx_power(priv);
 
-       /* Disable (but don't clear!) interrupts here to avoid
-        *    back-to-back ISRs and sporadic interrupts from our NIC.
-        * If we have something to service, the tasklet will re-enable ints.
-        * If we *don't* have something, we'll re-enable before leaving here. */
-       inta_mask = iwl_read32(priv, CSR_INT_MASK);  /* just for debug */
-       iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
-       /* Discover which interrupts are active/pending */
-       inta = iwl_read32(priv, CSR_INT);
-       inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
-
-       /* Ignore interrupt if there's nothing in NIC to service.
-        * This may be due to IRQ shared with another device,
-        * or due to sporadic interrupts thrown from our NIC. */
-       if (!inta && !inta_fh) {
-               IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0, inta_fh == 0\n");
-               goto none;
+       /* if setting tx_power fails, restore the original tx power */
+       if (ret) {
+               priv->tx_power_user_lmt = prev_tx_power;
+               priv->tx_power_next = prev_tx_power;
        }
-
-       if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
-               /* Hardware disappeared. It might have already raised
-                * an interrupt */
-               IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
-               goto unplugged;
-       }
-
-       IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
-                     inta, inta_mask, inta_fh);
-
-       inta &= ~CSR_INT_BIT_SCD;
-
-       /* iwl_irq_tasklet() will service interrupts and re-enable them */
-       if (likely(inta || inta_fh))
-               tasklet_schedule(&priv->irq_tasklet);
-
- unplugged:
-       spin_unlock_irqrestore(&priv->lock, flags);
-       return IRQ_HANDLED;
-
- none:
-       /* re-enable interrupts here since we don't have anything to service. */
-       /* only Re-enable if diabled by irq */
-       if (test_bit(STATUS_INT_ENABLED, &priv->status))
-               iwl_enable_interrupts(priv);
-       spin_unlock_irqrestore(&priv->lock, flags);
-       return IRQ_NONE;
+       return ret;
 }
-EXPORT_SYMBOL(iwl_isr_legacy);
+EXPORT_SYMBOL(iwl_set_tx_power);
 
 void iwl_send_bt_config(struct iwl_priv *priv)
 {
@@ -1452,318 +1333,51 @@ int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw)
 }
 EXPORT_SYMBOL_GPL(iwl_mac_tx_last_beacon);
 
-static void iwl_ht_conf(struct iwl_priv *priv,
-                       struct ieee80211_vif *vif)
+static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
 {
-       struct iwl_ht_config *ht_conf = &priv->current_ht_config;
-       struct ieee80211_sta *sta;
-       struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
-       struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
-
-       IWL_DEBUG_MAC80211(priv, "enter:\n");
-
-       if (!ctx->ht.enabled)
-               return;
-
-       ctx->ht.protection =
-               bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
-       ctx->ht.non_gf_sta_present =
-               !!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
-
-       ht_conf->single_chain_sufficient = false;
-
-       switch (vif->type) {
-       case NL80211_IFTYPE_STATION:
-               rcu_read_lock();
-               sta = ieee80211_find_sta(vif, bss_conf->bssid);
-               if (sta) {
-                       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
-                       int maxstreams;
-
-                       maxstreams = (ht_cap->mcs.tx_params &
-                                     IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
-                                       >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
-                       maxstreams += 1;
-
-                       if ((ht_cap->mcs.rx_mask[1] == 0) &&
-                           (ht_cap->mcs.rx_mask[2] == 0))
-                               ht_conf->single_chain_sufficient = true;
-                       if (maxstreams <= 1)
-                               ht_conf->single_chain_sufficient = true;
-               } else {
-                       /*
-                        * If at all, this can only happen through a race
-                        * when the AP disconnects us while we're still
-                        * setting up the connection, in that case mac80211
-                        * will soon tell us about that.
-                        */
-                       ht_conf->single_chain_sufficient = true;
-               }
-               rcu_read_unlock();
-               break;
-       case NL80211_IFTYPE_ADHOC:
-               ht_conf->single_chain_sufficient = true;
-               break;
-       default:
-               break;
-       }
-
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-}
+       iwl_connection_init_rx_config(priv, ctx);
 
-static inline void iwl_set_no_assoc(struct iwl_priv *priv,
-                                   struct ieee80211_vif *vif)
-{
-       struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+       if (priv->cfg->ops->hcmd->set_rxon_chain)
+               priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
 
-       iwl_led_disassociate(priv);
-       /*
-        * inform the ucode that there is no longer an
-        * association and that no more packets should be
-        * sent
-        */
-       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-       ctx->staging.assoc_id = 0;
-       iwlcore_commit_rxon(priv, ctx);
+       return iwlcore_commit_rxon(priv, ctx);
 }
 
-static void iwlcore_beacon_update(struct ieee80211_hw *hw,
-                                 struct ieee80211_vif *vif)
+static int iwl_setup_interface(struct iwl_priv *priv,
+                              struct iwl_rxon_context *ctx)
 {
-       struct iwl_priv *priv = hw->priv;
-       unsigned long flags;
-       __le64 timestamp;
-       struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
-
-       if (!skb)
-               return;
-
-       IWL_DEBUG_ASSOC(priv, "enter\n");
+       struct ieee80211_vif *vif = ctx->vif;
+       int err;
 
        lockdep_assert_held(&priv->mutex);
 
-       if (!priv->beacon_ctx) {
-               IWL_ERR(priv, "update beacon but no beacon context!\n");
-               dev_kfree_skb(skb);
-               return;
-       }
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       if (priv->beacon_skb)
-               dev_kfree_skb(priv->beacon_skb);
-
-       priv->beacon_skb = skb;
-
-       timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
-       priv->timestamp = le64_to_cpu(timestamp);
-
-       IWL_DEBUG_ASSOC(priv, "leave\n");
-
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       if (!iwl_is_ready_rf(priv)) {
-               IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
-               return;
-       }
-
-       priv->cfg->ops->lib->post_associate(priv, priv->beacon_ctx->vif);
-}
-
-void iwl_bss_info_changed(struct ieee80211_hw *hw,
-                         struct ieee80211_vif *vif,
-                         struct ieee80211_bss_conf *bss_conf,
-                         u32 changes)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
-       int ret;
-
-       IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
-
-       if (!iwl_is_alive(priv))
-               return;
-
-       mutex_lock(&priv->mutex);
-
-       if (changes & BSS_CHANGED_QOS) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&priv->lock, flags);
-               ctx->qos_data.qos_active = bss_conf->qos;
-               iwl_update_qos(priv, ctx);
-               spin_unlock_irqrestore(&priv->lock, flags);
-       }
-
-       if (changes & BSS_CHANGED_BEACON_ENABLED) {
-               /*
-                * the add_interface code must make sure we only ever
-                * have a single interface that could be beaconing at
-                * any time.
-                */
-               if (vif->bss_conf.enable_beacon)
-                       priv->beacon_ctx = ctx;
-               else
-                       priv->beacon_ctx = NULL;
-       }
-
-       if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_AP) {
-               dev_kfree_skb(priv->beacon_skb);
-               priv->beacon_skb = ieee80211_beacon_get(hw, vif);
-       }
-
-       if (changes & BSS_CHANGED_BEACON_INT && vif->type == NL80211_IFTYPE_AP)
-               iwl_send_rxon_timing(priv, ctx);
-
-       if (changes & BSS_CHANGED_BSSID) {
-               IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
-
-               /*
-                * If there is currently a HW scan going on in the
-                * background then we need to cancel it else the RXON
-                * below/in post_associate will fail.
-                */
-               if (iwl_scan_cancel_timeout(priv, 100)) {
-                       IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
-                       IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
-                       mutex_unlock(&priv->mutex);
-                       return;
-               }
-
-               /* mac80211 only sets assoc when in STATION mode */
-               if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
-                       memcpy(ctx->staging.bssid_addr,
-                              bss_conf->bssid, ETH_ALEN);
-
-                       /* currently needed in a few places */
-                       memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
-               } else {
-                       ctx->staging.filter_flags &=
-                               ~RXON_FILTER_ASSOC_MSK;
-               }
-
-       }
-
        /*
-        * This needs to be after setting the BSSID in case
-        * mac80211 decides to do both changes at once because
-        * it will invoke post_associate.
+        * This variable will be correct only when there's just
+        * a single context, but all code using it is for hardware
+        * that supports only one context.
         */
-       if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
-               iwlcore_beacon_update(hw, vif);
-
-       if (changes & BSS_CHANGED_ERP_PREAMBLE) {
-               IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
-                                  bss_conf->use_short_preamble);
-               if (bss_conf->use_short_preamble)
-                       ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
-               else
-                       ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
-       }
-
-       if (changes & BSS_CHANGED_ERP_CTS_PROT) {
-               IWL_DEBUG_MAC80211(priv, "ERP_CTS %d\n", bss_conf->use_cts_prot);
-               if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
-                       ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
-               else
-                       ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
-               if (bss_conf->use_cts_prot)
-                       ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
-               else
-                       ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
-       }
-
-       if (changes & BSS_CHANGED_BASIC_RATES) {
-               /* XXX use this information
-                *
-                * To do that, remove code from iwl_set_rate() and put something
-                * like this here:
-                *
-               if (A-band)
-                       ctx->staging.ofdm_basic_rates =
-                               bss_conf->basic_rates;
-               else
-                       ctx->staging.ofdm_basic_rates =
-                               bss_conf->basic_rates >> 4;
-                       ctx->staging.cck_basic_rates =
-                               bss_conf->basic_rates & 0xF;
-                */
-       }
-
-       if (changes & BSS_CHANGED_HT) {
-               iwl_ht_conf(priv, vif);
-
-               if (priv->cfg->ops->hcmd->set_rxon_chain)
-                       priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-       }
-
-       if (changes & BSS_CHANGED_ASSOC) {
-               IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
-               if (bss_conf->assoc) {
-                       priv->timestamp = bss_conf->timestamp;
-
-                       iwl_led_associate(priv);
-
-                       if (!iwl_is_rfkill(priv))
-                               priv->cfg->ops->lib->post_associate(priv, vif);
-               } else
-                       iwl_set_no_assoc(priv, vif);
-       }
-
-       if (changes && iwl_is_associated_ctx(ctx) && bss_conf->aid) {
-               IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
-                                  changes);
-               ret = iwl_send_rxon_assoc(priv, ctx);
-               if (!ret) {
-                       /* Sync active_rxon with latest change. */
-                       memcpy((void *)&ctx->active,
-                               &ctx->staging,
-                               sizeof(struct iwl_rxon_cmd));
-               }
-       }
+       priv->iw_mode = vif->type;
 
-       if (changes & BSS_CHANGED_BEACON_ENABLED) {
-               if (vif->bss_conf.enable_beacon) {
-                       memcpy(ctx->staging.bssid_addr,
-                              bss_conf->bssid, ETH_ALEN);
-                       memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
-                       iwl_led_associate(priv);
-                       iwlcore_config_ap(priv, vif);
-               } else
-                       iwl_set_no_assoc(priv, vif);
-       }
+       ctx->is_active = true;
 
-       if (changes & BSS_CHANGED_IBSS) {
-               ret = priv->cfg->ops->lib->manage_ibss_station(priv, vif,
-                                                       bss_conf->ibss_joined);
-               if (ret)
-                       IWL_ERR(priv, "failed to %s IBSS station %pM\n",
-                               bss_conf->ibss_joined ? "add" : "remove",
-                               bss_conf->bssid);
+       err = iwl_set_mode(priv, ctx);
+       if (err) {
+               if (!ctx->always_active)
+                       ctx->is_active = false;
+               return err;
        }
 
-       if (changes & BSS_CHANGED_IDLE &&
-           priv->cfg->ops->hcmd->set_pan_params) {
-               if (priv->cfg->ops->hcmd->set_pan_params(priv))
-                       IWL_ERR(priv, "failed to update PAN params\n");
+       if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist &&
+           vif->type == NL80211_IFTYPE_ADHOC) {
+               /*
+                * pretend to have high BT traffic as long as we
+                * are operating in IBSS mode, as this will cause
+                * the rate scaling etc. to behave as intended.
+                */
+               priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
        }
 
-       mutex_unlock(&priv->mutex);
-
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-EXPORT_SYMBOL(iwl_bss_info_changed);
-
-static int iwl_set_mode(struct iwl_priv *priv, struct ieee80211_vif *vif)
-{
-       struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
-
-       iwl_connection_init_rx_config(priv, ctx);
-
-       if (priv->cfg->ops->hcmd->set_rxon_chain)
-               priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
-       return iwlcore_commit_rxon(priv, ctx);
+       return 0;
 }
 
 int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
@@ -1771,7 +1385,7 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
        struct iwl_priv *priv = hw->priv;
        struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
        struct iwl_rxon_context *tmp, *ctx = NULL;
-       int err = 0;
+       int err;
 
        IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
                           vif->type, vif->addr);
@@ -1813,36 +1427,11 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 
        vif_priv->ctx = ctx;
        ctx->vif = vif;
-       /*
-        * This variable will be correct only when there's just
-        * a single context, but all code using it is for hardware
-        * that supports only one context.
-        */
-       priv->iw_mode = vif->type;
-
-       ctx->is_active = true;
-
-       err = iwl_set_mode(priv, vif);
-       if (err) {
-               if (!ctx->always_active)
-                       ctx->is_active = false;
-               goto out_err;
-       }
-
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->advanced_bt_coexist &&
-           vif->type == NL80211_IFTYPE_ADHOC) {
-               /*
-                * pretend to have high BT traffic as long as we
-                * are operating in IBSS mode, as this will cause
-                * the rate scaling etc. to behave as intended.
-                */
-               priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
-       }
 
-       goto out;
+       err = iwl_setup_interface(priv, ctx);
+       if (!err)
+               goto out;
 
- out_err:
        ctx->vif = NULL;
        priv->iw_mode = NL80211_IFTYPE_STATION;
  out:
@@ -1853,27 +1442,24 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 }
 EXPORT_SYMBOL(iwl_mac_add_interface);
 
-void iwl_mac_remove_interface(struct ieee80211_hw *hw,
-                             struct ieee80211_vif *vif)
+static void iwl_teardown_interface(struct iwl_priv *priv,
+                                  struct ieee80211_vif *vif,
+                                  bool mode_change)
 {
-       struct iwl_priv *priv = hw->priv;
        struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
 
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       mutex_lock(&priv->mutex);
-
-       WARN_ON(ctx->vif != vif);
-       ctx->vif = NULL;
+       lockdep_assert_held(&priv->mutex);
 
        if (priv->scan_vif == vif) {
                iwl_scan_cancel_timeout(priv, 200);
                iwl_force_scan_end(priv);
        }
-       iwl_set_mode(priv, vif);
 
-       if (!ctx->always_active)
-               ctx->is_active = false;
+       if (!mode_change) {
+               iwl_set_mode(priv, ctx);
+               if (!ctx->always_active)
+                       ctx->is_active = false;
+       }
 
        /*
         * When removing the IBSS interface, overwrite the
@@ -1884,210 +1470,30 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
         */
        if (vif->type == NL80211_IFTYPE_ADHOC)
                priv->bt_traffic_load = priv->notif_bt_traffic_load;
-
-       memset(priv->bssid, 0, ETH_ALEN);
-       mutex_unlock(&priv->mutex);
-
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-
 }
-EXPORT_SYMBOL(iwl_mac_remove_interface);
-
-/**
- * iwl_mac_config - mac80211 config callback
- */
-int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
-{
-       struct iwl_priv *priv = hw->priv;
-       const struct iwl_channel_info *ch_info;
-       struct ieee80211_conf *conf = &hw->conf;
-       struct ieee80211_channel *channel = conf->channel;
-       struct iwl_ht_config *ht_conf = &priv->current_ht_config;
-       struct iwl_rxon_context *ctx;
-       unsigned long flags = 0;
-       int ret = 0;
-       u16 ch;
-       int scan_active = 0;
-
-       mutex_lock(&priv->mutex);
-
-       IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
-                                       channel->hw_value, changed);
-
-       if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
-                       test_bit(STATUS_SCANNING, &priv->status))) {
-               scan_active = 1;
-               IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
-       }
-
-       if (changed & (IEEE80211_CONF_CHANGE_SMPS |
-                      IEEE80211_CONF_CHANGE_CHANNEL)) {
-               /* mac80211 uses static for non-HT which is what we want */
-               priv->current_ht_config.smps = conf->smps_mode;
-
-               /*
-                * Recalculate chain counts.
-                *
-                * If monitor mode is enabled then mac80211 will
-                * set up the SM PS mode to OFF if an HT channel is
-                * configured.
-                */
-               if (priv->cfg->ops->hcmd->set_rxon_chain)
-                       for_each_context(priv, ctx)
-                               priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-       }
-
-       /* during scanning mac80211 will delay channel setting until
-        * scan finish with changed = 0
-        */
-       if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
-               if (scan_active)
-                       goto set_ch_out;
-
-               ch = channel->hw_value;
-               ch_info = iwl_get_channel_info(priv, channel->band, ch);
-               if (!is_channel_valid(ch_info)) {
-                       IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
-                       ret = -EINVAL;
-                       goto set_ch_out;
-               }
 
-               spin_lock_irqsave(&priv->lock, flags);
-
-               for_each_context(priv, ctx) {
-                       /* Configure HT40 channels */
-                       ctx->ht.enabled = conf_is_ht(conf);
-                       if (ctx->ht.enabled) {
-                               if (conf_is_ht40_minus(conf)) {
-                                       ctx->ht.extension_chan_offset =
-                                               IEEE80211_HT_PARAM_CHA_SEC_BELOW;
-                                       ctx->ht.is_40mhz = true;
-                               } else if (conf_is_ht40_plus(conf)) {
-                                       ctx->ht.extension_chan_offset =
-                                               IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
-                                       ctx->ht.is_40mhz = true;
-                               } else {
-                                       ctx->ht.extension_chan_offset =
-                                               IEEE80211_HT_PARAM_CHA_SEC_NONE;
-                                       ctx->ht.is_40mhz = false;
-                               }
-                       } else
-                               ctx->ht.is_40mhz = false;
-
-                       /*
-                        * Default to no protection. Protection mode will
-                        * later be set from BSS config in iwl_ht_conf
-                        */
-                       ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
-
-                       /* if we are switching from ht to 2.4 clear flags
-                        * from any ht related info since 2.4 does not
-                        * support ht */
-                       if ((le16_to_cpu(ctx->staging.channel) != ch))
-                               ctx->staging.flags = 0;
-
-                       iwl_set_rxon_channel(priv, channel, ctx);
-                       iwl_set_rxon_ht(priv, ht_conf);
-
-                       iwl_set_flags_for_band(priv, ctx, channel->band,
-                                              ctx->vif);
-               }
-
-               spin_unlock_irqrestore(&priv->lock, flags);
-
-               if (priv->cfg->ops->lib->update_bcast_stations)
-                       ret = priv->cfg->ops->lib->update_bcast_stations(priv);
-
- set_ch_out:
-               /* The list of supported rates and rate mask can be different
-                * for each band; since the band may have changed, reset
-                * the rate mask to what mac80211 lists */
-               iwl_set_rate(priv);
-       }
-
-       if (changed & (IEEE80211_CONF_CHANGE_PS |
-                       IEEE80211_CONF_CHANGE_IDLE)) {
-               ret = iwl_power_update_mode(priv, false);
-               if (ret)
-                       IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
-       }
-
-       if (changed & IEEE80211_CONF_CHANGE_POWER) {
-               IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
-                       priv->tx_power_user_lmt, conf->power_level);
-
-               iwl_set_tx_power(priv, conf->power_level, false);
-       }
-
-       if (!iwl_is_ready(priv)) {
-               IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
-               goto out;
-       }
-
-       if (scan_active)
-               goto out;
-
-       for_each_context(priv, ctx) {
-               if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
-                       iwlcore_commit_rxon(priv, ctx);
-               else
-                       IWL_DEBUG_INFO(priv,
-                               "Not re-sending same RXON configuration.\n");
-       }
-
-out:
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-       mutex_unlock(&priv->mutex);
-       return ret;
-}
-EXPORT_SYMBOL(iwl_mac_config);
-
-void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
+void iwl_mac_remove_interface(struct ieee80211_hw *hw,
+                             struct ieee80211_vif *vif)
 {
        struct iwl_priv *priv = hw->priv;
-       unsigned long flags;
-       /* IBSS can only be the IWL_RXON_CTX_BSS context */
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+       struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
 
-       mutex_lock(&priv->mutex);
        IWL_DEBUG_MAC80211(priv, "enter\n");
 
-       spin_lock_irqsave(&priv->lock, flags);
-       memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       /* new association get rid of ibss beacon skb */
-       if (priv->beacon_skb)
-               dev_kfree_skb(priv->beacon_skb);
-
-       priv->beacon_skb = NULL;
-
-       priv->timestamp = 0;
-
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       iwl_scan_cancel_timeout(priv, 100);
-       if (!iwl_is_ready_rf(priv)) {
-               IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
-               mutex_unlock(&priv->mutex);
-               return;
-       }
+       mutex_lock(&priv->mutex);
 
-       /* we are restarting association process
-        * clear RXON_FILTER_ASSOC_MSK bit
-        */
-       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-       iwlcore_commit_rxon(priv, ctx);
+       WARN_ON(ctx->vif != vif);
+       ctx->vif = NULL;
 
-       iwl_set_rate(priv);
+       iwl_teardown_interface(priv, vif, false);
 
+       memset(priv->bssid, 0, ETH_ALEN);
        mutex_unlock(&priv->mutex);
 
        IWL_DEBUG_MAC80211(priv, "leave\n");
+
 }
-EXPORT_SYMBOL(iwl_mac_reset_tsf);
+EXPORT_SYMBOL(iwl_mac_remove_interface);
 
 int iwl_alloc_txq_mem(struct iwl_priv *priv)
 {
@@ -2431,6 +1837,63 @@ int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
        return 0;
 }
 
+int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                            enum nl80211_iftype newtype, bool newp2p)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+       struct iwl_rxon_context *tmp;
+       u32 interface_modes;
+       int err;
+
+       newtype = ieee80211_iftype_p2p(newtype, newp2p);
+
+       mutex_lock(&priv->mutex);
+
+       interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
+
+       if (!(interface_modes & BIT(newtype))) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       if (ctx->exclusive_interface_modes & BIT(newtype)) {
+               for_each_context(priv, tmp) {
+                       if (ctx == tmp)
+                               continue;
+
+                       if (!tmp->vif)
+                               continue;
+
+                       /*
+                        * The current mode switch would be exclusive, but
+                        * another context is active ... refuse the switch.
+                        */
+                       err = -EBUSY;
+                       goto out;
+               }
+       }
+
+       /* success */
+       iwl_teardown_interface(priv, vif, true);
+       vif->type = newtype;
+       err = iwl_setup_interface(priv, ctx);
+       WARN_ON(err);
+       /*
+        * We've switched internally, but submitting to the
+        * device may have failed for some reason. Mask this
+        * error, because otherwise mac80211 will not switch
+        * (and set the interface type back) and we'll be
+        * out of sync with it.
+        */
+       err = 0;
+
+ out:
+       mutex_unlock(&priv->mutex);
+       return err;
+}
+EXPORT_SYMBOL(iwl_mac_change_interface);
+
 /**
  * iwl_bg_monitor_recover - Timer callback to check for stuck queue and recover
  *
@@ -2584,8 +2047,9 @@ EXPORT_SYMBOL(iwl_add_beacon_time);
 
 #ifdef CONFIG_PM
 
-int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+int iwl_pci_suspend(struct device *device)
 {
+       struct pci_dev *pdev = to_pci_dev(device);
        struct iwl_priv *priv = pci_get_drvdata(pdev);
 
        /*
@@ -2597,18 +2061,14 @@ int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
         */
        iwl_apm_stop(priv);
 
-       pci_save_state(pdev);
-       pci_disable_device(pdev);
-       pci_set_power_state(pdev, PCI_D3hot);
-
        return 0;
 }
 EXPORT_SYMBOL(iwl_pci_suspend);
 
-int iwl_pci_resume(struct pci_dev *pdev)
+int iwl_pci_resume(struct device *device)
 {
+       struct pci_dev *pdev = to_pci_dev(device);
        struct iwl_priv *priv = pci_get_drvdata(pdev);
-       int ret;
        bool hw_rfkill = false;
 
        /*
@@ -2617,11 +2077,6 @@ int iwl_pci_resume(struct pci_dev *pdev)
         */
        pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
 
-       pci_set_power_state(pdev, PCI_D0);
-       ret = pci_enable_device(pdev);
-       if (ret)
-               return ret;
-       pci_restore_state(pdev);
        iwl_enable_interrupts(priv);
 
        if (!(iwl_read32(priv, CSR_GP_CNTRL) &
@@ -2639,4 +2094,14 @@ int iwl_pci_resume(struct pci_dev *pdev)
 }
 EXPORT_SYMBOL(iwl_pci_resume);
 
+const struct dev_pm_ops iwl_pm_ops = {
+       .suspend = iwl_pci_suspend,
+       .resume = iwl_pci_resume,
+       .freeze = iwl_pci_suspend,
+       .thaw = iwl_pci_resume,
+       .poweroff = iwl_pci_suspend,
+       .restore = iwl_pci_resume,
+};
+EXPORT_SYMBOL(iwl_pm_ops);
+
 #endif /* CONFIG_PM */
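
With the PCI power-state handling dropped from the suspend/resume pair above, the PCI core performs those steps itself once the driver hangs the new dev_pm_ops table off its pci_driver. A minimal sketch of that hookup, assuming the usual probe/remove hooks and ID table (those identifiers are placeholders, not part of this patch); IWL_PM_OPS is the macro added in the header hunk below and evaluates to NULL when CONFIG_PM is not set:

static struct pci_driver iwl_sample_pci_driver = {
        .name = "iwlagn",                               /* placeholder name */
        .id_table = iwl_sample_hw_card_ids,             /* hypothetical ID table */
        .probe = iwl_sample_pci_probe,                  /* hypothetical probe hook */
        .remove = __devexit_p(iwl_sample_pci_remove),   /* hypothetical remove hook */
        .driver.pm = IWL_PM_OPS,        /* &iwl_pm_ops, or NULL without CONFIG_PM */
};
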
index 64527def059f7de76ac0e4bdcadd0147576805c1..ee8cf240d65d9a1ff6a90c6a8bc1e744bebf1454 100644 (file)
@@ -120,6 +120,14 @@ struct iwl_apm_ops {
        void (*config)(struct iwl_priv *priv);
 };
 
+struct iwl_isr_ops {
+       irqreturn_t (*isr) (int irq, void *data);
+       void (*free)(struct iwl_priv *priv);
+       int (*alloc)(struct iwl_priv *priv);
+       int (*reset)(struct iwl_priv *priv);
+       void (*disable)(struct iwl_priv *priv);
+};
+
 struct iwl_debugfs_ops {
        ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos);
@@ -193,20 +201,15 @@ struct iwl_lib_ops {
        /* power */
        int (*send_tx_power) (struct iwl_priv *priv);
        void (*update_chain_flags)(struct iwl_priv *priv);
-       void (*post_associate)(struct iwl_priv *priv,
-                              struct ieee80211_vif *vif);
-       void (*config_ap)(struct iwl_priv *priv, struct ieee80211_vif *vif);
-       irqreturn_t (*isr) (int irq, void *data);
+
+       /* isr */
+       struct iwl_isr_ops isr_ops;
 
        /* eeprom operations (as defined in iwl-eeprom.h) */
        struct iwl_eeprom_ops eeprom_ops;
 
        /* temperature */
        struct iwl_temp_ops temp_ops;
-       /* station management */
-       int (*manage_ibss_station)(struct iwl_priv *priv,
-                                  struct ieee80211_vif *vif, bool add);
-       int (*update_bcast_stations)(struct iwl_priv *priv);
        /* recover from tx queue stall */
        void (*recover_from_tx_stall)(unsigned long data);
        /* check for plcp health */
@@ -235,12 +238,23 @@ struct iwl_nic_ops {
        void (*additional_nic_config)(struct iwl_priv *priv);
 };
 
+struct iwl_legacy_ops {
+       void (*post_associate)(struct iwl_priv *priv);
+       void (*config_ap)(struct iwl_priv *priv);
+       /* station management */
+       int (*update_bcast_stations)(struct iwl_priv *priv);
+       int (*manage_ibss_station)(struct iwl_priv *priv,
+                                  struct ieee80211_vif *vif, bool add);
+};
+
 struct iwl_ops {
        const struct iwl_lib_ops *lib;
        const struct iwl_hcmd_ops *hcmd;
        const struct iwl_hcmd_utils_ops *utils;
        const struct iwl_led_ops *led;
        const struct iwl_nic_ops *nic;
+       const struct iwl_legacy_ops *legacy;
+       const struct ieee80211_ops *ieee80211_ops;
 };
 
 struct iwl_mod_params {
@@ -276,6 +290,7 @@ struct iwl_mod_params {
  *     sensitivity calibration operation
  * @chain_noise_calib_by_driver: driver has the capability to perform
  *     chain noise calibration operation
+ * @shadow_reg_enable: HW shadow register bit
 */
 struct iwl_base_params {
        int eeprom_size;
@@ -306,6 +321,7 @@ struct iwl_base_params {
        const bool ucode_tracing;
        const bool sensitivity_calib_by_driver;
        const bool chain_noise_calib_by_driver;
+       const bool shadow_reg_enable;
 };
 /*
  * @advanced_bt_coexist: support advanced bt coexist
@@ -396,8 +412,7 @@ struct iwl_cfg {
  *   L i b                 *
  ***************************/
 
-struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
-               struct ieee80211_ops *hw_ops);
+struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg);
 int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
                    const struct ieee80211_tx_queue_params *params);
 int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw);
@@ -425,23 +440,16 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv,
                           u32 decrypt_res,
                           struct ieee80211_rx_status *stats);
 void iwl_irq_handle_error(struct iwl_priv *priv);
-void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif);
-void iwl_bss_info_changed(struct ieee80211_hw *hw,
-                                    struct ieee80211_vif *vif,
-                                    struct ieee80211_bss_conf *bss_conf,
-                                    u32 changes);
 int iwl_mac_add_interface(struct ieee80211_hw *hw,
                          struct ieee80211_vif *vif);
 void iwl_mac_remove_interface(struct ieee80211_hw *hw,
                              struct ieee80211_vif *vif);
-int iwl_mac_config(struct ieee80211_hw *hw, u32 changed);
-void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif);
-void iwl_mac_reset_tsf(struct ieee80211_hw *hw);
+int iwl_mac_change_interface(struct ieee80211_hw *hw,
+                            struct ieee80211_vif *vif,
+                            enum nl80211_iftype newtype, bool newp2p);
 int iwl_alloc_txq_mem(struct iwl_priv *priv);
 void iwl_free_txq_mem(struct iwl_priv *priv);
-void iwlcore_tx_cmd_protection(struct iwl_priv *priv,
-                              struct ieee80211_tx_info *info,
-                              __le16 fc, __le32 *tx_flags);
+
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 int iwl_alloc_traffic_mem(struct iwl_priv *priv);
 void iwl_free_traffic_mem(struct iwl_priv *priv);
@@ -598,7 +606,6 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
 /*****************************************************
  * PCI                                              *
  *****************************************************/
-irqreturn_t iwl_isr_legacy(int irq, void *data);
 
 static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv)
 {
@@ -615,9 +622,17 @@ __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
                           u32 addon, u32 beacon_interval);
 
 #ifdef CONFIG_PM
-int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state);
-int iwl_pci_resume(struct pci_dev *pdev);
-#endif /* CONFIG_PM */
+int iwl_pci_suspend(struct device *device);
+int iwl_pci_resume(struct device *device);
+extern const struct dev_pm_ops iwl_pm_ops;
+
+#define IWL_PM_OPS     (&iwl_pm_ops)
+
+#else /* !CONFIG_PM */
+
+#define IWL_PM_OPS     NULL
+
+#endif /* !CONFIG_PM */
 
 /*****************************************************
 *  Error Handling Debugging
@@ -724,11 +739,6 @@ static inline int iwlcore_commit_rxon(struct iwl_priv *priv,
 {
        return priv->cfg->ops->hcmd->commit_rxon(priv, ctx);
 }
-static inline void iwlcore_config_ap(struct iwl_priv *priv,
-                                    struct ieee80211_vif *vif)
-{
-       priv->cfg->ops->lib->config_ap(priv, vif);
-}
 static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
                        struct iwl_priv *priv, enum ieee80211_band band)
 {
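
The mac80211-facing legacy callbacks move out of iwl_lib_ops into the new iwl_legacy_ops table, and each device now also exposes its ieee80211_ops through struct iwl_ops. A hedged sketch of how a single-context device could populate the split tables; every identifier below is a hypothetical placeholder, shown only to illustrate where the handlers land:

static const struct iwl_legacy_ops iwl_sample_legacy_ops = {
        .post_associate = sample_post_associate,                /* hypothetical */
        .config_ap = sample_config_ap,                          /* hypothetical */
        .manage_ibss_station = sample_manage_ibss_station,      /* hypothetical */
        .update_bcast_stations = sample_update_bcast_stations,  /* hypothetical */
};

static const struct iwl_ops iwl_sample_ops = {
        .lib = &iwl_sample_lib,                 /* hypothetical */
        .hcmd = &iwl_sample_hcmd,               /* hypothetical */
        .utils = &iwl_sample_hcmd_utils,        /* hypothetical */
        .legacy = &iwl_sample_legacy_ops,
        .ieee80211_ops = &iwl_sample_hw_ops,    /* see the iwl-legacy.h sketch below */
};
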
index 2aa15ab13892541fab426e0dac361e2d3b279adb..b80bf7dff55bda64556ddb63de0c816bdddfe546 100644 (file)
 
 #define CSR_LED_REG             (CSR_BASE+0x094)
 #define CSR_DRAM_INT_TBL_REG   (CSR_BASE+0x0A0)
+#define CSR_MAC_SHADOW_REG_CTRL        (CSR_BASE+0x0A8) /* 6000 and up */
+
 
 /* GIO Chicken Bits (PCI Express bus link power management) */
 #define CSR_GIO_CHICKEN_BITS    (CSR_BASE+0x100)
index 70e07fa48405b5558a4cb95a6535178cb7265f1f..9fcaaf0cfe93c4fa3bcb6bc959d064dbe613e33a 100644 (file)
@@ -1162,6 +1162,8 @@ struct iwl_rxon_context {
         */
        bool always_active, is_active;
 
+       bool ht_need_multiple_chains;
+
        enum iwl_rxon_context_id ctxid;
 
        u32 interface_modes, exclusive_interface_modes;
@@ -1517,6 +1519,7 @@ struct iwl_priv {
        s8 tx_power_user_lmt;
        s8 tx_power_device_lmt;
        s8 tx_power_lmt_in_half_dbm; /* max tx power in half-dBm format */
+       s8 tx_power_next;
 
 
 #ifdef CONFIG_IWLWIFI_DEBUG
index 86c2b6fed0c6beeeb920ccc4adfb54dd5e0b990f..5a9129219c903c9e01846b4cf6e037e5c653756d 100644 (file)
@@ -134,6 +134,7 @@ int iwl_led_associate(struct iwl_priv *priv)
 
        return 0;
 }
+EXPORT_SYMBOL(iwl_led_associate);
 
 int iwl_led_disassociate(struct iwl_priv *priv)
 {
@@ -141,6 +142,7 @@ int iwl_led_disassociate(struct iwl_priv *priv)
 
        return 0;
 }
+EXPORT_SYMBOL(iwl_led_disassociate);
 
 /*
  * calculate blink rate according to last second Tx/Rx activities
diff --git a/drivers/net/wireless/iwlwifi/iwl-legacy.c b/drivers/net/wireless/iwlwifi/iwl-legacy.c
new file mode 100644 (file)
index 0000000..a08b4e5
--- /dev/null
@@ -0,0 +1,662 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <net/mac80211.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-helpers.h"
+#include "iwl-legacy.h"
+
+static void iwl_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+               return;
+
+       if (!ctx->is_active)
+               return;
+
+       ctx->qos_data.def_qos_parm.qos_flags = 0;
+
+       if (ctx->qos_data.qos_active)
+               ctx->qos_data.def_qos_parm.qos_flags |=
+                       QOS_PARAM_FLG_UPDATE_EDCA_MSK;
+
+       if (ctx->ht.enabled)
+               ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
+
+       IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
+                     ctx->qos_data.qos_active,
+                     ctx->qos_data.def_qos_parm.qos_flags);
+
+       iwl_send_cmd_pdu_async(priv, ctx->qos_cmd,
+                              sizeof(struct iwl_qosparam_cmd),
+                              &ctx->qos_data.def_qos_parm, NULL);
+}
+
+/**
+ * iwl_legacy_mac_config - mac80211 config callback
+ */
+int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
+{
+       struct iwl_priv *priv = hw->priv;
+       const struct iwl_channel_info *ch_info;
+       struct ieee80211_conf *conf = &hw->conf;
+       struct ieee80211_channel *channel = conf->channel;
+       struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+       struct iwl_rxon_context *ctx;
+       unsigned long flags = 0;
+       int ret = 0;
+       u16 ch;
+       int scan_active = 0;
+       bool ht_changed[NUM_IWL_RXON_CTX] = {};
+
+       if (WARN_ON(!priv->cfg->ops->legacy))
+               return -EOPNOTSUPP;
+
+       mutex_lock(&priv->mutex);
+
+       IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
+                                       channel->hw_value, changed);
+
+       if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
+                       test_bit(STATUS_SCANNING, &priv->status))) {
+               scan_active = 1;
+               IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
+       }
+
+       if (changed & (IEEE80211_CONF_CHANGE_SMPS |
+                      IEEE80211_CONF_CHANGE_CHANNEL)) {
+               /* mac80211 uses static for non-HT which is what we want */
+               priv->current_ht_config.smps = conf->smps_mode;
+
+               /*
+                * Recalculate chain counts.
+                *
+                * If monitor mode is enabled then mac80211 will
+                * set up the SM PS mode to OFF if an HT channel is
+                * configured.
+                */
+               if (priv->cfg->ops->hcmd->set_rxon_chain)
+                       for_each_context(priv, ctx)
+                               priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+       }
+
+       /* during scanning mac80211 will delay channel setting until
+        * scan finish with changed = 0
+        */
+       if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
+               if (scan_active)
+                       goto set_ch_out;
+
+               ch = channel->hw_value;
+               ch_info = iwl_get_channel_info(priv, channel->band, ch);
+               if (!is_channel_valid(ch_info)) {
+                       IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
+                       ret = -EINVAL;
+                       goto set_ch_out;
+               }
+
+               spin_lock_irqsave(&priv->lock, flags);
+
+               for_each_context(priv, ctx) {
+                       /* Configure HT40 channels */
+                       if (ctx->ht.enabled != conf_is_ht(conf)) {
+                               ctx->ht.enabled = conf_is_ht(conf);
+                               ht_changed[ctx->ctxid] = true;
+                       }
+                       if (ctx->ht.enabled) {
+                               if (conf_is_ht40_minus(conf)) {
+                                       ctx->ht.extension_chan_offset =
+                                               IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+                                       ctx->ht.is_40mhz = true;
+                               } else if (conf_is_ht40_plus(conf)) {
+                                       ctx->ht.extension_chan_offset =
+                                               IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+                                       ctx->ht.is_40mhz = true;
+                               } else {
+                                       ctx->ht.extension_chan_offset =
+                                               IEEE80211_HT_PARAM_CHA_SEC_NONE;
+                                       ctx->ht.is_40mhz = false;
+                               }
+                       } else
+                               ctx->ht.is_40mhz = false;
+
+                       /*
+                        * Default to no protection. Protection mode will
+                        * later be set from BSS config in iwl_ht_conf
+                        */
+                       ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
+
+                       /* if we are switching from ht to 2.4 clear flags
+                        * from any ht related info since 2.4 does not
+                        * support ht */
+                       if ((le16_to_cpu(ctx->staging.channel) != ch))
+                               ctx->staging.flags = 0;
+
+                       iwl_set_rxon_channel(priv, channel, ctx);
+                       iwl_set_rxon_ht(priv, ht_conf);
+
+                       iwl_set_flags_for_band(priv, ctx, channel->band,
+                                              ctx->vif);
+               }
+
+               spin_unlock_irqrestore(&priv->lock, flags);
+
+               if (priv->cfg->ops->legacy->update_bcast_stations)
+                       ret = priv->cfg->ops->legacy->update_bcast_stations(priv);
+
+ set_ch_out:
+               /* The list of supported rates and rate mask can be different
+                * for each band; since the band may have changed, reset
+                * the rate mask to what mac80211 lists */
+               iwl_set_rate(priv);
+       }
+
+       if (changed & (IEEE80211_CONF_CHANGE_PS |
+                       IEEE80211_CONF_CHANGE_IDLE)) {
+               ret = iwl_power_update_mode(priv, false);
+               if (ret)
+                       IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
+       }
+
+       if (changed & IEEE80211_CONF_CHANGE_POWER) {
+               IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
+                       priv->tx_power_user_lmt, conf->power_level);
+
+               iwl_set_tx_power(priv, conf->power_level, false);
+       }
+
+       if (!iwl_is_ready(priv)) {
+               IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
+               goto out;
+       }
+
+       if (scan_active)
+               goto out;
+
+       for_each_context(priv, ctx) {
+               if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
+                       iwlcore_commit_rxon(priv, ctx);
+               else
+                       IWL_DEBUG_INFO(priv,
+                               "Not re-sending same RXON configuration.\n");
+               if (ht_changed[ctx->ctxid])
+                       iwl_update_qos(priv, ctx);
+       }
+
+out:
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+       mutex_unlock(&priv->mutex);
+       return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_mac_config);
+
+void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw)
+{
+       struct iwl_priv *priv = hw->priv;
+       unsigned long flags;
+       /* IBSS can only be the IWL_RXON_CTX_BSS context */
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+       if (WARN_ON(!priv->cfg->ops->legacy))
+               return;
+
+       mutex_lock(&priv->mutex);
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       spin_lock_irqsave(&priv->lock, flags);
+       memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       /* new association get rid of ibss beacon skb */
+       if (priv->beacon_skb)
+               dev_kfree_skb(priv->beacon_skb);
+
+       priv->beacon_skb = NULL;
+
+       priv->timestamp = 0;
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       iwl_scan_cancel_timeout(priv, 100);
+       if (!iwl_is_ready_rf(priv)) {
+               IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
+               mutex_unlock(&priv->mutex);
+               return;
+       }
+
+       /* we are restarting association process
+        * clear RXON_FILTER_ASSOC_MSK bit
+        */
+       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+       iwlcore_commit_rxon(priv, ctx);
+
+       iwl_set_rate(priv);
+
+       mutex_unlock(&priv->mutex);
+
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);
+
+static void iwl_ht_conf(struct iwl_priv *priv,
+                       struct ieee80211_vif *vif)
+{
+       struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+       struct ieee80211_sta *sta;
+       struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+       struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+
+       IWL_DEBUG_ASSOC(priv, "enter:\n");
+
+       if (!ctx->ht.enabled)
+               return;
+
+       ctx->ht.protection =
+               bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
+       ctx->ht.non_gf_sta_present =
+               !!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+
+       ht_conf->single_chain_sufficient = false;
+
+       switch (vif->type) {
+       case NL80211_IFTYPE_STATION:
+               rcu_read_lock();
+               sta = ieee80211_find_sta(vif, bss_conf->bssid);
+               if (sta) {
+                       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+                       int maxstreams;
+
+                       maxstreams = (ht_cap->mcs.tx_params &
+                                     IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
+                                       >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
+                       maxstreams += 1;
+
+                       if ((ht_cap->mcs.rx_mask[1] == 0) &&
+                           (ht_cap->mcs.rx_mask[2] == 0))
+                               ht_conf->single_chain_sufficient = true;
+                       if (maxstreams <= 1)
+                               ht_conf->single_chain_sufficient = true;
+               } else {
+                       /*
+                        * If at all, this can only happen through a race
+                        * when the AP disconnects us while we're still
+                        * setting up the connection, in that case mac80211
+                        * will soon tell us about that.
+                        */
+                       ht_conf->single_chain_sufficient = true;
+               }
+               rcu_read_unlock();
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               ht_conf->single_chain_sufficient = true;
+               break;
+       default:
+               break;
+       }
+
+       IWL_DEBUG_ASSOC(priv, "leave\n");
+}
+
+static inline void iwl_set_no_assoc(struct iwl_priv *priv,
+                                   struct ieee80211_vif *vif)
+{
+       struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+
+       iwl_led_disassociate(priv);
+       /*
+        * inform the ucode that there is no longer an
+        * association and that no more packets should be
+        * sent
+        */
+       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+       ctx->staging.assoc_id = 0;
+       iwlcore_commit_rxon(priv, ctx);
+}
+
+static void iwlcore_beacon_update(struct ieee80211_hw *hw,
+                                 struct ieee80211_vif *vif)
+{
+       struct iwl_priv *priv = hw->priv;
+       unsigned long flags;
+       __le64 timestamp;
+       struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
+
+       if (!skb)
+               return;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       lockdep_assert_held(&priv->mutex);
+
+       if (!priv->beacon_ctx) {
+               IWL_ERR(priv, "update beacon but no beacon context!\n");
+               dev_kfree_skb(skb);
+               return;
+       }
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       if (priv->beacon_skb)
+               dev_kfree_skb(priv->beacon_skb);
+
+       priv->beacon_skb = skb;
+
+       timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
+       priv->timestamp = le64_to_cpu(timestamp);
+
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       if (!iwl_is_ready_rf(priv)) {
+               IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
+               return;
+       }
+
+       priv->cfg->ops->legacy->post_associate(priv);
+}
+
+void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
+                                    struct ieee80211_vif *vif,
+                                    struct ieee80211_bss_conf *bss_conf,
+                                    u32 changes)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+       int ret;
+
+       if (WARN_ON(!priv->cfg->ops->legacy))
+               return;
+
+       IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
+
+       if (!iwl_is_alive(priv))
+               return;
+
+       mutex_lock(&priv->mutex);
+
+       if (changes & BSS_CHANGED_QOS) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&priv->lock, flags);
+               ctx->qos_data.qos_active = bss_conf->qos;
+               iwl_update_qos(priv, ctx);
+               spin_unlock_irqrestore(&priv->lock, flags);
+       }
+
+       if (changes & BSS_CHANGED_BEACON_ENABLED) {
+               /*
+                * the add_interface code must make sure we only ever
+                * have a single interface that could be beaconing at
+                * any time.
+                */
+               if (vif->bss_conf.enable_beacon)
+                       priv->beacon_ctx = ctx;
+               else
+                       priv->beacon_ctx = NULL;
+       }
+
+       if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_AP) {
+               dev_kfree_skb(priv->beacon_skb);
+               priv->beacon_skb = ieee80211_beacon_get(hw, vif);
+       }
+
+       if (changes & BSS_CHANGED_BEACON_INT && vif->type == NL80211_IFTYPE_AP)
+               iwl_send_rxon_timing(priv, ctx);
+
+       if (changes & BSS_CHANGED_BSSID) {
+               IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
+
+               /*
+                * If there is currently a HW scan going on in the
+                * background then we need to cancel it else the RXON
+                * below/in post_associate will fail.
+                */
+               if (iwl_scan_cancel_timeout(priv, 100)) {
+                       IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
+                       IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
+                       mutex_unlock(&priv->mutex);
+                       return;
+               }
+
+               /* mac80211 only sets assoc when in STATION mode */
+               if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
+                       memcpy(ctx->staging.bssid_addr,
+                              bss_conf->bssid, ETH_ALEN);
+
+                       /* currently needed in a few places */
+                       memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
+               } else {
+                       ctx->staging.filter_flags &=
+                               ~RXON_FILTER_ASSOC_MSK;
+               }
+
+       }
+
+       /*
+        * This needs to be after setting the BSSID in case
+        * mac80211 decides to do both changes at once because
+        * it will invoke post_associate.
+        */
+       if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
+               iwlcore_beacon_update(hw, vif);
+
+       if (changes & BSS_CHANGED_ERP_PREAMBLE) {
+               IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
+                                  bss_conf->use_short_preamble);
+               if (bss_conf->use_short_preamble)
+                       ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+               else
+                       ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+       }
+
+       if (changes & BSS_CHANGED_ERP_CTS_PROT) {
+               IWL_DEBUG_MAC80211(priv, "ERP_CTS %d\n", bss_conf->use_cts_prot);
+               if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
+                       ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
+               else
+                       ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
+               if (bss_conf->use_cts_prot)
+                       ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+               else
+                       ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
+       }
+
+       if (changes & BSS_CHANGED_BASIC_RATES) {
+               /* XXX use this information
+                *
+                * To do that, remove code from iwl_set_rate() and put something
+                * like this here:
+                *
+               if (A-band)
+                       ctx->staging.ofdm_basic_rates =
+                               bss_conf->basic_rates;
+               else
+                       ctx->staging.ofdm_basic_rates =
+                               bss_conf->basic_rates >> 4;
+                       ctx->staging.cck_basic_rates =
+                               bss_conf->basic_rates & 0xF;
+                */
+       }
+
+       if (changes & BSS_CHANGED_HT) {
+               iwl_ht_conf(priv, vif);
+
+               if (priv->cfg->ops->hcmd->set_rxon_chain)
+                       priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+       }
+
+       if (changes & BSS_CHANGED_ASSOC) {
+               IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
+               if (bss_conf->assoc) {
+                       priv->timestamp = bss_conf->timestamp;
+
+                       iwl_led_associate(priv);
+
+                       if (!iwl_is_rfkill(priv))
+                               priv->cfg->ops->legacy->post_associate(priv);
+               } else
+                       iwl_set_no_assoc(priv, vif);
+       }
+
+       if (changes && iwl_is_associated_ctx(ctx) && bss_conf->aid) {
+               IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
+                                  changes);
+               ret = iwl_send_rxon_assoc(priv, ctx);
+               if (!ret) {
+                       /* Sync active_rxon with latest change. */
+                       memcpy((void *)&ctx->active,
+                               &ctx->staging,
+                               sizeof(struct iwl_rxon_cmd));
+               }
+       }
+
+       if (changes & BSS_CHANGED_BEACON_ENABLED) {
+               if (vif->bss_conf.enable_beacon) {
+                       memcpy(ctx->staging.bssid_addr,
+                              bss_conf->bssid, ETH_ALEN);
+                       memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
+                       iwl_led_associate(priv);
+                       priv->cfg->ops->legacy->config_ap(priv);
+               } else
+                       iwl_set_no_assoc(priv, vif);
+       }
+
+       if (changes & BSS_CHANGED_IBSS) {
+               ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
+                                                       bss_conf->ibss_joined);
+               if (ret)
+                       IWL_ERR(priv, "failed to %s IBSS station %pM\n",
+                               bss_conf->ibss_joined ? "add" : "remove",
+                               bss_conf->bssid);
+       }
+
+       mutex_unlock(&priv->mutex);
+
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);
+
+irqreturn_t iwl_isr_legacy(int irq, void *data)
+{
+       struct iwl_priv *priv = data;
+       u32 inta, inta_mask;
+       u32 inta_fh;
+       unsigned long flags;
+       if (!priv)
+               return IRQ_NONE;
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       /* Disable (but don't clear!) interrupts here to avoid
+        *    back-to-back ISRs and sporadic interrupts from our NIC.
+        * If we have something to service, the tasklet will re-enable ints.
+        * If we *don't* have something, we'll re-enable before leaving here. */
+       inta_mask = iwl_read32(priv, CSR_INT_MASK);  /* just for debug */
+       iwl_write32(priv, CSR_INT_MASK, 0x00000000);
+
+       /* Discover which interrupts are active/pending */
+       inta = iwl_read32(priv, CSR_INT);
+       inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
+
+       /* Ignore interrupt if there's nothing in NIC to service.
+        * This may be due to IRQ shared with another device,
+        * or due to sporadic interrupts thrown from our NIC. */
+       if (!inta && !inta_fh) {
+               IWL_DEBUG_ISR(priv,
+                       "Ignore interrupt, inta == 0, inta_fh == 0\n");
+               goto none;
+       }
+
+       if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
+               /* Hardware disappeared. It might have already raised
+                * an interrupt */
+               IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
+               goto unplugged;
+       }
+
+       IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
+                     inta, inta_mask, inta_fh);
+
+       inta &= ~CSR_INT_BIT_SCD;
+
+       /* iwl_irq_tasklet() will service interrupts and re-enable them */
+       if (likely(inta || inta_fh))
+               tasklet_schedule(&priv->irq_tasklet);
+
+unplugged:
+       spin_unlock_irqrestore(&priv->lock, flags);
+       return IRQ_HANDLED;
+
+none:
+       /* re-enable interrupts here since we don't have anything to service. */
+       /* only re-enable if disabled by irq */
+       if (test_bit(STATUS_INT_ENABLED, &priv->status))
+               iwl_enable_interrupts(priv);
+       spin_unlock_irqrestore(&priv->lock, flags);
+       return IRQ_NONE;
+}
+EXPORT_SYMBOL(iwl_isr_legacy);
+
+/*
+ *  iwl_legacy_tx_cmd_protection: Set rts/cts. Only 3945 and 4965 share this
+ *  function.
+ */
+void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
+                              struct ieee80211_tx_info *info,
+                              __le16 fc, __le32 *tx_flags)
+{
+       if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
+               *tx_flags |= TX_CMD_FLG_RTS_MSK;
+               *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
+               *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+
+               if (!ieee80211_is_mgmt(fc))
+                       return;
+
+               switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+               case cpu_to_le16(IEEE80211_STYPE_AUTH):
+               case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+               case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+               case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+                       *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+                       *tx_flags |= TX_CMD_FLG_CTS_MSK;
+                       break;
+               }
+       } else if (info->control.rates[0].flags &
+                  IEEE80211_TX_RC_USE_CTS_PROTECT) {
+               *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+               *tx_flags |= TX_CMD_FLG_CTS_MSK;
+               *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+       }
+}
+EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);
diff --git a/drivers/net/wireless/iwlwifi/iwl-legacy.h b/drivers/net/wireless/iwlwifi/iwl-legacy.h
new file mode 100644 (file)
index 0000000..9f7b2f9
--- /dev/null
@@ -0,0 +1,79 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_h__
+#define __iwl_legacy_h__
+
+/* mac80211 handlers */
+int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed);
+void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw);
+void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
+                                    struct ieee80211_vif *vif,
+                                    struct ieee80211_bss_conf *bss_conf,
+                                    u32 changes);
+void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
+                               struct ieee80211_tx_info *info,
+                               __le16 fc, __le32 *tx_flags);
+
+irqreturn_t iwl_isr_legacy(int irq, void *data);
+
+#endif /* __iwl_legacy_h__ */
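
The new iwl-legacy.h collects the mac80211 callbacks that the older ("legacy") iwlwifi devices share, so each driver only has to wire them into its ieee80211_ops table; the iwl3945_hw_ops update later in this patch does exactly that. A minimal sketch of the wiring, not part of the patch, with a hypothetical driver-specific .tx handler:

static const struct ieee80211_ops example_legacy_ops = {
	.tx               = my_driver_mac_tx,              /* hypothetical, driver specific */
	.config           = iwl_legacy_mac_config,          /* shared legacy handlers */
	.reset_tsf        = iwl_legacy_mac_reset_tsf,
	.bss_info_changed = iwl_legacy_mac_bss_info_changed,
};
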
index 49d7788937a9eccc80891c703ea4220c1a98f100..b7abd86676fdeeb304a6dffb58eb96bc11435c20 100644 (file)
@@ -263,70 +263,95 @@ static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
                                sizeof(struct iwl_powertable_cmd), cmd);
 }
 
-/* priv->mutex must be held */
-int iwl_power_update_mode(struct iwl_priv *priv, bool force)
+static void iwl_power_build_cmd(struct iwl_priv *priv,
+                               struct iwl_powertable_cmd *cmd)
 {
-       int ret = 0;
        bool enabled = priv->hw->conf.flags & IEEE80211_CONF_PS;
-       bool update_chains;
-       struct iwl_powertable_cmd cmd;
        int dtimper;
 
-       /* Don't update the RX chain when chain noise calibration is running */
-       update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
-                       priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
-
        dtimper = priv->hw->conf.ps_dtim_period ?: 1;
 
        if (priv->cfg->base_params->broken_powersave)
-               iwl_power_sleep_cam_cmd(priv, &cmd);
+               iwl_power_sleep_cam_cmd(priv, cmd);
        else if (priv->cfg->base_params->supports_idle &&
                 priv->hw->conf.flags & IEEE80211_CONF_IDLE)
-               iwl_static_sleep_cmd(priv, &cmd, IWL_POWER_INDEX_5, 20);
+               iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
        else if (priv->cfg->ops->lib->tt_ops.lower_power_detection &&
                 priv->cfg->ops->lib->tt_ops.tt_power_mode &&
                 priv->cfg->ops->lib->tt_ops.lower_power_detection(priv)) {
                /* in thermal throttling low power state */
-               iwl_static_sleep_cmd(priv, &cmd,
+               iwl_static_sleep_cmd(priv, cmd,
                    priv->cfg->ops->lib->tt_ops.tt_power_mode(priv), dtimper);
        } else if (!enabled)
-               iwl_power_sleep_cam_cmd(priv, &cmd);
+               iwl_power_sleep_cam_cmd(priv, cmd);
        else if (priv->power_data.debug_sleep_level_override >= 0)
-               iwl_static_sleep_cmd(priv, &cmd,
+               iwl_static_sleep_cmd(priv, cmd,
                                     priv->power_data.debug_sleep_level_override,
                                     dtimper);
        else if (no_sleep_autoadjust)
-               iwl_static_sleep_cmd(priv, &cmd, IWL_POWER_INDEX_1, dtimper);
+               iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_1, dtimper);
        else
-               iwl_power_fill_sleep_cmd(priv, &cmd,
+               iwl_power_fill_sleep_cmd(priv, cmd,
                                         priv->hw->conf.dynamic_ps_timeout,
                                         priv->hw->conf.max_sleep_period);
+}
+
+int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
+                      bool force)
+{
+       int ret;
+       bool update_chains;
+
+       lockdep_assert_held(&priv->mutex);
+
+       /* Don't update the RX chain when chain noise calibration is running */
+       update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
+                       priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
+
+       if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
+               return 0;
+
+       if (!iwl_is_ready_rf(priv))
+               return -EIO;
+
+       /* scan completion uses sleep_cmd_next, so keep it up to date */
+       memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
+       if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
+               IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
+               return 0;
+       }
+
+       if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
+               set_bit(STATUS_POWER_PMI, &priv->status);
 
-       if (iwl_is_ready_rf(priv) &&
-           (memcmp(&priv->power_data.sleep_cmd, &cmd, sizeof(cmd)) || force)) {
-               if (cmd.flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
-                       set_bit(STATUS_POWER_PMI, &priv->status);
-
-               ret = iwl_set_power(priv, &cmd);
-               if (!ret) {
-                       if (!(cmd.flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
-                               clear_bit(STATUS_POWER_PMI, &priv->status);
-
-                       if (priv->cfg->ops->lib->update_chain_flags &&
-                           update_chains)
-                               priv->cfg->ops->lib->update_chain_flags(priv);
-                       else if (priv->cfg->ops->lib->update_chain_flags)
-                               IWL_DEBUG_POWER(priv,
+       ret = iwl_set_power(priv, cmd);
+       if (!ret) {
+               if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
+                       clear_bit(STATUS_POWER_PMI, &priv->status);
+
+               if (priv->cfg->ops->lib->update_chain_flags && update_chains)
+                       priv->cfg->ops->lib->update_chain_flags(priv);
+               else if (priv->cfg->ops->lib->update_chain_flags)
+                       IWL_DEBUG_POWER(priv,
                                        "Cannot update the power, chain noise "
                                        "calibration running: %d\n",
                                        priv->chain_noise_data.state);
-                       memcpy(&priv->power_data.sleep_cmd, &cmd, sizeof(cmd));
-               } else
-                       IWL_ERR(priv, "set power fail, ret = %d", ret);
-       }
+
+               memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
+       } else
+               IWL_ERR(priv, "set power fail, ret = %d", ret);
 
        return ret;
 }
+EXPORT_SYMBOL(iwl_power_set_mode);
+
+int iwl_power_update_mode(struct iwl_priv *priv, bool force)
+{
+       struct iwl_powertable_cmd cmd;
+
+       iwl_power_build_cmd(priv, &cmd);
+       return iwl_power_set_mode(priv, &cmd, force);
+}
 EXPORT_SYMBOL(iwl_power_update_mode);
 
 /* initialize to default */
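
iwl_power_update_mode() is split in two: iwl_power_build_cmd() only chooses the powertable command, and iwl_power_set_mode() applies it. The apply path documents its locking requirement with lockdep_assert_held(), skips the write when the command has not changed, and, while a scan is in flight, stashes the request in sleep_cmd_next instead of sending it; the scan-completion path (seen further down in this patch) commits the stashed command later. A condensed sketch of that defer-or-apply decision, with names taken from the patch and error handling trimmed:

static int example_power_set_mode(struct iwl_priv *priv,
				  struct iwl_powertable_cmd *cmd, bool force)
{
	lockdep_assert_held(&priv->mutex);	/* caller must hold priv->mutex */

	if (!force && !memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)))
		return 0;			/* nothing changed, nothing to send */

	/* remember the latest request; scan completion re-applies it */
	memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
	if (test_bit(STATUS_SCANNING, &priv->status) && !force)
		return 0;			/* deferred until the scan ends */

	return iwl_set_power(priv, cmd);	/* send the command to the device */
}
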
index df81565a7cc49c7414a6acd2421a2f3bd0f85930..fe012032c28c1e58695f791d607ae19ee79e36d9 100644 (file)
@@ -41,10 +41,13 @@ enum iwl_power_level {
 
 struct iwl_power_mgr {
        struct iwl_powertable_cmd sleep_cmd;
+       struct iwl_powertable_cmd sleep_cmd_next;
        int debug_sleep_level_override;
        bool pci_pm;
 };
 
+int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
+                      bool force);
 int iwl_power_update_mode(struct iwl_priv *priv, bool force);
 void iwl_power_initialize(struct iwl_priv *priv);
 
index f436270ca39af6adb3290a137cd68a84138d4835..87a6fd84d4d25827e7c363638fb31b3b5fbef3c8 100644 (file)
@@ -134,28 +134,37 @@ void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q
        if (q->need_update == 0)
                goto exit_unlock;
 
-       /* If power-saving is in use, make sure device is awake */
-       if (test_bit(STATUS_POWER_PMI, &priv->status)) {
-               reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
+       if (priv->cfg->base_params->shadow_reg_enable) {
+               /* shadow register enabled */
+               /* Device expects a multiple of 8 */
+               q->write_actual = (q->write & ~0x7);
+               iwl_write32(priv, rx_wrt_ptr_reg, q->write_actual);
+       } else {
+               /* If power-saving is in use, make sure device is awake */
+               if (test_bit(STATUS_POWER_PMI, &priv->status)) {
+                       reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
 
-               if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
-                       IWL_DEBUG_INFO(priv, "Rx queue requesting wakeup, GP1 = 0x%x\n",
-                                     reg);
-                       iwl_set_bit(priv, CSR_GP_CNTRL,
-                                   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-                       goto exit_unlock;
-               }
+                       if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+                               IWL_DEBUG_INFO(priv,
+                                       "Rx queue requesting wakeup,"
+                                       " GP1 = 0x%x\n", reg);
+                               iwl_set_bit(priv, CSR_GP_CNTRL,
+                                       CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+                               goto exit_unlock;
+                       }
 
-               q->write_actual = (q->write & ~0x7);
-               iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write_actual);
+                       q->write_actual = (q->write & ~0x7);
+                       iwl_write_direct32(priv, rx_wrt_ptr_reg,
+                                       q->write_actual);
 
-       /* Else device is assumed to be awake */
-       } else {
-               /* Device expects a multiple of 8 */
-               q->write_actual = (q->write & ~0x7);
-               iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write_actual);
+               /* Else device is assumed to be awake */
+               } else {
+                       /* Device expects a multiple of 8 */
+                       q->write_actual = (q->write & ~0x7);
+                       iwl_write_direct32(priv, rx_wrt_ptr_reg,
+                               q->write_actual);
+               }
        }
-
        q->need_update = 0;
 
  exit_unlock:
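
The rewritten write-pointer update distinguishes two cases: devices with shadow registers can take the write at any time, while older devices must first check whether power management (STATUS_POWER_PMI) may have let the MAC go to sleep and, if it has, request MAC access and retry on a later pass. A simplified sketch of the branch, using the register and flag names from the patch; the TX queues further down use the same structure:

static void example_update_rx_write_ptr(struct iwl_priv *priv,
					struct iwl_rx_queue *q, u32 wrt_ptr_reg)
{
	u32 write = q->write & ~0x7;		/* device expects a multiple of 8 */

	if (priv->cfg->base_params->shadow_reg_enable) {
		iwl_write32(priv, wrt_ptr_reg, write);	/* shadow regs: always safe */
		return;
	}

	if (test_bit(STATUS_POWER_PMI, &priv->status) &&
	    (iwl_read32(priv, CSR_UCODE_DRV_GP1) &
	     CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP)) {
		/* MAC is asleep: request access and try again on the next pass */
		iwl_set_bit(priv, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
		return;
	}

	iwl_write_direct32(priv, wrt_ptr_reg, write);	/* device is awake */
}
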
index 67da31295781192a6e8446e46634772426735949..e1aa0e1daa5a71fdda39bf64078f67e8657ad067 100644 (file)
@@ -603,13 +603,16 @@ out_settings:
        if (!iwl_is_ready_rf(priv))
                goto out;
 
-       /* Since setting the TXPOWER may have been deferred while
-        * performing the scan, fire one off */
-       iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
+       /*
+        * We do not commit power settings while scan is pending,
+        * do it now if the settings changed.
+        */
+       iwl_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false);
+       iwl_set_tx_power(priv, priv->tx_power_next, false);
 
        priv->cfg->ops->utils->post_scan(priv);
 
- out:
+out:
        mutex_unlock(&priv->mutex);
 }
 
index 7261ee49f282f2c1e3a6d027a2aad2954c9b85a3..feaa3670c6bb62621b39c4b812474f1ac9a86c6f 100644 (file)
@@ -49,30 +49,39 @@ void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
        if (txq->need_update == 0)
                return;
 
-       /* if we're trying to save power */
-       if (test_bit(STATUS_POWER_PMI, &priv->status)) {
-               /* wake up nic if it's powered down ...
-                * uCode will wake up, and interrupt us again, so next
-                * time we'll skip this part. */
-               reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
-
-               if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
-                       IWL_DEBUG_INFO(priv, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
-                                     txq_id, reg);
-                       iwl_set_bit(priv, CSR_GP_CNTRL,
-                                   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-                       return;
-               }
-
-               iwl_write_direct32(priv, HBUS_TARG_WRPTR,
-                                    txq->q.write_ptr | (txq_id << 8));
-
-       /* else not in power-save mode, uCode will never sleep when we're
-        * trying to tx (during RFKILL, we're not trying to tx). */
-       } else
+       if (priv->cfg->base_params->shadow_reg_enable) {
+               /* shadow register enabled */
                iwl_write32(priv, HBUS_TARG_WRPTR,
                            txq->q.write_ptr | (txq_id << 8));
+       } else {
+               /* if we're trying to save power */
+               if (test_bit(STATUS_POWER_PMI, &priv->status)) {
+                       /* wake up nic if it's powered down ...
+                        * uCode will wake up, and interrupt us again, so next
+                        * time we'll skip this part. */
+                       reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
+
+                       if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+                               IWL_DEBUG_INFO(priv,
+                                       "Tx queue %d requesting wakeup,"
+                                       " GP1 = 0x%x\n", txq_id, reg);
+                               iwl_set_bit(priv, CSR_GP_CNTRL,
+                                       CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+                               return;
+                       }
+
+                       iwl_write_direct32(priv, HBUS_TARG_WRPTR,
+                                    txq->q.write_ptr | (txq_id << 8));
 
+               /*
+                * else not in power-save mode,
+                * uCode will never sleep when we're
+                * trying to tx (during RFKILL, we're not trying to tx).
+                */
+               } else
+                       iwl_write32(priv, HBUS_TARG_WRPTR,
+                                   txq->q.write_ptr | (txq_id << 8));
+       }
        txq->need_update = 0;
 }
 EXPORT_SYMBOL(iwl_txq_update_write_ptr);
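
In both branches above the value written to HBUS_TARG_WRPTR is the same expression: the queue's write pointer in the low bits with the TX queue index shifted into the next byte, as a small illustrative helper (not from the patch) makes explicit:

/* Pack a TX queue write pointer and queue index the way the expression above does. */
static inline u32 example_hbus_targ_wrptr(u32 write_ptr, u32 txq_id)
{
	return write_ptr | (txq_id << 8);
}
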
index 7edf8c2fb8c7c6072294854281ba40f416f1423a..931c546367ea1901104dc4f2ced1ae57eb4c083c 100644 (file)
@@ -61,6 +61,7 @@
 #include "iwl-helpers.h"
 #include "iwl-dev.h"
 #include "iwl-spectrum.h"
+#include "iwl-legacy.h"
 
 /*
  * module name, copyright, version, etc.
@@ -3057,22 +3058,22 @@ static void iwl3945_bg_rx_replenish(struct work_struct *data)
        mutex_unlock(&priv->mutex);
 }
 
-void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
+void iwl3945_post_associate(struct iwl_priv *priv)
 {
        int rc = 0;
        struct ieee80211_conf *conf = NULL;
        struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
 
-       if (!vif || !priv->is_open)
+       if (!ctx->vif || !priv->is_open)
                return;
 
-       if (vif->type == NL80211_IFTYPE_AP) {
+       if (ctx->vif->type == NL80211_IFTYPE_AP) {
                IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
                return;
        }
 
        IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
-                       vif->bss_conf.aid, ctx->active.bssid_addr);
+                       ctx->vif->bss_conf.aid, ctx->active.bssid_addr);
 
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;
@@ -3091,18 +3092,18 @@ void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
 
        ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
 
-       ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
+       ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid);
 
        IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
-                       vif->bss_conf.aid, vif->bss_conf.beacon_int);
+                       ctx->vif->bss_conf.aid, ctx->vif->bss_conf.beacon_int);
 
-       if (vif->bss_conf.use_short_preamble)
+       if (ctx->vif->bss_conf.use_short_preamble)
                ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
        else
                ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
 
        if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
-               if (vif->bss_conf.use_short_slot)
+               if (ctx->vif->bss_conf.use_short_slot)
                        ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
                else
                        ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
@@ -3110,7 +3111,7 @@ void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
 
        iwl3945_commit_rxon(priv, ctx);
 
-       switch (vif->type) {
+       switch (ctx->vif->type) {
        case NL80211_IFTYPE_STATION:
                iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
                break;
@@ -3119,7 +3120,7 @@ void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
                break;
        default:
                IWL_ERR(priv, "%s Should not be called in %d mode\n",
-                       __func__, vif->type);
+                       __func__, ctx->vif->type);
                break;
        }
 }
@@ -3234,9 +3235,10 @@ static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        return NETDEV_TX_OK;
 }
 
-void iwl3945_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
+void iwl3945_config_ap(struct iwl_priv *priv)
 {
        struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+       struct ieee80211_vif *vif = ctx->vif;
        int rc = 0;
 
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
@@ -3407,9 +3409,9 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
        ctx->staging.filter_flags |= filter_or;
 
        /*
-        * Committing directly here breaks for some reason,
-        * but we'll eventually commit the filter flags
-        * change anyway.
+        * Not committing directly because the hardware may be scanning;
+        * even when the hardware is ready, committing here breaks for some
+        * reason, but the filter flags change will be committed eventually.
         */
 
        mutex_unlock(&priv->mutex);
@@ -3824,18 +3826,19 @@ static struct attribute_group iwl3945_attribute_group = {
        .attrs = iwl3945_sysfs_entries,
 };
 
-static struct ieee80211_ops iwl3945_hw_ops = {
+struct ieee80211_ops iwl3945_hw_ops = {
        .tx = iwl3945_mac_tx,
        .start = iwl3945_mac_start,
        .stop = iwl3945_mac_stop,
        .add_interface = iwl_mac_add_interface,
        .remove_interface = iwl_mac_remove_interface,
-       .config = iwl_mac_config,
+       .change_interface = iwl_mac_change_interface,
+       .config = iwl_legacy_mac_config,
        .configure_filter = iwl3945_configure_filter,
        .set_key = iwl3945_mac_set_key,
        .conf_tx = iwl_mac_conf_tx,
-       .reset_tsf = iwl_mac_reset_tsf,
-       .bss_info_changed = iwl_bss_info_changed,
+       .reset_tsf = iwl_legacy_mac_reset_tsf,
+       .bss_info_changed = iwl_legacy_mac_bss_info_changed,
        .hw_scan = iwl_mac_hw_scan,
        .sta_add = iwl3945_mac_sta_add,
        .sta_remove = iwl_mac_sta_remove,
@@ -3866,6 +3869,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
        priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
 
        priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER;
+       priv->tx_power_next = IWL_DEFAULT_TX_POWER;
 
        if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
                IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
@@ -3965,7 +3969,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
 
        /* mac80211 allocates memory for this device instance, including
         *   space for this driver's private structure */
-       hw = iwl_alloc_all(cfg, &iwl3945_hw_ops);
+       hw = iwl_alloc_all(cfg);
        if (hw == NULL) {
                pr_err("Can not allocate network device\n");
                err = -ENOMEM;
@@ -4117,7 +4121,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
 
        pci_enable_msi(priv->pci_dev);
 
-       err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr,
+       err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr_ops.isr,
                          IRQF_SHARED, DRV_NAME, priv);
        if (err) {
                IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
@@ -4275,10 +4279,7 @@ static struct pci_driver iwl3945_driver = {
        .id_table = iwl3945_hw_card_ids,
        .probe = iwl3945_pci_probe,
        .remove = __devexit_p(iwl3945_pci_remove),
-#ifdef CONFIG_PM
-       .suspend = iwl_pci_suspend,
-       .resume = iwl_pci_resume,
-#endif
+       .driver.pm = IWL_PM_OPS,
 };
 
 static int __init iwl3945_init(void)
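
The pci_driver change drops the legacy .suspend/.resume callbacks and their CONFIG_PM ifdefs in favour of a dev_pm_ops table referenced through .driver.pm; IWL_PM_OPS is presumably defined elsewhere in this series to expand to the shared iwlwifi pm ops, or to NULL when power management is disabled. As a general illustration of the pattern (hypothetical driver, not iwlwifi code, assuming my_suspend()/my_resume() take a struct device * and return int):

static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);

static struct pci_driver my_pci_driver = {
	.name      = "my_driver",
	.id_table  = my_pci_ids,
	.probe     = my_probe,
	.remove    = __devexit_p(my_remove),
	.driver.pm = &my_pm_ops,	/* replaces the old .suspend/.resume pair */
};
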
index 7eaaa3bab54780a334bcd70c99fb2d42b5862b92..454f045ddff33118525f93142e53349622e015c3 100644 (file)
@@ -309,6 +309,8 @@ struct mac80211_hwsim_data {
         */
        u64 group;
        struct dentry *debugfs_group;
+
+       int power_level;
 };
 
 
@@ -497,7 +499,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
        rx_status.band = data->channel->band;
        rx_status.rate_idx = info->control.rates[0].idx;
        /* TODO: simulate real signal strength (and optional packet loss) */
-       rx_status.signal = -50;
+       rx_status.signal = data->power_level - 50;
 
        if (data->ps != PS_DISABLED)
                hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
@@ -698,6 +700,7 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
        data->idle = !!(conf->flags & IEEE80211_CONF_IDLE);
 
        data->channel = conf->channel;
+       data->power_level = conf->power_level;
        if (!data->started || !data->beacon_int)
                del_timer(&data->beacon_timer);
        else
index f152a25be59f7020998d35ceb72ba55100ebff2b..1bbcd7c1d02ab5d704bccdef122ac4ab3d1f5a69 100644 (file)
@@ -1125,10 +1125,12 @@ struct mwl8k_tx_desc {
        __le32 reserved;
        __le16 rate_info;
        __u8 peer_id;
-       __u8 tx_frag_cnt;
+       __u8 xmitcontrol;
 } __packed;
 
 #define MWL8K_TX_DESCS         128
+#define MWL8K_XMITCONTROL_NON_AMPDU    0x04
+
 
 static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
 {
@@ -1448,6 +1450,9 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
                tx->peer_id = MWL8K_STA(tx_info->control.sta)->peer_id;
        else
                tx->peer_id = 0;
+
+       if (priv->ap_fw)
+               tx->xmitcontrol = MWL8K_XMITCONTROL_NON_AMPDU;
        wmb();
        tx->status = cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED | txstatus);
 
index 4f420a9ec5dc26f9b8f1bb663a3c39aaccf54330..9ec6691adf0d5782ea04182100a9025cb91646fb 100644 (file)
@@ -885,8 +885,7 @@ static void rt2400pci_toggle_rx(struct rt2x00_dev *rt2x00dev,
 
        rt2x00pci_register_read(rt2x00dev, RXCSR0, &reg);
        rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX,
-                          (state == STATE_RADIO_RX_OFF) ||
-                          (state == STATE_RADIO_RX_OFF_LINK));
+                          (state == STATE_RADIO_RX_OFF));
        rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
 }
 
@@ -989,9 +988,7 @@ static int rt2400pci_set_device_state(struct rt2x00_dev *rt2x00dev,
                rt2400pci_disable_radio(rt2x00dev);
                break;
        case STATE_RADIO_RX_ON:
-       case STATE_RADIO_RX_ON_LINK:
        case STATE_RADIO_RX_OFF:
-       case STATE_RADIO_RX_OFF_LINK:
                rt2400pci_toggle_rx(rt2x00dev, state);
                break;
        case STATE_RADIO_IRQ_ON:
@@ -1612,6 +1609,7 @@ static const struct ieee80211_ops rt2400pci_mac80211_ops = {
        .get_tsf                = rt2400pci_get_tsf,
        .tx_last_beacon         = rt2400pci_tx_last_beacon,
        .rfkill_poll            = rt2x00mac_rfkill_poll,
+       .flush                  = rt2x00mac_flush,
 };
 
 static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
@@ -1640,28 +1638,28 @@ static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
 };
 
 static const struct data_queue_desc rt2400pci_queue_rx = {
-       .entry_num              = RX_ENTRIES,
+       .entry_num              = 24,
        .data_size              = DATA_FRAME_SIZE,
        .desc_size              = RXD_DESC_SIZE,
        .priv_size              = sizeof(struct queue_entry_priv_pci),
 };
 
 static const struct data_queue_desc rt2400pci_queue_tx = {
-       .entry_num              = TX_ENTRIES,
+       .entry_num              = 24,
        .data_size              = DATA_FRAME_SIZE,
        .desc_size              = TXD_DESC_SIZE,
        .priv_size              = sizeof(struct queue_entry_priv_pci),
 };
 
 static const struct data_queue_desc rt2400pci_queue_bcn = {
-       .entry_num              = BEACON_ENTRIES,
+       .entry_num              = 1,
        .data_size              = MGMT_FRAME_SIZE,
        .desc_size              = TXD_DESC_SIZE,
        .priv_size              = sizeof(struct queue_entry_priv_pci),
 };
 
 static const struct data_queue_desc rt2400pci_queue_atim = {
-       .entry_num              = ATIM_ENTRIES,
+       .entry_num              = 8,
        .data_size              = DATA_FRAME_SIZE,
        .desc_size              = TXD_DESC_SIZE,
        .priv_size              = sizeof(struct queue_entry_priv_pci),
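
Across the rt2x00 drivers the shared RX_ENTRIES/TX_ENTRIES/BEACON_ENTRIES/ATIM_ENTRIES constants are replaced by literal, per-driver queue sizes in the data_queue_desc tables, and code that used to iterate over TX_ENTRIES (see the rt2800lib and rt2800pci hunks below) now reads the size back through rt2x00dev->ops->tx->entry_num. A sketch of the consumer side, simplified but using the names that appear in the patch:

/* Drain the TX status FIFO, bounded by this driver's own TX ring size. */
static void example_drain_tx_status(struct rt2x00_dev *rt2x00dev)
{
	unsigned int i;
	u32 reg;

	for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) {
		rt2800_register_read(rt2x00dev, TX_STA_FIFO, &reg);
		if (!rt2x00_get_field32(reg, TX_STA_FIFO_VALID))
			break;		/* FIFO is empty */
		/* ... process one TX status entry ... */
	}
}
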
index c048b18f41331e747c152ffeb4f7a18b8d35b56a..d3a4a68cc439b22faea7f6e9d4899e9313e5a7e2 100644 (file)
 /*
  * DMA descriptor defines.
  */
-#define TXD_DESC_SIZE                  ( 8 * sizeof(__le32) )
-#define RXD_DESC_SIZE                  ( 8 * sizeof(__le32) )
+#define TXD_DESC_SIZE                  (8 * sizeof(__le32))
+#define RXD_DESC_SIZE                  (8 * sizeof(__le32))
 
 /*
  * TX descriptor format for TX, PRIO, ATIM and Beacon Ring.
        ((__CLAMP_TX(__txpower) - MAX_TXPOWER) + MIN_TXPOWER)
 
 #define TXPOWER_TO_DEV(__txpower) \
-       MAX_TXPOWER - (__CLAMP_TX(__txpower) - MIN_TXPOWER)
+       (MAX_TXPOWER - (__CLAMP_TX(__txpower) - MIN_TXPOWER))
 
 #endif /* RT2400PCI_H */
index 97feb7aef80929946714e97ed8931bf514487109..3e7f20346243d2b8a9dec103953aec6a70560696 100644 (file)
@@ -1040,8 +1040,7 @@ static void rt2500pci_toggle_rx(struct rt2x00_dev *rt2x00dev,
 
        rt2x00pci_register_read(rt2x00dev, RXCSR0, &reg);
        rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX,
-                          (state == STATE_RADIO_RX_OFF) ||
-                          (state == STATE_RADIO_RX_OFF_LINK));
+                          (state == STATE_RADIO_RX_OFF));
        rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
 }
 
@@ -1144,9 +1143,7 @@ static int rt2500pci_set_device_state(struct rt2x00_dev *rt2x00dev,
                rt2500pci_disable_radio(rt2x00dev);
                break;
        case STATE_RADIO_RX_ON:
-       case STATE_RADIO_RX_ON_LINK:
        case STATE_RADIO_RX_OFF:
-       case STATE_RADIO_RX_OFF_LINK:
                rt2500pci_toggle_rx(rt2x00dev, state);
                break;
        case STATE_RADIO_IRQ_ON:
@@ -1193,9 +1190,9 @@ static void rt2500pci_write_tx_desc(struct queue_entry *entry,
 
        rt2x00_desc_read(txd, 2, &word);
        rt2x00_set_field32(&word, TXD_W2_IV_OFFSET, IEEE80211_HEADER);
-       rt2x00_set_field32(&word, TXD_W2_AIFS, txdesc->aifs);
-       rt2x00_set_field32(&word, TXD_W2_CWMIN, txdesc->cw_min);
-       rt2x00_set_field32(&word, TXD_W2_CWMAX, txdesc->cw_max);
+       rt2x00_set_field32(&word, TXD_W2_AIFS, entry->queue->aifs);
+       rt2x00_set_field32(&word, TXD_W2_CWMIN, entry->queue->cw_min);
+       rt2x00_set_field32(&word, TXD_W2_CWMAX, entry->queue->cw_max);
        rt2x00_desc_write(txd, 2, word);
 
        rt2x00_desc_read(txd, 3, &word);
@@ -1909,6 +1906,7 @@ static const struct ieee80211_ops rt2500pci_mac80211_ops = {
        .get_tsf                = rt2500pci_get_tsf,
        .tx_last_beacon         = rt2500pci_tx_last_beacon,
        .rfkill_poll            = rt2x00mac_rfkill_poll,
+       .flush                  = rt2x00mac_flush,
 };
 
 static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
@@ -1937,28 +1935,28 @@ static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
 };
 
 static const struct data_queue_desc rt2500pci_queue_rx = {
-       .entry_num              = RX_ENTRIES,
+       .entry_num              = 32,
        .data_size              = DATA_FRAME_SIZE,
        .desc_size              = RXD_DESC_SIZE,
        .priv_size              = sizeof(struct queue_entry_priv_pci),
 };
 
 static const struct data_queue_desc rt2500pci_queue_tx = {
-       .entry_num              = TX_ENTRIES,
+       .entry_num              = 32,
        .data_size              = DATA_FRAME_SIZE,
        .desc_size              = TXD_DESC_SIZE,
        .priv_size              = sizeof(struct queue_entry_priv_pci),
 };
 
 static const struct data_queue_desc rt2500pci_queue_bcn = {
-       .entry_num              = BEACON_ENTRIES,
+       .entry_num              = 1,
        .data_size              = MGMT_FRAME_SIZE,
        .desc_size              = TXD_DESC_SIZE,
        .priv_size              = sizeof(struct queue_entry_priv_pci),
 };
 
 static const struct data_queue_desc rt2500pci_queue_atim = {
-       .entry_num              = ATIM_ENTRIES,
+       .entry_num              = 8,
        .data_size              = DATA_FRAME_SIZE,
        .desc_size              = TXD_DESC_SIZE,
        .priv_size              = sizeof(struct queue_entry_priv_pci),
index d708031361ac4d8fb257314f8df6b36ba6333385..2aad7ba8a10083547c8e39d4cba5dc77dcbc42d7 100644 (file)
 /*
  * DMA descriptor defines.
  */
-#define TXD_DESC_SIZE                  ( 11 * sizeof(__le32) )
-#define RXD_DESC_SIZE                  ( 11 * sizeof(__le32) )
+#define TXD_DESC_SIZE                  (11 * sizeof(__le32))
+#define RXD_DESC_SIZE                  (11 * sizeof(__le32))
 
 /*
  * TX descriptor format for TX, PRIO, ATIM and Beacon Ring.
index 93e44c7f3a749ac5962fb4c6079ee7e8f0493163..8152fec317537937e773593c039475df1f67c716 100644 (file)
@@ -39,7 +39,7 @@
 /*
  * Allow hardware encryption to be disabled.
  */
-static int modparam_nohwcrypt = 0;
+static int modparam_nohwcrypt;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 
@@ -938,8 +938,7 @@ static void rt2500usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
 
        rt2500usb_register_read(rt2x00dev, TXRX_CSR2, &reg);
        rt2x00_set_field16(&reg, TXRX_CSR2_DISABLE_RX,
-                          (state == STATE_RADIO_RX_OFF) ||
-                          (state == STATE_RADIO_RX_OFF_LINK));
+                          (state == STATE_RADIO_RX_OFF));
        rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg);
 }
 
@@ -1019,9 +1018,7 @@ static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev,
                rt2500usb_disable_radio(rt2x00dev);
                break;
        case STATE_RADIO_RX_ON:
-       case STATE_RADIO_RX_ON_LINK:
        case STATE_RADIO_RX_OFF:
-       case STATE_RADIO_RX_OFF_LINK:
                rt2500usb_toggle_rx(rt2x00dev, state);
                break;
        case STATE_RADIO_IRQ_ON:
@@ -1081,9 +1078,9 @@ static void rt2500usb_write_tx_desc(struct queue_entry *entry,
 
        rt2x00_desc_read(txd, 1, &word);
        rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset);
-       rt2x00_set_field32(&word, TXD_W1_AIFS, txdesc->aifs);
-       rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min);
-       rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
+       rt2x00_set_field32(&word, TXD_W1_AIFS, entry->queue->aifs);
+       rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min);
+       rt2x00_set_field32(&word, TXD_W1_CWMAX, entry->queue->cw_max);
        rt2x00_desc_write(txd, 1, word);
 
        rt2x00_desc_read(txd, 2, &word);
@@ -1801,6 +1798,7 @@ static const struct ieee80211_ops rt2500usb_mac80211_ops = {
        .bss_info_changed       = rt2x00mac_bss_info_changed,
        .conf_tx                = rt2x00mac_conf_tx,
        .rfkill_poll            = rt2x00mac_rfkill_poll,
+       .flush                  = rt2x00mac_flush,
 };
 
 static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
@@ -1829,28 +1827,28 @@ static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
 };
 
 static const struct data_queue_desc rt2500usb_queue_rx = {
-       .entry_num              = RX_ENTRIES,
+       .entry_num              = 32,
        .data_size              = DATA_FRAME_SIZE,
        .desc_size              = RXD_DESC_SIZE,
        .priv_size              = sizeof(struct queue_entry_priv_usb),
 };
 
 static const struct data_queue_desc rt2500usb_queue_tx = {
-       .entry_num              = TX_ENTRIES,
+       .entry_num              = 32,
        .data_size              = DATA_FRAME_SIZE,
        .desc_size              = TXD_DESC_SIZE,
        .priv_size              = sizeof(struct queue_entry_priv_usb),
 };
 
 static const struct data_queue_desc rt2500usb_queue_bcn = {
-       .entry_num              = BEACON_ENTRIES,
+       .entry_num              = 1,
        .data_size              = MGMT_FRAME_SIZE,
        .desc_size              = TXD_DESC_SIZE,
        .priv_size              = sizeof(struct queue_entry_priv_usb_bcn),
 };
 
 static const struct data_queue_desc rt2500usb_queue_atim = {
-       .entry_num              = ATIM_ENTRIES,
+       .entry_num              = 8,
        .data_size              = DATA_FRAME_SIZE,
        .desc_size              = TXD_DESC_SIZE,
        .priv_size              = sizeof(struct queue_entry_priv_usb),
index eb8b6cab992516fa8bdf58d2feabcf44950585f3..002224c9bb621f4b23234ab5d07047fee36b638e 100644 (file)
 #define BCN_OFFSET1_BCN7               FIELD32(0xff000000)
 
 /*
- * PBF registers
- * Most are for debug. Driver doesn't touch PBF register.
+ * TXRXQ_PCNT: PBF register
+ * PCNT_TX0Q: Page count for TX hardware queue 0
+ * PCNT_TX1Q: Page count for TX hardware queue 1
+ * PCNT_TX2Q: Page count for TX hardware queue 2
+ * PCNT_RX0Q: Page count for RX hardware queue
  */
 #define TXRXQ_PCNT                     0x0438
+#define TXRXQ_PCNT_TX0Q                        FIELD32(0x000000ff)
+#define TXRXQ_PCNT_TX1Q                        FIELD32(0x0000ff00)
+#define TXRXQ_PCNT_TX2Q                        FIELD32(0x00ff0000)
+#define TXRXQ_PCNT_RX0Q                        FIELD32(0xff000000)
+
+/*
+ * PBF register
+ * Debug. Driver doesn't touch PBF register.
+ */
 #define PBF_DBG                                0x043c
 
 /*
 
 /*
  * TXOP_CTRL_CFG:
+ * TIMEOUT_TRUN_EN: Enable/Disable TXOP timeout truncation
+ * AC_TRUN_EN: Enable/Disable truncation for AC change
+ * TXRATEGRP_TRUN_EN: Enable/Disable truncation for TX rate group change
+ * USER_MODE_TRUN_EN: Enable/Disable truncation for user TXOP mode
+ * MIMO_PS_TRUN_EN: Enable/Disable truncation for MIMO PS RTS/CTS
+ * RESERVED_TRUN_EN: Reserved
+ * LSIG_TXOP_EN: Enable/Disable L-SIG TXOP protection
+ * EXT_CCA_EN: Enable/Disable extension channel CCA reference (Defer 40Mhz
+ *            transmissions if extension CCA is clear).
+ * EXT_CCA_DLY: Extension CCA signal delay time (unit: us)
+ * EXT_CWMIN: CwMin for extension channel backoff
+ *           0: Disabled
+ *
  */
 #define TXOP_CTRL_CFG                  0x1340
+#define TXOP_CTRL_CFG_TIMEOUT_TRUN_EN  FIELD32(0x00000001)
+#define TXOP_CTRL_CFG_AC_TRUN_EN       FIELD32(0x00000002)
+#define TXOP_CTRL_CFG_TXRATEGRP_TRUN_EN        FIELD32(0x00000004)
+#define TXOP_CTRL_CFG_USER_MODE_TRUN_EN        FIELD32(0x00000008)
+#define TXOP_CTRL_CFG_MIMO_PS_TRUN_EN  FIELD32(0x00000010)
+#define TXOP_CTRL_CFG_RESERVED_TRUN_EN FIELD32(0x00000020)
+#define TXOP_CTRL_CFG_LSIG_TXOP_EN     FIELD32(0x00000040)
+#define TXOP_CTRL_CFG_EXT_CCA_EN       FIELD32(0x00000080)
+#define TXOP_CTRL_CFG_EXT_CCA_DLY      FIELD32(0x0000ff00)
+#define TXOP_CTRL_CFG_EXT_CWMIN                FIELD32(0x000f0000)
 
 /*
  * TX_RTS_CFG:
 #define SHARED_KEY_MODE_BASE           0x7000
 
 #define MAC_WCID_ENTRY(__idx) \
-       ( MAC_WCID_BASE + ((__idx) * sizeof(struct mac_wcid_entry)) )
+       (MAC_WCID_BASE + ((__idx) * sizeof(struct mac_wcid_entry)))
 #define PAIRWISE_KEY_ENTRY(__idx) \
-       ( PAIRWISE_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) )
+       (PAIRWISE_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)))
 #define MAC_IVEIV_ENTRY(__idx) \
-       ( MAC_IVEIV_TABLE_BASE + ((__idx) * sizeof(struct mac_iveiv_entry)) )
+       (MAC_IVEIV_TABLE_BASE + ((__idx) * sizeof(struct mac_iveiv_entry)))
 #define MAC_WCID_ATTR_ENTRY(__idx) \
-       ( MAC_WCID_ATTRIBUTE_BASE + ((__idx) * sizeof(u32)) )
+       (MAC_WCID_ATTRIBUTE_BASE + ((__idx) * sizeof(u32)))
 #define SHARED_KEY_ENTRY(__idx) \
-       ( SHARED_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) )
+       (SHARED_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)))
 #define SHARED_KEY_MODE_ENTRY(__idx) \
-       ( SHARED_KEY_MODE_BASE + ((__idx) * sizeof(u32)) )
+       (SHARED_KEY_MODE_BASE + ((__idx) * sizeof(u32)))
 
 struct mac_wcid_entry {
        u8 mac[6];
@@ -1635,9 +1670,9 @@ struct mac_iveiv_entry {
 #define HW_BEACON_BASE7                        0x5bc0
 
 #define HW_BEACON_OFFSET(__index) \
-       ( ((__index) < 4) ? ( HW_BEACON_BASE0 + (__index * 0x0200) ) : \
-         (((__index) < 6) ? ( HW_BEACON_BASE4 + ((__index - 4) * 0x0200) ) : \
-         (HW_BEACON_BASE6 - ((__index - 6) * 0x0200))) )
+       (((__index) < 4) ? (HW_BEACON_BASE0 + (__index * 0x0200)) : \
+         (((__index) < 6) ? (HW_BEACON_BASE4 + ((__index - 4) * 0x0200)) : \
+         (HW_BEACON_BASE6 - ((__index - 6) * 0x0200))))
 
 /*
  * BBP registers.
@@ -1987,8 +2022,8 @@ struct mac_iveiv_entry {
 /*
  * DMA descriptor defines.
  */
-#define TXWI_DESC_SIZE                 ( 4 * sizeof(__le32) )
-#define RXWI_DESC_SIZE                 ( 4 * sizeof(__le32) )
+#define TXWI_DESC_SIZE                 (4 * sizeof(__le32))
+#define RXWI_DESC_SIZE                 (4 * sizeof(__le32))
 
 /*
  * TX WI structure
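
The new TXRXQ_PCNT_* and TXOP_CTRL_CFG_* definitions use the driver's FIELD32() macro, which describes a register field by its mask so that rt2x00_set_field32()/rt2x00_get_field32() can do the shift-and-mask arithmetic for the caller. A self-contained approximation of what those helpers compute; the real macros are defined elsewhere in the rt2x00 code and derive the shift from the mask automatically:

#include <stdint.h>

struct field32 {
	uint32_t mask;		/* e.g. 0x0000ff00 for an 8-bit field at bit 8 */
	unsigned int shift;	/* position of the field's lowest set bit */
};

static uint32_t get_field32(uint32_t reg, struct field32 f)
{
	return (reg & f.mask) >> f.shift;
}

static void set_field32(uint32_t *reg, struct field32 f, uint32_t value)
{
	*reg = (*reg & ~f.mask) | ((value << f.shift) & f.mask);
}
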
index 5f00e00789d823bad28752516a4789da7057d8f9..b5d2ebab6ea84374d5a65c2602c88fd1c770d731 100644 (file)
@@ -277,13 +277,17 @@ int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
        unsigned int i;
        u32 reg;
 
+       /*
+        * Some devices are really slow to respond here. Wait a whole second
+        * before timing out.
+        */
        for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
                rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
                if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
                    !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
                        return 0;
 
-               msleep(1);
+               msleep(10);
        }
 
        ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
@@ -483,7 +487,7 @@ void rt2800_write_tx_data(struct queue_entry *entry,
                           txdesc->key_idx : 0xff);
        rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
                           txdesc->length);
-       rt2x00_set_field32(&word, TXWI_W1_PACKETID_QUEUE, txdesc->qid);
+       rt2x00_set_field32(&word, TXWI_W1_PACKETID_QUEUE, entry->queue->qid);
        rt2x00_set_field32(&word, TXWI_W1_PACKETID_ENTRY, (entry->entry_idx % 3) + 1);
        rt2x00_desc_write(txwi, 1, word);
 
@@ -727,7 +731,7 @@ void rt2800_txdone(struct rt2x00_dev *rt2x00dev)
         * that the TX_STA_FIFO stack has a size of 16. We stick to our
         * tx ring size for now.
         */
-       for (i = 0; i < TX_ENTRIES; i++) {
+       for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) {
                rt2800_register_read(rt2x00dev, TX_STA_FIFO, &reg);
                if (!rt2x00_get_field32(reg, TX_STA_FIFO_VALID))
                        break;
@@ -824,7 +828,7 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
 }
 EXPORT_SYMBOL_GPL(rt2800_write_beacon);
 
-static void inline rt2800_clear_beacon(struct rt2x00_dev *rt2x00dev,
+static inline void rt2800_clear_beacon(struct rt2x00_dev *rt2x00dev,
                                       unsigned int beacon_base)
 {
        int i;
@@ -1144,6 +1148,7 @@ void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
                        struct rt2x00intf_conf *conf, const unsigned int flags)
 {
        u32 reg;
+       bool update_bssid = false;
 
        if (flags & CONFIG_UPDATE_TYPE) {
                /*
@@ -1173,6 +1178,16 @@ void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
        }
 
        if (flags & CONFIG_UPDATE_MAC) {
+               if (flags & CONFIG_UPDATE_TYPE &&
+                   conf->sync == TSF_SYNC_AP_NONE) {
+                       /*
+                        * The BSSID register has to be set to our own mac
+                        * address in AP mode.
+                        */
+                       memcpy(conf->bssid, conf->mac, sizeof(conf->mac));
+                       update_bssid = true;
+               }
+
                if (!is_zero_ether_addr((const u8 *)conf->mac)) {
                        reg = le32_to_cpu(conf->mac[1]);
                        rt2x00_set_field32(&reg, MAC_ADDR_DW1_UNICAST_TO_ME_MASK, 0xff);
@@ -1183,7 +1198,7 @@ void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
                                              conf->mac, sizeof(conf->mac));
        }
 
-       if (flags & CONFIG_UPDATE_BSSID) {
+       if ((flags & CONFIG_UPDATE_BSSID) || update_bssid) {
                if (!is_zero_ether_addr((const u8 *)conf->bssid)) {
                        reg = le32_to_cpu(conf->bssid[1]);
                        rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_ID_MASK, 3);
@@ -2097,7 +2112,23 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
                rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
        }
 
-       rt2800_register_write(rt2x00dev, TXOP_CTRL_CFG, 0x0000583f);
+       /*
+        * The legacy driver also sets TXOP_CTRL_CFG_RESERVED_TRUN_EN to 1
+        * although it is reserved.
+        */
+       rt2800_register_read(rt2x00dev, TXOP_CTRL_CFG, &reg);
+       rt2x00_set_field32(&reg, TXOP_CTRL_CFG_TIMEOUT_TRUN_EN, 1);
+       rt2x00_set_field32(&reg, TXOP_CTRL_CFG_AC_TRUN_EN, 1);
+       rt2x00_set_field32(&reg, TXOP_CTRL_CFG_TXRATEGRP_TRUN_EN, 1);
+       rt2x00_set_field32(&reg, TXOP_CTRL_CFG_USER_MODE_TRUN_EN, 1);
+       rt2x00_set_field32(&reg, TXOP_CTRL_CFG_MIMO_PS_TRUN_EN, 1);
+       rt2x00_set_field32(&reg, TXOP_CTRL_CFG_RESERVED_TRUN_EN, 1);
+       rt2x00_set_field32(&reg, TXOP_CTRL_CFG_LSIG_TXOP_EN, 0);
+       rt2x00_set_field32(&reg, TXOP_CTRL_CFG_EXT_CCA_EN, 0);
+       rt2x00_set_field32(&reg, TXOP_CTRL_CFG_EXT_CCA_DLY, 88);
+       rt2x00_set_field32(&reg, TXOP_CTRL_CFG_EXT_CWMIN, 0);
+       rt2800_register_write(rt2x00dev, TXOP_CTRL_CFG, reg);
+
        rt2800_register_write(rt2x00dev, TXOP_HLDR_ET, 0x00000002);
 
        rt2800_register_read(rt2x00dev, TX_RTS_CFG, &reg);
index b267395359863ca10a55fe8a39f186e5bf08deb9..5f3a018c088d693625d67f5af8f4bd1ff7f0e67f 100644 (file)
@@ -328,8 +328,7 @@ static void rt2800pci_toggle_rx(struct rt2x00_dev *rt2x00dev,
 
        rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
        rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX,
-                          (state == STATE_RADIO_RX_ON) ||
-                          (state == STATE_RADIO_RX_ON_LINK));
+                          (state == STATE_RADIO_RX_ON));
        rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
 }
 
@@ -442,7 +441,7 @@ static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
         * if the device is booting and wasn't asleep it will return
         * failure when attempting to wakeup.
         */
-       rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0, 2);
+       rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0xff, 2);
 
        if (state == STATE_AWAKE) {
                rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0);
@@ -477,9 +476,7 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
                rt2800pci_set_state(rt2x00dev, STATE_SLEEP);
                break;
        case STATE_RADIO_RX_ON:
-       case STATE_RADIO_RX_ON_LINK:
        case STATE_RADIO_RX_OFF:
-       case STATE_RADIO_RX_OFF_LINK:
                rt2800pci_toggle_rx(rt2x00dev, state);
                break;
        case STATE_RADIO_IRQ_ON:
@@ -777,7 +774,7 @@ static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
         * Since we have only one producer and one consumer we don't
         * need to lock the kfifo.
         */
-       for (i = 0; i < TX_ENTRIES; i++) {
+       for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) {
                rt2800_register_read(rt2x00dev, TX_STA_FIFO, &status);
 
                if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
@@ -943,6 +940,7 @@ static const struct ieee80211_ops rt2800pci_mac80211_ops = {
        .get_tsf                = rt2800_get_tsf,
        .rfkill_poll            = rt2x00mac_rfkill_poll,
        .ampdu_action           = rt2800_ampdu_action,
+       .flush                  = rt2x00mac_flush,
 };
 
 static const struct rt2800_ops rt2800pci_rt2800_ops = {
@@ -991,21 +989,21 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
 };
 
 static const struct data_queue_desc rt2800pci_queue_rx = {
-       .entry_num              = RX_ENTRIES,
+       .entry_num              = 128,
        .data_size              = AGGREGATION_SIZE,
        .desc_size              = RXD_DESC_SIZE,
        .priv_size              = sizeof(struct queue_entry_priv_pci),
 };
 
 static const struct data_queue_desc rt2800pci_queue_tx = {
-       .entry_num              = TX_ENTRIES,
+       .entry_num              = 64,
        .data_size              = AGGREGATION_SIZE,
        .desc_size              = TXD_DESC_SIZE,
        .priv_size              = sizeof(struct queue_entry_priv_pci),
 };
 
 static const struct data_queue_desc rt2800pci_queue_bcn = {
-       .entry_num              = 8 * BEACON_ENTRIES,
+       .entry_num              = 8,
        .data_size              = 0, /* No DMA required for beacons */
        .desc_size              = TXWI_DESC_SIZE,
        .priv_size              = sizeof(struct queue_entry_priv_pci),
index 5a8dda9b5b5a66c0d6b798dcd837be20c1cc29e4..70e050d904c853c81973942d9526cbc7dc41635d 100644 (file)
  * Queue register offset macros
  */
 #define TX_QUEUE_REG_OFFSET            0x10
-#define TX_BASE_PTR(__x)               TX_BASE_PTR0 + ((__x) * TX_QUEUE_REG_OFFSET)
-#define TX_MAX_CNT(__x)                        TX_MAX_CNT0 + ((__x) * TX_QUEUE_REG_OFFSET)
-#define TX_CTX_IDX(__x)                        TX_CTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET)
-#define TX_DTX_IDX(__x)                        TX_DTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET)
+#define TX_BASE_PTR(__x)               (TX_BASE_PTR0 + ((__x) * TX_QUEUE_REG_OFFSET))
+#define TX_MAX_CNT(__x)                        (TX_MAX_CNT0 + ((__x) * TX_QUEUE_REG_OFFSET))
+#define TX_CTX_IDX(__x)                        (TX_CTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
+#define TX_DTX_IDX(__x)                        (TX_DTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
 
 /*
  * 8051 firmware image.
@@ -52,8 +52,8 @@
 /*
  * DMA descriptor defines.
  */
-#define TXD_DESC_SIZE                  ( 4 * sizeof(__le32) )
-#define RXD_DESC_SIZE                  ( 4 * sizeof(__le32) )
+#define TXD_DESC_SIZE                  (4 * sizeof(__le32))
+#define RXD_DESC_SIZE                  (4 * sizeof(__le32))
 
 /*
  * TX descriptor format for TX, PRIO and Beacon Ring.
index 3dff56ec195abaa60e9500611a6a020da2dc199f..389ecba8e89144fb9d5bda183832ff49c4e7d2f3 100644 (file)
@@ -45,7 +45,7 @@
 /*
  * Allow hardware encryption to be disabled.
  */
-static int modparam_nohwcrypt = 0;
+static int modparam_nohwcrypt;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 
@@ -114,8 +114,7 @@ static void rt2800usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
 
        rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
        rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX,
-                          (state == STATE_RADIO_RX_ON) ||
-                          (state == STATE_RADIO_RX_ON_LINK));
+                          (state == STATE_RADIO_RX_ON));
        rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
 }
 
@@ -165,7 +164,8 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
         * this limit so reduce the number to prevent errors.
         */
        rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_LIMIT,
-                          ((RX_ENTRIES * DATA_FRAME_SIZE) / 1024) - 3);
+                          ((rt2x00dev->ops->rx->entry_num * DATA_FRAME_SIZE)
+                           / 1024) - 3);
        rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_EN, 1);
        rt2x00_set_field32(&reg, USB_DMA_CFG_TX_BULK_EN, 1);
        rt2800_register_write(rt2x00dev, USB_DMA_CFG, reg);
@@ -183,9 +183,9 @@ static int rt2800usb_set_state(struct rt2x00_dev *rt2x00dev,
                               enum dev_state state)
 {
        if (state == STATE_AWAKE)
-               rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, 0xff, 0, 0);
+               rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, 0xff, 0, 2);
        else
-               rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0, 2);
+               rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0xff, 2);
 
        return 0;
 }
@@ -215,9 +215,7 @@ static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev,
                rt2800usb_set_state(rt2x00dev, STATE_SLEEP);
                break;
        case STATE_RADIO_RX_ON:
-       case STATE_RADIO_RX_ON_LINK:
        case STATE_RADIO_RX_OFF:
-       case STATE_RADIO_RX_OFF_LINK:
                rt2800usb_toggle_rx(rt2x00dev, state);
                break;
        case STATE_RADIO_IRQ_ON:
@@ -244,6 +242,49 @@ static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev,
        return retval;
 }
 
+/*
+ * Watchdog handlers
+ */
+static void rt2800usb_watchdog(struct rt2x00_dev *rt2x00dev)
+{
+       unsigned int i;
+       u32 reg;
+
+       rt2800_register_read(rt2x00dev, TXRXQ_PCNT, &reg);
+       if (rt2x00_get_field32(reg, TXRXQ_PCNT_TX0Q)) {
+               WARNING(rt2x00dev, "TX HW queue 0 timed out,"
+                       " invoke forced kick");
+
+               rt2800_register_write(rt2x00dev, PBF_CFG, 0xf40012);
+
+               for (i = 0; i < 10; i++) {
+                       udelay(10);
+                       if (!rt2x00_get_field32(reg, TXRXQ_PCNT_TX0Q))
+                               break;
+               }
+
+               rt2800_register_write(rt2x00dev, PBF_CFG, 0xf40006);
+       }
+
+       rt2800_register_read(rt2x00dev, TXRXQ_PCNT, &reg);
+       if (rt2x00_get_field32(reg, TXRXQ_PCNT_TX1Q)) {
+               WARNING(rt2x00dev, "TX HW queue 1 timed out,"
+                       " invoke forced kick");
+
+               rt2800_register_write(rt2x00dev, PBF_CFG, 0xf4000a);
+
+               for (i = 0; i < 10; i++) {
+                       udelay(10);
+                       if (!rt2x00_get_field32(reg, TXRXQ_PCNT_TX1Q))
+                               break;
+               }
+
+               rt2800_register_write(rt2x00dev, PBF_CFG, 0xf40006);
+       }
+
+       rt2x00usb_watchdog(rt2x00dev);
+}
+
 /*
  * TX descriptor initialization
  */
@@ -507,6 +548,7 @@ static const struct ieee80211_ops rt2800usb_mac80211_ops = {
        .get_tsf                = rt2800_get_tsf,
        .rfkill_poll            = rt2x00mac_rfkill_poll,
        .ampdu_action           = rt2800_ampdu_action,
+       .flush                  = rt2x00mac_flush,
 };
 
 static const struct rt2800_ops rt2800usb_rt2800_ops = {
@@ -535,7 +577,7 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
        .link_stats             = rt2800_link_stats,
        .reset_tuner            = rt2800_reset_tuner,
        .link_tuner             = rt2800_link_tuner,
-       .watchdog               = rt2x00usb_watchdog,
+       .watchdog               = rt2800usb_watchdog,
        .write_tx_desc          = rt2800usb_write_tx_desc,
        .write_tx_data          = rt2800_write_tx_data,
        .write_beacon           = rt2800_write_beacon,
@@ -553,21 +595,21 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
 };
 
 static const struct data_queue_desc rt2800usb_queue_rx = {
-       .entry_num              = RX_ENTRIES,
+       .entry_num              = 128,
        .data_size              = AGGREGATION_SIZE,
        .desc_size              = RXINFO_DESC_SIZE + RXWI_DESC_SIZE,
        .priv_size              = sizeof(struct queue_entry_priv_usb),
 };
 
 static const struct data_queue_desc rt2800usb_queue_tx = {
-       .entry_num              = TX_ENTRIES,
+       .entry_num              = 64,
        .data_size              = AGGREGATION_SIZE,
        .desc_size              = TXINFO_DESC_SIZE + TXWI_DESC_SIZE,
        .priv_size              = sizeof(struct queue_entry_priv_usb),
 };
 
 static const struct data_queue_desc rt2800usb_queue_bcn = {
-       .entry_num              = 8 * BEACON_ENTRIES,
+       .entry_num              = 8,
        .data_size              = MGMT_FRAME_SIZE,
        .desc_size              = TXINFO_DESC_SIZE + TXWI_DESC_SIZE,
        .priv_size              = sizeof(struct queue_entry_priv_usb),
index 0722badccf86cbe3dcfa1d2407da5f19dac295c1..671ea35926101ad76cce66b441cb57b151096cad 100644 (file)
@@ -40,8 +40,8 @@
 /*
  * DMA descriptor defines.
  */
-#define TXINFO_DESC_SIZE               ( 1 * sizeof(__le32) )
-#define RXINFO_DESC_SIZE               ( 1 * sizeof(__le32) )
+#define TXINFO_DESC_SIZE               (1 * sizeof(__le32))
+#define RXINFO_DESC_SIZE               (1 * sizeof(__le32))
 
 /*
  * TX Info structure
index 94fe589acfaabff06eac097fe7e3ae3e2f7ea310..42bd3a96f23b8fac6f847a61e04325b8a1c10be5 100644 (file)
@@ -1133,6 +1133,7 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
 int rt2x00mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
                      const struct ieee80211_tx_queue_params *params);
 void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw);
+void rt2x00mac_flush(struct ieee80211_hw *hw, bool drop);
 
 /*
  * Driver allocation handlers.
index 54ffb5aeb34e4b808f1a94bcf8e6de7463883893..a238e908c85402ca51e5206d47bd005d94de5c7a 100644 (file)
@@ -133,7 +133,7 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
         */
        if (!(ant->flags & ANTENNA_RX_DIVERSITY))
                config.rx = rt2x00lib_config_antenna_check(config.rx, def->rx);
-       else if(config.rx == ANTENNA_SW_DIVERSITY)
+       else if (config.rx == ANTENNA_SW_DIVERSITY)
                config.rx = active->rx;
 
        if (!(ant->flags & ANTENNA_TX_DIVERSITY))
@@ -146,7 +146,8 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
         * else the changes will be ignored by the device.
         */
        if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
-               rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF_LINK);
+               rt2x00dev->ops->lib->set_device_state(rt2x00dev,
+                                                     STATE_RADIO_RX_OFF);
 
        /*
         * Write new antenna setup to device and reset the link tuner.
@@ -160,7 +161,8 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
        memcpy(active, &config, sizeof(config));
 
        if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
-               rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON_LINK);
+               rt2x00dev->ops->lib->set_device_state(rt2x00dev,
+                                                     STATE_RADIO_RX_ON);
 }
 
 void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
index fcdb6b0dc40f88d4e90ca5a064a70e6516cc6f30..64dfb1f6823ed9b6cfc0272e6700687df82b6ce6 100644 (file)
@@ -162,11 +162,11 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
        struct timeval timestamp;
        u32 data_len;
 
-       do_gettimeofday(&timestamp);
-
-       if (!test_bit(FRAME_DUMP_FILE_OPEN, &intf->frame_dump_flags))
+       if (likely(!test_bit(FRAME_DUMP_FILE_OPEN, &intf->frame_dump_flags)))
                return;
 
+       do_gettimeofday(&timestamp);
+
        if (skb_queue_len(&intf->frame_dump_skbqueue) > 20) {
                DEBUG(rt2x00dev, "txrx dump queue length exceeded.\n");
                return;
@@ -342,7 +342,7 @@ static ssize_t rt2x00debug_read_queue_stats(struct file *file,
            sprintf(data, "qid\tcount\tlimit\tlength\tindex\tdma done\tdone\n");
 
        queue_for_each(intf->rt2x00dev, queue) {
-               spin_lock_irqsave(&queue->lock, irqflags);
+               spin_lock_irqsave(&queue->index_lock, irqflags);
 
                temp += sprintf(temp, "%d\t%d\t%d\t%d\t%d\t%d\t%d\n", queue->qid,
                                queue->count, queue->limit, queue->length,
@@ -350,7 +350,7 @@ static ssize_t rt2x00debug_read_queue_stats(struct file *file,
                                queue->index[Q_INDEX_DMA_DONE],
                                queue->index[Q_INDEX_DONE]);
 
-               spin_unlock_irqrestore(&queue->lock, irqflags);
+               spin_unlock_irqrestore(&queue->index_lock, irqflags);
        }
 
        size = strlen(data);
index 5ba79b935f09f5bed56f999653338501584cf141..3afa2a3ebee480c3cdb0e4c84385015adb459264 100644 (file)
@@ -68,7 +68,8 @@ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev)
        /*
         * Enable RX.
         */
-       rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON);
+       rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_RX_ON);
+       rt2x00link_start_tuner(rt2x00dev);
 
        /*
         * Start watchdog monitoring.
@@ -102,7 +103,8 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
        /*
         * Disable RX.
         */
-       rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF);
+       rt2x00link_stop_tuner(rt2x00dev);
+       rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_RX_OFF);
 
        /*
         * Disable radio.
@@ -113,23 +115,6 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
        rt2x00leds_led_radio(rt2x00dev, false);
 }
 
-void rt2x00lib_toggle_rx(struct rt2x00_dev *rt2x00dev, enum dev_state state)
-{
-       /*
-        * When we are disabling the RX, we should also stop the link tuner.
-        */
-       if (state == STATE_RADIO_RX_OFF)
-               rt2x00link_stop_tuner(rt2x00dev);
-
-       rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
-
-       /*
-        * When we are enabling the RX, we should also start the link tuner.
-        */
-       if (state == STATE_RADIO_RX_ON)
-               rt2x00link_start_tuner(rt2x00dev);
-}
-
 static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac,
                                          struct ieee80211_vif *vif)
 {
@@ -483,6 +468,10 @@ void rt2x00lib_rxdone(struct queue_entry *entry)
        unsigned int header_length;
        int rate_idx;
 
+       if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) ||
+           !test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+               goto submit_entry;
+
        if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
                goto submit_entry;
 
@@ -567,9 +556,13 @@ void rt2x00lib_rxdone(struct queue_entry *entry)
        entry->skb = skb;
 
 submit_entry:
-       rt2x00dev->ops->lib->clear_entry(entry);
-       rt2x00queue_index_inc(entry->queue, Q_INDEX);
+       entry->flags = 0;
        rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);
+       if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
+           test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) {
+               rt2x00dev->ops->lib->clear_entry(entry);
+               rt2x00queue_index_inc(entry->queue, Q_INDEX);
+       }
 }
 EXPORT_SYMBOL_GPL(rt2x00lib_rxdone);
 
@@ -678,7 +671,7 @@ static void rt2x00lib_rate(struct ieee80211_rate *entry,
 {
        entry->flags = 0;
        entry->bitrate = rate->bitrate;
-       entry->hw_value =index;
+       entry->hw_value = index;
        entry->hw_value_short = index;
 
        if (rate->flags & DEV_RATE_SHORT_PREAMBLE)
index 619da23b7b56004cec3e3d327100449e9269b500..2cf68f82674b0139ddb328b1a34bcb870f48970b 100644 (file)
@@ -57,7 +57,7 @@ static inline const struct rt2x00_rate *rt2x00_get_rate(const u16 hw_value)
 }
 
 #define RATE_MCS(__mode, __mcs) \
-       ( (((__mode) & 0x00ff) << 8) | ((__mcs) & 0x00ff) )
+       ((((__mode) & 0x00ff) << 8) | ((__mcs) & 0x00ff))
 
 static inline int rt2x00_get_rate_mcs(const u16 mcs_value)
 {
@@ -69,7 +69,6 @@ static inline int rt2x00_get_rate_mcs(const u16 mcs_value)
  */
 int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev);
 void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev);
-void rt2x00lib_toggle_rx(struct rt2x00_dev *rt2x00dev, enum dev_state state);
 
 /*
  * Initialization handlers.
index b971d8798ebf8ce23e7efbc2d7b1c7b88233d107..bfda60eaf4efc6b888091f7e7976cd7a2c4d0267 100644 (file)
@@ -67,7 +67,7 @@
            (__avg).avg_weight  ? \
                ((((__avg).avg_weight * ((AVG_SAMPLES) - 1)) + \
                  ((__val) * (AVG_FACTOR))) / \
-                (AVG_SAMPLES) ) : \
+                (AVG_SAMPLES)) : \
                ((__val) * (AVG_FACTOR)); \
        __new.avg = __new.avg_weight / (AVG_FACTOR); \
        __new; \
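
The hunk above only drops a stray space inside the averaging macro, but the fixed-point weighted average it computes is easy to misread. A minimal standalone sketch of the same computation (the AVG_SAMPLES and AVG_FACTOR values here are illustrative, not the driver's actual configuration):

#include <stdio.h>

#define AVG_SAMPLES 8
#define AVG_FACTOR  1000   /* scale factor: keeps fractional precision in integers */

struct avg {
	int avg;        /* averaged value, unscaled */
	int avg_weight; /* running weighted sum, scaled by AVG_FACTOR */
};

static struct avg moving_average(struct avg a, int val)
{
	/* First sample seeds the weight; afterwards blend (N-1) old : 1 new. */
	a.avg_weight = a.avg_weight ?
		((a.avg_weight * (AVG_SAMPLES - 1)) + (val * AVG_FACTOR)) / AVG_SAMPLES :
		(val * AVG_FACTOR);
	a.avg = a.avg_weight / AVG_FACTOR;
	return a;
}

int main(void)
{
	struct avg rssi = { 0, 0 };
	int samples[] = { -70, -68, -72, -90, -71 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		rssi = moving_average(rssi, samples[i]);
		printf("sample %d -> avg %d\n", samples[i], rssi.avg);
	}
	return 0;
}

Keeping the running sum scaled by AVG_FACTOR is what lets the link tuner track fractional signal levels with integer arithmetic only.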
index c3c206a97d54c608da497256a7fcadd44293d9df..829bf4be9bc3aa4d9fe4d6a659c391368a072286 100644 (file)
@@ -283,14 +283,8 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
         * invalid behavior in the device.
         */
        memcpy(&intf->mac, vif->addr, ETH_ALEN);
-       if (vif->type == NL80211_IFTYPE_AP) {
-               memcpy(&intf->bssid, vif->addr, ETH_ALEN);
-               rt2x00lib_config_intf(rt2x00dev, intf, vif->type,
-                                     intf->mac, intf->bssid);
-       } else {
-               rt2x00lib_config_intf(rt2x00dev, intf, vif->type,
-                                     intf->mac, NULL);
-       }
+       rt2x00lib_config_intf(rt2x00dev, intf, vif->type,
+                             intf->mac, NULL);
 
        /*
         * Some filters depend on the current working mode. We can force
@@ -358,7 +352,7 @@ int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed)
         * if for any reason the link tuner must be reset, this will be
         * handled by rt2x00lib_config().
         */
-       rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF_LINK);
+       rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_RX_OFF);
 
        /*
         * When we've just turned on the radio, we want to reprogram
@@ -376,7 +370,7 @@ int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed)
        rt2x00lib_config_antenna(rt2x00dev, rt2x00dev->default_ant);
 
        /* Turn RX back on */
-       rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON_LINK);
+       rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_RX_ON);
 
        return 0;
 }
@@ -719,3 +713,41 @@ void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw)
        wiphy_rfkill_set_hw_state(hw->wiphy, !active);
 }
 EXPORT_SYMBOL_GPL(rt2x00mac_rfkill_poll);
+
+void rt2x00mac_flush(struct ieee80211_hw *hw, bool drop)
+{
+       struct rt2x00_dev *rt2x00dev = hw->priv;
+       struct data_queue *queue;
+       unsigned int i = 0;
+
+       ieee80211_stop_queues(hw);
+
+       /*
+        * Run over all queues to kick them, this will force
+        * any pending frames to be transmitted.
+        */
+       tx_queue_for_each(rt2x00dev, queue) {
+               rt2x00dev->ops->lib->kick_tx_queue(queue);
+       }
+
+       /**
+        * All queues have been kicked, now wait for each queue
+        * to become empty. With a bit of luck, we only have to wait
+        * for the first queue to become empty, because while waiting
+        * for that queue, the other queues will have transmitted
+        * all their frames as well (since they were already kicked).
+        */
+       tx_queue_for_each(rt2x00dev, queue) {
+               for (i = 0; i < 10; i++) {
+                       if (rt2x00queue_empty(queue))
+                               break;
+                       msleep(100);
+               }
+
+               if (!rt2x00queue_empty(queue))
+                       WARNING(rt2x00dev, "Failed to flush queue %d", queue->qid);
+       }
+
+       ieee80211_wake_queues(hw);
+}
+EXPORT_SYMBOL_GPL(rt2x00mac_flush);
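
rt2x00mac_flush() above first kicks every TX queue and only then polls each one for up to 10 x 100 ms. A userspace sketch of that kick-all-then-wait pattern (queue_empty() and the simulated pending counts are stand-ins for the driver's real queue state):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define NUM_QUEUES 4

static int pending[NUM_QUEUES] = { 3, 0, 1, 0 };

static bool queue_empty(int qid)
{
	/* Pretend one pending frame completes each time we check. */
	if (pending[qid] > 0)
		pending[qid]--;
	return pending[qid] == 0;
}

int main(void)
{
	/* 1) Kick every queue first so they all drain in parallel. */
	for (int qid = 0; qid < NUM_QUEUES; qid++)
		printf("kick queue %d\n", qid);

	/* 2) Then poll each queue with a bounded wait (10 x 100 ms). */
	for (int qid = 0; qid < NUM_QUEUES; qid++) {
		int i;

		for (i = 0; i < 10; i++) {
			if (queue_empty(qid))
				break;
			usleep(100 * 1000);
		}
		if (i == 10)
			fprintf(stderr, "failed to flush queue %d\n", qid);
	}
	return 0;
}

Kicking everything before waiting means the queues drain concurrently, so in the common case only the first wait actually sleeps.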
index 2449d785cf8d5b4d9b5260a0a2e4f79d0506fdd9..868ca19b13ea3e59b57a475ccdbc63a919693862 100644 (file)
@@ -105,7 +105,7 @@ static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
         */
        addr = dma_alloc_coherent(rt2x00dev->dev,
                                  queue->limit * queue->desc_size,
-                                 &dma, GFP_KERNEL | GFP_DMA);
+                                 &dma, GFP_KERNEL);
        if (!addr)
                return -ENOMEM;
 
index e360d287defb01ae61672feae4e3618b3a8bea79..dc543174dfaddc7df9c3718f2b870a9753d31d78 100644 (file)
@@ -310,14 +310,6 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
 
        memset(txdesc, 0, sizeof(*txdesc));
 
-       /*
-        * Initialize information from queue
-        */
-       txdesc->qid = entry->queue->qid;
-       txdesc->cw_min = entry->queue->cw_min;
-       txdesc->cw_max = entry->queue->cw_max;
-       txdesc->aifs = entry->queue->aifs;
-
        /*
         * Header and frame information.
         */
@@ -460,12 +452,9 @@ static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
        rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
 }
 
-static void rt2x00queue_kick_tx_queue(struct queue_entry *entry,
+static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
                                      struct txentry_desc *txdesc)
 {
-       struct data_queue *queue = entry->queue;
-       struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
-
        /*
         * Check if we need to kick the queue, there are however a few rules
         *      1) Don't kick unless this is the last in frame in a burst.
@@ -477,7 +466,7 @@ static void rt2x00queue_kick_tx_queue(struct queue_entry *entry,
         */
        if (rt2x00queue_threshold(queue) ||
            !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
-               rt2x00dev->ops->lib->kick_tx_queue(queue);
+               queue->rt2x00dev->ops->lib->kick_tx_queue(queue);
 }
 
 int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
@@ -567,7 +556,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
 
        rt2x00queue_index_inc(queue, Q_INDEX);
        rt2x00queue_write_tx_descriptor(entry, &txdesc);
-       rt2x00queue_kick_tx_queue(entry, &txdesc);
+       rt2x00queue_kick_tx_queue(queue, &txdesc);
 
        return 0;
 }
@@ -649,10 +638,10 @@ void rt2x00queue_for_each_entry(struct data_queue *queue,
         * it should not be kicked during this run, since it
         * is part of another TX operation.
         */
-       spin_lock_irqsave(&queue->lock, irqflags);
+       spin_lock_irqsave(&queue->index_lock, irqflags);
        index_start = queue->index[start];
        index_end = queue->index[end];
-       spin_unlock_irqrestore(&queue->lock, irqflags);
+       spin_unlock_irqrestore(&queue->index_lock, irqflags);
 
        /*
         * Start from the TX done pointer, this guarentees that we will
@@ -706,11 +695,11 @@ struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
                return NULL;
        }
 
-       spin_lock_irqsave(&queue->lock, irqflags);
+       spin_lock_irqsave(&queue->index_lock, irqflags);
 
        entry = &queue->entries[queue->index[index]];
 
-       spin_unlock_irqrestore(&queue->lock, irqflags);
+       spin_unlock_irqrestore(&queue->index_lock, irqflags);
 
        return entry;
 }
@@ -726,7 +715,7 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
                return;
        }
 
-       spin_lock_irqsave(&queue->lock, irqflags);
+       spin_lock_irqsave(&queue->index_lock, irqflags);
 
        queue->index[index]++;
        if (queue->index[index] >= queue->limit)
@@ -741,7 +730,7 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
                queue->count++;
        }
 
-       spin_unlock_irqrestore(&queue->lock, irqflags);
+       spin_unlock_irqrestore(&queue->index_lock, irqflags);
 }
 
 static void rt2x00queue_reset(struct data_queue *queue)
@@ -749,7 +738,7 @@ static void rt2x00queue_reset(struct data_queue *queue)
        unsigned long irqflags;
        unsigned int i;
 
-       spin_lock_irqsave(&queue->lock, irqflags);
+       spin_lock_irqsave(&queue->index_lock, irqflags);
 
        queue->count = 0;
        queue->length = 0;
@@ -759,7 +748,7 @@ static void rt2x00queue_reset(struct data_queue *queue)
                queue->last_action[i] = jiffies;
        }
 
-       spin_unlock_irqrestore(&queue->lock, irqflags);
+       spin_unlock_irqrestore(&queue->index_lock, irqflags);
 }
 
 void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
@@ -809,8 +798,8 @@ static int rt2x00queue_alloc_entries(struct data_queue *queue,
                return -ENOMEM;
 
 #define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
-       ( ((char *)(__base)) + ((__limit) * (__esize)) + \
-           ((__index) * (__psize)) )
+       (((char *)(__base)) + ((__limit) * (__esize)) + \
+           ((__index) * (__psize)))
 
        for (i = 0; i < queue->limit; i++) {
                entries[i].flags = 0;
@@ -911,7 +900,7 @@ void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
 static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
                             struct data_queue *queue, enum data_queue_qid qid)
 {
-       spin_lock_init(&queue->lock);
+       spin_lock_init(&queue->index_lock);
 
        queue->rt2x00dev = rt2x00dev;
        queue->qid = qid;
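
The rt2x00queue.c hunks above rename queue->lock to index_lock; the lock exists purely to keep the circular Q_INDEX/Q_INDEX_DONE bookkeeping in rt2x00queue_index_inc() consistent. A compact userspace sketch of that bookkeeping, with a pthread mutex standing in for the spinlock (the length/count updates only loosely mirror the driver):

#include <pthread.h>
#include <stdio.h>

enum queue_index { Q_INDEX, Q_INDEX_DONE, Q_INDEX_MAX };

struct data_queue {
	pthread_mutex_t index_lock;
	unsigned int index[Q_INDEX_MAX];
	unsigned int limit;
	unsigned int length;
	unsigned int count;
};

static void queue_index_inc(struct data_queue *q, enum queue_index idx)
{
	pthread_mutex_lock(&q->index_lock);

	/* Advance the chosen index and wrap it at the queue limit. */
	q->index[idx]++;
	if (q->index[idx] >= q->limit)
		q->index[idx] = 0;

	if (idx == Q_INDEX)
		q->length++;		/* a new frame entered the queue */
	else if (idx == Q_INDEX_DONE) {
		q->length--;		/* a frame fully completed */
		q->count++;
	}

	pthread_mutex_unlock(&q->index_lock);
}

int main(void)
{
	struct data_queue q = {
		.index_lock = PTHREAD_MUTEX_INITIALIZER,
		.limit = 32,
	};

	queue_index_inc(&q, Q_INDEX);
	queue_index_inc(&q, Q_INDEX_DONE);
	printf("length=%u count=%u\n", q.length, q.count);
	return 0;
}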
index d81d85f3486611cb79b18571694727c9616d7885..29b051ac64012461b48d33a9f557bdf0c2897cd9 100644 (file)
 #define MGMT_FRAME_SIZE                256
 #define AGGREGATION_SIZE       3840
 
-/**
- * DOC: Number of entries per queue
- *
- * Under normal load without fragmentation, 12 entries are sufficient
- * without the queue being filled up to the maximum. When using fragmentation
- * and the queue threshold code, we need to add some additional margins to
- * make sure the queue will never (or only under extreme load) fill up
- * completely.
- * Since we don't use preallocated DMA, having a large number of queue entries
- * will have minimal impact on the memory requirements for the queue.
- */
-#define RX_ENTRIES     24
-#define TX_ENTRIES     24
-#define BEACON_ENTRIES 1
-#define ATIM_ENTRIES   8
-
 /**
  * enum data_queue_qid: Queue identification
  *
@@ -296,7 +280,6 @@ enum txentry_desc_flags {
  * Summary of information for the frame descriptor before sending a TX frame.
  *
  * @flags: Descriptor flags (See &enum queue_entry_flags).
- * @qid: Queue identification (See &enum data_queue_qid).
  * @length: Length of the entire frame.
  * @header_length: Length of 802.11 header.
  * @length_high: PLCP length high word.
@@ -309,11 +292,8 @@ enum txentry_desc_flags {
  * @rate_mode: Rate mode (See @enum rate_modulation).
  * @mpdu_density: MDPU density.
  * @retry_limit: Max number of retries.
- * @aifs: AIFS value.
  * @ifs: IFS value.
  * @txop: IFS value for 11n capable chips.
- * @cw_min: cwmin value.
- * @cw_max: cwmax value.
  * @cipher: Cipher type used for encryption.
  * @key_idx: Key index used for encryption.
  * @iv_offset: Position where IV should be inserted by hardware.
@@ -322,8 +302,6 @@ enum txentry_desc_flags {
 struct txentry_desc {
        unsigned long flags;
 
-       enum data_queue_qid qid;
-
        u16 length;
        u16 header_length;
 
@@ -339,11 +317,8 @@ struct txentry_desc {
        u16 mpdu_density;
 
        short retry_limit;
-       short aifs;
        short ifs;
        short txop;
-       short cw_min;
-       short cw_max;
 
        enum cipher cipher;
        u16 key_idx;
@@ -423,7 +398,7 @@ enum queue_index {
  * @entries: Base address of the &struct queue_entry which are
  *     part of this queue.
  * @qid: The queue identification, see &enum data_queue_qid.
- * @lock: Spinlock to protect index handling. Whenever @index, @index_done or
+ * @index_lock: Spinlock to protect index handling. Whenever @index, @index_done or
  *     @index_crypt needs to be changed this lock should be grabbed to prevent
  *     index corruption due to concurrency.
  * @count: Number of frames handled in the queue.
@@ -447,7 +422,7 @@ struct data_queue {
 
        enum data_queue_qid qid;
 
-       spinlock_t lock;
+       spinlock_t index_lock;
        unsigned int count;
        unsigned short limit;
        unsigned short threshold;
@@ -618,10 +593,10 @@ static inline int rt2x00queue_threshold(struct data_queue *queue)
 }
 
 /**
- * rt2x00queue_timeout - Check if a timeout occured for STATUS reorts
+ * rt2x00queue_status_timeout - Check if a timeout occurred for STATUS reports
  * @queue: Queue to check.
  */
-static inline int rt2x00queue_timeout(struct data_queue *queue)
+static inline int rt2x00queue_status_timeout(struct data_queue *queue)
 {
        return time_after(queue->last_action[Q_INDEX_DMA_DONE],
                          queue->last_action[Q_INDEX_DONE] + (HZ / 10));
index cef94621cef791ade84a675c629c56ff16bebc72..ed71be95136d619019346d8bcea318372369a063 100644 (file)
@@ -85,8 +85,6 @@ enum dev_state {
        STATE_RADIO_OFF,
        STATE_RADIO_RX_ON,
        STATE_RADIO_RX_OFF,
-       STATE_RADIO_RX_ON_LINK,
-       STATE_RADIO_RX_OFF_LINK,
        STATE_RADIO_IRQ_ON,
        STATE_RADIO_IRQ_OFF,
        STATE_RADIO_IRQ_ON_ISR,
index b3317df7a7d4afdd88169c8ad16f2ad99984bc9e..9ac14598e2a0e614833a879cb226f345e2d86178 100644 (file)
@@ -226,9 +226,7 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
         * Schedule the delayed work for reading the TX status
         * from the device.
         */
-       if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
-           test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
-               ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->txdone_work);
+       ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->txdone_work);
 }
 
 static void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
@@ -322,21 +320,6 @@ static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
         */
        rt2x00dev->txdone_work.func(&rt2x00dev->txdone_work);
 
-       /*
-        * Security measure: if the driver did override the
-        * txdone_work function, and the hardware did arrive
-        * in a state which causes it to malfunction, it is
-        * possible that the driver couldn't handle the txdone
-        * event correctly. So after giving the driver the
-        * chance to cleanup, we now force a cleanup of any
-        * leftovers.
-        */
-       if (!rt2x00queue_empty(queue)) {
-               WARNING(queue->rt2x00dev, "TX queue %d DMA timed out,"
-                       " status handling failed, invoke hard reset", queue->qid);
-               rt2x00usb_work_txdone(&rt2x00dev->txdone_work);
-       }
-
        /*
         * The queue has been reset, and mac80211 is allowed to use the
         * queue again.
@@ -361,7 +344,7 @@ void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
                if (!rt2x00queue_empty(queue)) {
                        if (rt2x00queue_dma_timeout(queue))
                                rt2x00usb_watchdog_tx_dma(queue);
-                       if (rt2x00queue_timeout(queue))
+                       if (rt2x00queue_status_timeout(queue))
                                rt2x00usb_watchdog_tx_status(queue);
                }
        }
@@ -424,9 +407,7 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
         * Schedule the delayed work for reading the RX status
         * from the device.
         */
-       if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
-           test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
-               ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->rxdone_work);
+       ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->rxdone_work);
 }
 
 /*
index af548c87f1084a6d641430bfd5fbf84bef52b8a0..6b09b01f634f029c7cc4c0eda8951ad2f6e354e1 100644 (file)
@@ -1623,8 +1623,7 @@ static void rt61pci_toggle_rx(struct rt2x00_dev *rt2x00dev,
 
        rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, &reg);
        rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX,
-                          (state == STATE_RADIO_RX_OFF) ||
-                          (state == STATE_RADIO_RX_OFF_LINK));
+                          (state == STATE_RADIO_RX_OFF));
        rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg);
 }
 
@@ -1745,9 +1744,7 @@ static int rt61pci_set_device_state(struct rt2x00_dev *rt2x00dev,
                rt61pci_disable_radio(rt2x00dev);
                break;
        case STATE_RADIO_RX_ON:
-       case STATE_RADIO_RX_ON_LINK:
        case STATE_RADIO_RX_OFF:
-       case STATE_RADIO_RX_OFF_LINK:
                rt61pci_toggle_rx(rt2x00dev, state);
                break;
        case STATE_RADIO_IRQ_ON:
@@ -1789,10 +1786,10 @@ static void rt61pci_write_tx_desc(struct queue_entry *entry,
         * Start writing the descriptor words.
         */
        rt2x00_desc_read(txd, 1, &word);
-       rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, txdesc->qid);
-       rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs);
-       rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min);
-       rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
+       rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, entry->queue->qid);
+       rt2x00_set_field32(&word, TXD_W1_AIFSN, entry->queue->aifs);
+       rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min);
+       rt2x00_set_field32(&word, TXD_W1_CWMAX, entry->queue->cw_max);
        rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset);
        rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE,
                           test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
@@ -1820,7 +1817,7 @@ static void rt61pci_write_tx_desc(struct queue_entry *entry,
        rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1);
        rt2x00_desc_write(txd, 5, word);
 
-       if (txdesc->qid != QID_BEACON) {
+       if (entry->queue->qid != QID_BEACON) {
                rt2x00_desc_read(txd, 6, &word);
                rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS,
                                   skbdesc->skb_dma);
@@ -1866,8 +1863,8 @@ static void rt61pci_write_tx_desc(struct queue_entry *entry,
         * Register descriptor details in skb frame descriptor.
         */
        skbdesc->desc = txd;
-       skbdesc->desc_len =
-               (txdesc->qid == QID_BEACON) ?  TXINFO_SIZE : TXD_DESC_SIZE;
+       skbdesc->desc_len = (entry->queue->qid == QID_BEACON) ? TXINFO_SIZE :
+                           TXD_DESC_SIZE;
 }
 
 /*
@@ -2078,7 +2075,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
         * that the TX_STA_FIFO stack has a size of 16. We stick to our
         * tx ring size for now.
         */
-       for (i = 0; i < TX_ENTRIES; i++) {
+       for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) {
                rt2x00pci_register_read(rt2x00dev, STA_CSR4, &reg);
                if (!rt2x00_get_field32(reg, STA_CSR4_VALID))
                        break;
@@ -2824,6 +2821,7 @@ static const struct ieee80211_ops rt61pci_mac80211_ops = {
        .conf_tx                = rt61pci_conf_tx,
        .get_tsf                = rt61pci_get_tsf,
        .rfkill_poll            = rt2x00mac_rfkill_poll,
+       .flush                  = rt2x00mac_flush,
 };
 
 static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
@@ -2857,21 +2855,21 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
 };
 
 static const struct data_queue_desc rt61pci_queue_rx = {
-       .entry_num              = RX_ENTRIES,
+       .entry_num              = 32,
        .data_size              = DATA_FRAME_SIZE,
        .desc_size              = RXD_DESC_SIZE,
        .priv_size              = sizeof(struct queue_entry_priv_pci),
 };
 
 static const struct data_queue_desc rt61pci_queue_tx = {
-       .entry_num              = TX_ENTRIES,
+       .entry_num              = 32,
        .data_size              = DATA_FRAME_SIZE,
        .desc_size              = TXD_DESC_SIZE,
        .priv_size              = sizeof(struct queue_entry_priv_pci),
 };
 
 static const struct data_queue_desc rt61pci_queue_bcn = {
-       .entry_num              = 4 * BEACON_ENTRIES,
+       .entry_num              = 4,
        .data_size              = 0, /* No DMA required for beacons */
        .desc_size              = TXINFO_SIZE,
        .priv_size              = sizeof(struct queue_entry_priv_pci),
index e2e728ab0b2e747ca88486eceeaf73ce869a4ab7..afc803b7959fae0fe6b99ac64ea39280a1a7114e 100644 (file)
@@ -412,7 +412,7 @@ struct hw_pairwise_ta_entry {
  * DROP_VERSION_ERROR: Drop version error frame.
  * DROP_MULTICAST: Drop multicast frames.
  * DROP_BORADCAST: Drop broadcast frames.
- * ROP_ACK_CTS: Drop received ACK and CTS.
+ * DROP_ACK_CTS: Drop received ACK and CTS.
  */
 #define TXRX_CSR0                      0x3040
 #define TXRX_CSR0_RX_ACK_TIMEOUT       FIELD32(0x000001ff)
index 9be8089317e4b1a6ffe7921f9d46bae9336b7cc7..6f04552f581987ff802bc180deef39291425a5fe 100644 (file)
@@ -40,7 +40,7 @@
 /*
  * Allow hardware encryption to be disabled.
  */
-static int modparam_nohwcrypt = 0;
+static int modparam_nohwcrypt;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 
@@ -1331,8 +1331,7 @@ static void rt73usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
 
        rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
        rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX,
-                          (state == STATE_RADIO_RX_OFF) ||
-                          (state == STATE_RADIO_RX_OFF_LINK));
+                          (state == STATE_RADIO_RX_OFF));
        rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg);
 }
 
@@ -1403,9 +1402,7 @@ static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev,
                rt73usb_disable_radio(rt2x00dev);
                break;
        case STATE_RADIO_RX_ON:
-       case STATE_RADIO_RX_ON_LINK:
        case STATE_RADIO_RX_OFF:
-       case STATE_RADIO_RX_OFF_LINK:
                rt73usb_toggle_rx(rt2x00dev, state);
                break;
        case STATE_RADIO_IRQ_ON:
@@ -1472,10 +1469,10 @@ static void rt73usb_write_tx_desc(struct queue_entry *entry,
        rt2x00_desc_write(txd, 0, word);
 
        rt2x00_desc_read(txd, 1, &word);
-       rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, txdesc->qid);
-       rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs);
-       rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min);
-       rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
+       rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, entry->queue->qid);
+       rt2x00_set_field32(&word, TXD_W1_AIFSN, entry->queue->aifs);
+       rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min);
+       rt2x00_set_field32(&word, TXD_W1_CWMAX, entry->queue->cw_max);
        rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset);
        rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE,
                           test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
@@ -2264,6 +2261,7 @@ static const struct ieee80211_ops rt73usb_mac80211_ops = {
        .conf_tx                = rt73usb_conf_tx,
        .get_tsf                = rt73usb_get_tsf,
        .rfkill_poll            = rt2x00mac_rfkill_poll,
+       .flush                  = rt2x00mac_flush,
 };
 
 static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
@@ -2296,21 +2294,21 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
 };
 
 static const struct data_queue_desc rt73usb_queue_rx = {
-       .entry_num              = RX_ENTRIES,
+       .entry_num              = 32,
        .data_size              = DATA_FRAME_SIZE,
        .desc_size              = RXD_DESC_SIZE,
        .priv_size              = sizeof(struct queue_entry_priv_usb),
 };
 
 static const struct data_queue_desc rt73usb_queue_tx = {
-       .entry_num              = TX_ENTRIES,
+       .entry_num              = 32,
        .data_size              = DATA_FRAME_SIZE,
        .desc_size              = TXD_DESC_SIZE,
        .priv_size              = sizeof(struct queue_entry_priv_usb),
 };
 
 static const struct data_queue_desc rt73usb_queue_bcn = {
-       .entry_num              = 4 * BEACON_ENTRIES,
+       .entry_num              = 4,
        .data_size              = MGMT_FRAME_SIZE,
        .desc_size              = TXINFO_SIZE,
        .priv_size              = sizeof(struct queue_entry_priv_usb),
index 44d5b2bebd399c0645956157d25edefc8c444110..1315ce5c992f839c6979f09ffbfcee9c32179cd2 100644 (file)
@@ -322,7 +322,7 @@ struct hw_pairwise_ta_entry {
  * DROP_VERSION_ERROR: Drop version error frame.
  * DROP_MULTICAST: Drop multicast frames.
  * DROP_BORADCAST: Drop broadcast frames.
- * ROP_ACK_CTS: Drop received ACK and CTS.
+ * DROP_ACK_CTS: Drop received ACK and CTS.
  */
 #define TXRX_CSR0                      0x3040
 #define TXRX_CSR0_RX_ACK_TIMEOUT       FIELD32(0x000001ff)
index 38fa8244cc96d93d26f7d7201d4d2d1a85c129c9..eeee244fcaabebde76a0d19fe59a2206fa33a972 100644 (file)
@@ -553,6 +553,46 @@ static int rtl8187b_init_status_urb(struct ieee80211_hw *dev)
        return ret;
 }
 
+static void rtl8187_set_anaparam(struct rtl8187_priv *priv, bool rfon)
+{
+       u32 anaparam, anaparam2;
+       u8 anaparam3, reg;
+
+       if (!priv->is_rtl8187b) {
+               if (rfon) {
+                       anaparam = RTL8187_RTL8225_ANAPARAM_ON;
+                       anaparam2 = RTL8187_RTL8225_ANAPARAM2_ON;
+               } else {
+                       anaparam = RTL8187_RTL8225_ANAPARAM_OFF;
+                       anaparam2 = RTL8187_RTL8225_ANAPARAM2_OFF;
+               }
+       } else {
+               if (rfon) {
+                       anaparam = RTL8187B_RTL8225_ANAPARAM_ON;
+                       anaparam2 = RTL8187B_RTL8225_ANAPARAM2_ON;
+                       anaparam3 = RTL8187B_RTL8225_ANAPARAM3_ON;
+               } else {
+                       anaparam = RTL8187B_RTL8225_ANAPARAM_OFF;
+                       anaparam2 = RTL8187B_RTL8225_ANAPARAM2_OFF;
+                       anaparam3 = RTL8187B_RTL8225_ANAPARAM3_OFF;
+               }
+       }
+
+       rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
+                        RTL818X_EEPROM_CMD_CONFIG);
+       reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
+       reg |= RTL818X_CONFIG3_ANAPARAM_WRITE;
+       rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg);
+       rtl818x_iowrite32(priv, &priv->map->ANAPARAM, anaparam);
+       rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, anaparam2);
+       if (priv->is_rtl8187b)
+               rtl818x_iowrite8(priv, &priv->map->ANAPARAM3, anaparam3);
+       reg &= ~RTL818X_CONFIG3_ANAPARAM_WRITE;
+       rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg);
+       rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
+                        RTL818X_EEPROM_CMD_NORMAL);
+}
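
rtl8187_set_anaparam() above folds the repeated CONFIG3-unlock / ANAPARAM-write / relock sequences into one helper keyed on the chip variant and the desired RF state. A sketch of that consolidation pattern (the register values below are placeholders, not the real RTL8187 constants):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct anaparam_set {
	uint32_t anaparam;
	uint32_t anaparam2;
	uint8_t  anaparam3;	/* 8187B only */
};

/* Placeholder values for illustration only. */
static const struct anaparam_set rtl8187_on   = { 0x11111111, 0x22222222, 0x00 };
static const struct anaparam_set rtl8187_off  = { 0x33333333, 0x44444444, 0x00 };
static const struct anaparam_set rtl8187b_on  = { 0x55555555, 0x66666666, 0x01 };
static const struct anaparam_set rtl8187b_off = { 0x77777777, 0x88888888, 0x02 };

static void set_anaparam(bool is_8187b, bool rfon)
{
	const struct anaparam_set *s;

	if (!is_8187b)
		s = rfon ? &rtl8187_on : &rtl8187_off;
	else
		s = rfon ? &rtl8187b_on : &rtl8187b_off;

	/* Stand-in for the unlock / register writes / relock sequence. */
	printf("ANAPARAM=0x%08x ANAPARAM2=0x%08x\n", s->anaparam, s->anaparam2);
	if (is_8187b)
		printf("ANAPARAM3=0x%02x\n", s->anaparam3);
}

int main(void)
{
	set_anaparam(false, true);
	set_anaparam(true, false);
	return 0;
}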
+
 static int rtl8187_cmd_reset(struct ieee80211_hw *dev)
 {
        struct rtl8187_priv *priv = dev->priv;
@@ -603,19 +643,7 @@ static int rtl8187_init_hw(struct ieee80211_hw *dev)
        int res;
 
        /* reset */
-       rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
-                        RTL818X_EEPROM_CMD_CONFIG);
-       reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
-       rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg |
-                        RTL818X_CONFIG3_ANAPARAM_WRITE);
-       rtl818x_iowrite32(priv, &priv->map->ANAPARAM,
-                         RTL8187_RTL8225_ANAPARAM_ON);
-       rtl818x_iowrite32(priv, &priv->map->ANAPARAM2,
-                         RTL8187_RTL8225_ANAPARAM2_ON);
-       rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg &
-                        ~RTL818X_CONFIG3_ANAPARAM_WRITE);
-       rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
-                        RTL818X_EEPROM_CMD_NORMAL);
+       rtl8187_set_anaparam(priv, true);
 
        rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
 
@@ -629,17 +657,7 @@ static int rtl8187_init_hw(struct ieee80211_hw *dev)
        if (res)
                return res;
 
-       rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
-       reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
-       rtl818x_iowrite8(priv, &priv->map->CONFIG3,
-                       reg | RTL818X_CONFIG3_ANAPARAM_WRITE);
-       rtl818x_iowrite32(priv, &priv->map->ANAPARAM,
-                         RTL8187_RTL8225_ANAPARAM_ON);
-       rtl818x_iowrite32(priv, &priv->map->ANAPARAM2,
-                         RTL8187_RTL8225_ANAPARAM2_ON);
-       rtl818x_iowrite8(priv, &priv->map->CONFIG3,
-                       reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE);
-       rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
+       rtl8187_set_anaparam(priv, true);
 
        /* setup card */
        rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0);
@@ -712,10 +730,9 @@ static const u8 rtl8187b_reg_table[][3] = {
 
        {0x58, 0x4B, 1}, {0x59, 0x00, 1}, {0x5A, 0x4B, 1}, {0x5B, 0x00, 1},
        {0x60, 0x4B, 1}, {0x61, 0x09, 1}, {0x62, 0x4B, 1}, {0x63, 0x09, 1},
-       {0xCE, 0x0F, 1}, {0xCF, 0x00, 1}, {0xE0, 0xFF, 1}, {0xE1, 0x0F, 1},
-       {0xE2, 0x00, 1}, {0xF0, 0x4E, 1}, {0xF1, 0x01, 1}, {0xF2, 0x02, 1},
-       {0xF3, 0x03, 1}, {0xF4, 0x04, 1}, {0xF5, 0x05, 1}, {0xF6, 0x06, 1},
-       {0xF7, 0x07, 1}, {0xF8, 0x08, 1},
+       {0xCE, 0x0F, 1}, {0xCF, 0x00, 1}, {0xF0, 0x4E, 1}, {0xF1, 0x01, 1},
+       {0xF2, 0x02, 1}, {0xF3, 0x03, 1}, {0xF4, 0x04, 1}, {0xF5, 0x05, 1},
+       {0xF6, 0x06, 1}, {0xF7, 0x07, 1}, {0xF8, 0x08, 1},
 
        {0x4E, 0x00, 2}, {0x0C, 0x04, 2}, {0x21, 0x61, 2}, {0x22, 0x68, 2},
        {0x23, 0x6F, 2}, {0x24, 0x76, 2}, {0x25, 0x7D, 2}, {0x26, 0x84, 2},
@@ -723,14 +740,13 @@ static const u8 rtl8187b_reg_table[][3] = {
        {0x52, 0x04, 2}, {0x53, 0xA0, 2}, {0x54, 0x1F, 2}, {0x55, 0x23, 2},
        {0x56, 0x45, 2}, {0x57, 0x67, 2}, {0x58, 0x08, 2}, {0x59, 0x08, 2},
        {0x5A, 0x08, 2}, {0x5B, 0x08, 2}, {0x60, 0x08, 2}, {0x61, 0x08, 2},
-       {0x62, 0x08, 2}, {0x63, 0x08, 2}, {0x64, 0xCF, 2}, {0x72, 0x56, 2},
-       {0x73, 0x9A, 2},
+       {0x62, 0x08, 2}, {0x63, 0x08, 2}, {0x64, 0xCF, 2},
 
-       {0x34, 0xF0, 0}, {0x35, 0x0F, 0}, {0x5B, 0x40, 0}, {0x84, 0x88, 0},
-       {0x85, 0x24, 0}, {0x88, 0x54, 0}, {0x8B, 0xB8, 0}, {0x8C, 0x07, 0},
-       {0x8D, 0x00, 0}, {0x94, 0x1B, 0}, {0x95, 0x12, 0}, {0x96, 0x00, 0},
-       {0x97, 0x06, 0}, {0x9D, 0x1A, 0}, {0x9F, 0x10, 0}, {0xB4, 0x22, 0},
-       {0xBE, 0x80, 0}, {0xDB, 0x00, 0}, {0xEE, 0x00, 0}, {0x4C, 0x00, 2},
+       {0x5B, 0x40, 0}, {0x84, 0x88, 0}, {0x85, 0x24, 0}, {0x88, 0x54, 0},
+       {0x8B, 0xB8, 0}, {0x8C, 0x07, 0}, {0x8D, 0x00, 0}, {0x94, 0x1B, 0},
+       {0x95, 0x12, 0}, {0x96, 0x00, 0}, {0x97, 0x06, 0}, {0x9D, 0x1A, 0},
+       {0x9F, 0x10, 0}, {0xB4, 0x22, 0}, {0xBE, 0x80, 0}, {0xDB, 0x00, 0},
+       {0xEE, 0x00, 0}, {0x4C, 0x00, 2},
 
        {0x9F, 0x00, 3}, {0x8C, 0x01, 0}, {0x8D, 0x10, 0}, {0x8E, 0x08, 0},
        {0x8F, 0x00, 0}
@@ -742,48 +758,34 @@ static int rtl8187b_init_hw(struct ieee80211_hw *dev)
        int res, i;
        u8 reg;
 
-       rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
-                        RTL818X_EEPROM_CMD_CONFIG);
-
-       reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
-       reg |= RTL818X_CONFIG3_ANAPARAM_WRITE | RTL818X_CONFIG3_GNT_SELECT;
-       rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg);
-       rtl818x_iowrite32(priv, &priv->map->ANAPARAM2,
-                         RTL8187B_RTL8225_ANAPARAM2_ON);
-       rtl818x_iowrite32(priv, &priv->map->ANAPARAM,
-                         RTL8187B_RTL8225_ANAPARAM_ON);
-       rtl818x_iowrite8(priv, &priv->map->ANAPARAM3,
-                        RTL8187B_RTL8225_ANAPARAM3_ON);
+       rtl8187_set_anaparam(priv, true);
 
+       /* Reset PLL sequence on 8187B. Realtek note: reduces power
+        * consumption by about 30 mA */
        rtl818x_iowrite8(priv, (u8 *)0xFF61, 0x10);
        reg = rtl818x_ioread8(priv, (u8 *)0xFF62);
        rtl818x_iowrite8(priv, (u8 *)0xFF62, reg & ~(1 << 5));
        rtl818x_iowrite8(priv, (u8 *)0xFF62, reg | (1 << 5));
 
-       reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
-       reg &= ~RTL818X_CONFIG3_ANAPARAM_WRITE;
-       rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg);
-
-       rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
-                        RTL818X_EEPROM_CMD_NORMAL);
-
        res = rtl8187_cmd_reset(dev);
        if (res)
                return res;
 
-       rtl818x_iowrite16(priv, (__le16 *)0xFF2D, 0x0FFF);
+       rtl8187_set_anaparam(priv, true);
+
+       /* BRSR (Basic Rate Set Register) on 8187B looks to be the same as
+        * RESP_RATE on 8187L in Realtek sources: each bit corresponds to
+        * one of the 12 rates, and all are enabled */
+       rtl818x_iowrite16(priv, (__le16 *)0xFF34, 0x0FFF);
+
        reg = rtl818x_ioread8(priv, &priv->map->CW_CONF);
        reg |= RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT;
        rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg);
-       reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
-       reg |= RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT |
-              RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT;
-       rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
 
+       /* Auto Rate Fallback Register (ARFR): 1M-54M setting */
        rtl818x_iowrite16_idx(priv, (__le16 *)0xFFE0, 0x0FFF, 1);
+       rtl818x_iowrite8_idx(priv, (u8 *)0xFFE2, 0x00, 1);
 
-       rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL, 100);
-       rtl818x_iowrite16(priv, &priv->map->ATIM_WND, 2);
        rtl818x_iowrite16_idx(priv, (__le16 *)0xFFD4, 0xFFFF, 1);
 
        rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
@@ -811,16 +813,9 @@ static int rtl8187b_init_hw(struct ieee80211_hw *dev)
 
        rtl818x_iowrite32(priv, &priv->map->RF_TIMING, 0x00004001);
 
+       /* RFSW_CTRL register */
        rtl818x_iowrite16_idx(priv, (__le16 *)0xFF72, 0x569A, 2);
 
-       rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
-                        RTL818X_EEPROM_CMD_CONFIG);
-       reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
-       reg |= RTL818X_CONFIG3_ANAPARAM_WRITE;
-       rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg);
-       rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
-                        RTL818X_EEPROM_CMD_NORMAL);
-
        rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x0480);
        rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x2488);
        rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF);
@@ -929,6 +924,12 @@ static int rtl8187_start(struct ieee80211_hw *dev)
                priv->rx_conf = reg;
                rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg);
 
+               reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
+               reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT;
+               reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT;
+               reg &= ~RTL818X_TX_AGC_CTL_FEEDBACK_ANT;
+               rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
+
                rtl818x_iowrite32(priv, &priv->map->TX_CONF,
                                  RTL818X_TX_CONF_HW_SEQNUM |
                                  RTL818X_TX_CONF_DISREQQSIZE |
@@ -1002,6 +1003,7 @@ static void rtl8187_stop(struct ieee80211_hw *dev)
        rtl818x_iowrite8(priv, &priv->map->CMD, reg);
 
        priv->rf->stop(dev);
+       rtl8187_set_anaparam(priv, false);
 
        rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
        reg = rtl818x_ioread8(priv, &priv->map->CONFIG4);
index 97eebdcf7eb9f432729c18a3ad090fe8934e0171..5c6666f09ac1613de2833bc1cac968e1102578c0 100644 (file)
@@ -898,29 +898,7 @@ static void rtl8225z2_b_rf_init(struct ieee80211_hw *dev)
 
 static void rtl8225_rf_stop(struct ieee80211_hw *dev)
 {
-       u8 reg;
-       struct rtl8187_priv *priv = dev->priv;
-
        rtl8225_write(dev, 0x4, 0x1f);
-
-       rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
-       reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
-       rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | RTL818X_CONFIG3_ANAPARAM_WRITE);
-       if (!priv->is_rtl8187b) {
-               rtl818x_iowrite32(priv, &priv->map->ANAPARAM2,
-                                 RTL8187_RTL8225_ANAPARAM2_OFF);
-               rtl818x_iowrite32(priv, &priv->map->ANAPARAM,
-                                 RTL8187_RTL8225_ANAPARAM_OFF);
-       } else {
-               rtl818x_iowrite32(priv, &priv->map->ANAPARAM2,
-                                 RTL8187B_RTL8225_ANAPARAM2_OFF);
-               rtl818x_iowrite32(priv, &priv->map->ANAPARAM,
-                                 RTL8187B_RTL8225_ANAPARAM_OFF);
-               rtl818x_iowrite8(priv, &priv->map->ANAPARAM3,
-                                 RTL8187B_RTL8225_ANAPARAM3_OFF);
-       }
-       rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE);
-       rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
 }
 
 static void rtl8225_rf_set_channel(struct ieee80211_hw *dev,
index 7a8762553cdcb54470d5b430fa7019fabf6b471c..012e1a4016fed0a195a2790f5b23c8ab9b884062 100644 (file)
@@ -52,14 +52,14 @@ void wl1251_disable_interrupts(struct wl1251 *wl)
        wl->if_ops->disable_irq(wl);
 }
 
-static void wl1251_power_off(struct wl1251 *wl)
+static int wl1251_power_off(struct wl1251 *wl)
 {
-       wl->set_power(false);
+       return wl->if_ops->power(wl, false);
 }
 
-static void wl1251_power_on(struct wl1251 *wl)
+static int wl1251_power_on(struct wl1251 *wl)
 {
-       wl->set_power(true);
+       return wl->if_ops->power(wl, true);
 }
 
 static int wl1251_fetch_firmware(struct wl1251 *wl)
@@ -152,9 +152,12 @@ static void wl1251_fw_wakeup(struct wl1251 *wl)
 
 static int wl1251_chip_wakeup(struct wl1251 *wl)
 {
-       int ret = 0;
+       int ret;
+
+       ret = wl1251_power_on(wl);
+       if (ret < 0)
+               return ret;
 
-       wl1251_power_on(wl);
        msleep(WL1251_POWER_ON_SLEEP);
        wl->if_ops->reset(wl);
 
index 74ba9ced5393fd35901643a53a5607ec68bf4747..596d90ecba33c6027c1e8be64e41ce8c15cf31a6 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/platform_device.h>
 #include <linux/wl12xx.h>
 #include <linux/irq.h>
+#include <linux/pm_runtime.h>
 
 #include "wl1251.h"
 
@@ -42,8 +43,6 @@ struct wl1251_sdio {
        u32 elp_val;
 };
 
-static struct wl12xx_platform_data *wl12xx_board_data;
-
 static struct sdio_func *wl_to_func(struct wl1251 *wl)
 {
        struct wl1251_sdio *wl_sdio = wl->if_priv;
@@ -171,8 +170,42 @@ static void wl1251_disable_line_irq(struct wl1251 *wl)
        return disable_irq(wl->irq);
 }
 
-static void wl1251_sdio_set_power(bool enable)
+static int wl1251_sdio_set_power(struct wl1251 *wl, bool enable)
 {
+       struct sdio_func *func = wl_to_func(wl);
+       int ret;
+
+       if (enable) {
+               /*
+                * Power is controlled by runtime PM, but we still call board
+                * callback in case it wants to do any additional setup,
+                * for example enabling clock buffer for the module.
+                */
+               if (wl->set_power)
+                       wl->set_power(true);
+
+               ret = pm_runtime_get_sync(&func->dev);
+               if (ret < 0)
+                       goto out;
+
+               sdio_claim_host(func);
+               sdio_enable_func(func);
+               sdio_release_host(func);
+       } else {
+               sdio_claim_host(func);
+               sdio_disable_func(func);
+               sdio_release_host(func);
+
+               ret = pm_runtime_put_sync(&func->dev);
+               if (ret < 0)
+                       goto out;
+
+               if (wl->set_power)
+                       wl->set_power(false);
+       }
+
+out:
+       return ret;
 }
 
 static struct wl1251_if_operations wl1251_sdio_ops = {
@@ -181,30 +214,7 @@ static struct wl1251_if_operations wl1251_sdio_ops = {
        .write_elp = wl1251_sdio_write_elp,
        .read_elp = wl1251_sdio_read_elp,
        .reset = wl1251_sdio_reset,
-};
-
-static int wl1251_platform_probe(struct platform_device *pdev)
-{
-       if (pdev->id != -1) {
-               wl1251_error("can only handle single device");
-               return -ENODEV;
-       }
-
-       wl12xx_board_data = pdev->dev.platform_data;
-       return 0;
-}
-
-/*
- * Dummy platform_driver for passing platform_data to this driver,
- * until we have a way to pass this through SDIO subsystem or
- * some other way.
- */
-static struct platform_driver wl1251_platform_driver = {
-       .driver = {
-               .name   = "wl1251_data",
-               .owner  = THIS_MODULE,
-       },
-       .probe  = wl1251_platform_probe,
+       .power = wl1251_sdio_set_power,
 };
 
 static int wl1251_sdio_probe(struct sdio_func *func,
@@ -214,6 +224,7 @@ static int wl1251_sdio_probe(struct sdio_func *func,
        struct wl1251 *wl;
        struct ieee80211_hw *hw;
        struct wl1251_sdio *wl_sdio;
+       const struct wl12xx_platform_data *wl12xx_board_data;
 
        hw = wl1251_alloc_hw();
        if (IS_ERR(hw))
@@ -239,8 +250,8 @@ static int wl1251_sdio_probe(struct sdio_func *func,
        wl_sdio->func = func;
        wl->if_priv = wl_sdio;
        wl->if_ops = &wl1251_sdio_ops;
-       wl->set_power = wl1251_sdio_set_power;
 
+       wl12xx_board_data = wl12xx_get_platform_data();
        if (wl12xx_board_data != NULL) {
                wl->set_power = wl12xx_board_data->set_power;
                wl->irq = wl12xx_board_data->irq;
@@ -273,6 +284,10 @@ static int wl1251_sdio_probe(struct sdio_func *func,
                goto out_free_irq;
 
        sdio_set_drvdata(func, wl);
+
+       /* Tell PM core that we don't need the card to be powered now */
+       pm_runtime_put_noidle(&func->dev);
+
        return ret;
 
 out_free_irq:
@@ -294,6 +309,9 @@ static void __devexit wl1251_sdio_remove(struct sdio_func *func)
        struct wl1251 *wl = sdio_get_drvdata(func);
        struct wl1251_sdio *wl_sdio = wl->if_priv;
 
+       /* Undo the pm_runtime_put_noidle() done in wl1251_sdio_probe() */
+       pm_runtime_get_noresume(&func->dev);
+
        if (wl->irq)
                free_irq(wl->irq, wl);
        kfree(wl_sdio);
@@ -305,23 +323,37 @@ static void __devexit wl1251_sdio_remove(struct sdio_func *func)
        sdio_release_host(func);
 }
 
+static int wl1251_suspend(struct device *dev)
+{
+       /*
+        * Tell MMC/SDIO core it's OK to power down the card
+        * (if it isn't already), but not to remove it completely.
+        */
+       return 0;
+}
+
+static int wl1251_resume(struct device *dev)
+{
+       return 0;
+}
+
+static const struct dev_pm_ops wl1251_sdio_pm_ops = {
+       .suspend        = wl1251_suspend,
+       .resume         = wl1251_resume,
+};
+
 static struct sdio_driver wl1251_sdio_driver = {
        .name           = "wl1251_sdio",
        .id_table       = wl1251_devices,
        .probe          = wl1251_sdio_probe,
        .remove         = __devexit_p(wl1251_sdio_remove),
+       .drv.pm         = &wl1251_sdio_pm_ops,
 };
 
 static int __init wl1251_sdio_init(void)
 {
        int err;
 
-       err = platform_driver_register(&wl1251_platform_driver);
-       if (err) {
-               wl1251_error("failed to register platform driver: %d", err);
-               return err;
-       }
-
        err = sdio_register_driver(&wl1251_sdio_driver);
        if (err)
                wl1251_error("failed to register sdio driver: %d", err);
@@ -331,7 +363,6 @@ static int __init wl1251_sdio_init(void)
 static void __exit wl1251_sdio_exit(void)
 {
        sdio_unregister_driver(&wl1251_sdio_driver);
-       platform_driver_unregister(&wl1251_platform_driver);
        wl1251_notice("unloaded");
 }
 
index 88fa8e69d0d1d30b06c76a64cdc8d00f5592754a..ac872b38960f7a1d47089ba9f754b5c37aa7e2f5 100644 (file)
@@ -215,12 +215,21 @@ static void wl1251_spi_disable_irq(struct wl1251 *wl)
        return disable_irq(wl->irq);
 }
 
+static int wl1251_spi_set_power(struct wl1251 *wl, bool enable)
+{
+       if (wl->set_power)
+               wl->set_power(enable);
+
+       return 0;
+}
+
 static const struct wl1251_if_operations wl1251_spi_ops = {
        .read = wl1251_spi_read,
        .write = wl1251_spi_write,
        .reset = wl1251_spi_reset_wake,
        .enable_irq = wl1251_spi_enable_irq,
        .disable_irq = wl1251_spi_disable_irq,
+       .power = wl1251_spi_set_power,
 };
 
 static int __devinit wl1251_spi_probe(struct spi_device *spi)
index e113d4c1fb357924d20a342d4ec214b6fde33c85..13fbeeccf6091db47621abbefddea3e74718c356 100644 (file)
@@ -256,6 +256,7 @@ struct wl1251_if_operations {
        void (*write)(struct wl1251 *wl, int addr, void *buf, size_t len);
        void (*read_elp)(struct wl1251 *wl, int addr, u32 *val);
        void (*write_elp)(struct wl1251 *wl, int addr, u32 val);
+       int  (*power)(struct wl1251 *wl, bool enable);
        void (*reset)(struct wl1251 *wl);
        void (*enable_irq)(struct wl1251 *wl);
        void (*disable_irq)(struct wl1251 *wl);
index b447559f1db52dbf2780afff4ca3454fbecf6dbd..02ad4bc15976904590cb70ff96c7c686345e0d59 100644 (file)
@@ -18,6 +18,16 @@ config WL1271
          If you choose to build a module, it'll be called wl1271. Say N if
          unsure.
 
+config WL1271_HT
+        bool "TI wl1271 802.11 HT support (EXPERIMENTAL)"
+        depends on WL1271 && EXPERIMENTAL
+        default n
+        ---help---
+          This will enable 802.11 HT support for TI wl1271 chipset.
+
+         This configuration is temporary because the code is incomplete and
+         still being tested.
+
 config WL1271_SPI
        tristate "TI wl1271 SPI support"
        depends on WL1271 && SPI_MASTER
@@ -42,5 +52,5 @@ config WL1271_SDIO
 
 config WL12XX_PLATFORM_DATA
        bool
-       depends on WL1271_SDIO != n
+       depends on WL1271_SDIO != n || WL1251_SDIO != n
        default y
index 8a4cd763e5a24c49080a97a0100f994e1f4993d0..ab53162b434339fc93e75de833b3f0c651bd74dd 100644 (file)
@@ -351,6 +351,7 @@ struct wl1271 {
 #define WL1271_FLAG_IDLE_REQUESTED    (11)
 #define WL1271_FLAG_PSPOLL_FAILURE    (12)
 #define WL1271_FLAG_STA_STATE_SENT    (13)
+#define WL1271_FLAG_FW_TX_BUSY        (14)
        unsigned long flags;
 
        struct wl1271_partition_set part;
@@ -397,6 +398,7 @@ struct wl1271 {
        struct work_struct tx_work;
 
        /* Pending TX frames */
+       unsigned long tx_frames_map[BITS_TO_LONGS(ACX_TX_DESCRIPTORS)];
        struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS];
        int tx_frames_cnt;
 
@@ -432,7 +434,12 @@ struct wl1271 {
        /* Our association ID */
        u16 aid;
 
-       /* currently configured rate set */
+       /*
+        * currently configured rate set:
+        *      bits  0-15 - 802.11abg rates
+        *      bits 16-23 - 802.11n   MCS index mask
+        * support only 1 stream, thus only 8 bits for the MCS rates (0-7).
+        */
        u32 sta_rate_set;
        u32 basic_rate_set;
        u32 basic_rate;
@@ -509,4 +516,8 @@ int wl1271_plt_stop(struct wl1271 *wl);
 #define WL1271_PRE_POWER_ON_SLEEP 20 /* in miliseconds */
 #define WL1271_POWER_ON_SLEEP 200 /* in miliseconds */
 
+/* Macros to handle wl1271.sta_rate_set */
+#define HW_BG_RATES_MASK       0xffff
+#define HW_HT_RATES_OFFSET     16
+
 #endif
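
The sta_rate_set comment and the HW_BG_RATES_MASK/HW_HT_RATES_OFFSET macros above describe a packed layout: legacy 802.11abg rates in bits 0-15 and a single-stream MCS 0-7 mask in bits 16-23. A small sketch of packing and unpacking that word (the sample masks are made up for illustration):

#include <stdio.h>
#include <stdint.h>

#define HW_BG_RATES_MASK   0xffff
#define HW_HT_RATES_OFFSET 16

static uint32_t pack_rate_set(uint16_t bg_rates, uint8_t mcs_mask)
{
	return (bg_rates & HW_BG_RATES_MASK) |
	       ((uint32_t)mcs_mask << HW_HT_RATES_OFFSET);
}

int main(void)
{
	/* e.g. all CCK/OFDM rates enabled, MCS 0-7 enabled */
	uint32_t rate_set = pack_rate_set(0x0fff, 0xff);

	printf("rate_set       = 0x%08x\n", (unsigned)rate_set);
	printf("legacy rates   = 0x%04x\n", (unsigned)(rate_set & HW_BG_RATES_MASK));
	printf("MCS index mask = 0x%02x\n", (unsigned)(rate_set >> HW_HT_RATES_OFFSET));
	return 0;
}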
index 6189934052629d61b528ba771d30f94865e51f6f..bd7f95f4eef3c1a6c8a85902845a761862098685 100644 (file)
@@ -1226,6 +1226,89 @@ out:
        return ret;
 }
 
+int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
+                                   struct ieee80211_sta_ht_cap *ht_cap,
+                                   bool allow_ht_operation)
+{
+       struct wl1271_acx_ht_capabilities *acx;
+       u8 mac_address[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+       int ret = 0;
+
+       wl1271_debug(DEBUG_ACX, "acx ht capabilities setting");
+
+       acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+       if (!acx) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       /* Allow HT Operation ? */
+       if (allow_ht_operation) {
+               acx->ht_capabilites =
+                       WL1271_ACX_FW_CAP_HT_OPERATION;
+               if (ht_cap->cap & IEEE80211_HT_CAP_GRN_FLD)
+                       acx->ht_capabilites |=
+                               WL1271_ACX_FW_CAP_GREENFIELD_FRAME_FORMAT;
+               if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
+                       acx->ht_capabilites |=
+                               WL1271_ACX_FW_CAP_SHORT_GI_FOR_20MHZ_PACKETS;
+               if (ht_cap->cap & IEEE80211_HT_CAP_LSIG_TXOP_PROT)
+                       acx->ht_capabilites |=
+                               WL1271_ACX_FW_CAP_LSIG_TXOP_PROTECTION;
+
+               /* get data from A-MPDU parameters field */
+               acx->ampdu_max_length = ht_cap->ampdu_factor;
+               acx->ampdu_min_spacing = ht_cap->ampdu_density;
+
+               memcpy(acx->mac_address, mac_address, ETH_ALEN);
+       } else { /* HT operations are not allowed */
+               acx->ht_capabilites = 0;
+       }
+
+       ret = wl1271_cmd_configure(wl, ACX_PEER_HT_CAP, acx, sizeof(*acx));
+       if (ret < 0) {
+               wl1271_warning("acx ht capabilities setting failed: %d", ret);
+               goto out;
+       }
+
+out:
+       kfree(acx);
+       return ret;
+}
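
wl1271_acx_set_ht_capabilities() above translates the peer's HT capability flags into the firmware's WL1271_ACX_FW_CAP_* bitmask. A standalone sketch of that translation (the HT_CAP_* input flags and the shortened FW_CAP_* names are stand-ins, not mac80211's or the driver's real definitions):

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

/* Stand-in input flags (illustrative values, not IEEE80211_HT_CAP_*). */
#define HT_CAP_GRN_FLD        BIT(0)
#define HT_CAP_SGI_20         BIT(1)
#define HT_CAP_LSIG_TXOP_PROT BIT(2)

/* Firmware capability bits, abbreviated from the header definitions below. */
#define FW_CAP_HT_OPERATION            BIT(0)
#define FW_CAP_GREENFIELD_FRAME_FORMAT BIT(1)
#define FW_CAP_SHORT_GI_FOR_20MHZ      BIT(2)
#define FW_CAP_LSIG_TXOP_PROTECTION    BIT(3)

static uint32_t build_fw_ht_caps(uint32_t ht_cap, int allow_ht_operation)
{
	uint32_t fw_caps = 0;

	if (!allow_ht_operation)
		return 0;	/* HT operation disabled: report no capabilities */

	fw_caps |= FW_CAP_HT_OPERATION;
	if (ht_cap & HT_CAP_GRN_FLD)
		fw_caps |= FW_CAP_GREENFIELD_FRAME_FORMAT;
	if (ht_cap & HT_CAP_SGI_20)
		fw_caps |= FW_CAP_SHORT_GI_FOR_20MHZ;
	if (ht_cap & HT_CAP_LSIG_TXOP_PROT)
		fw_caps |= FW_CAP_LSIG_TXOP_PROTECTION;

	return fw_caps;
}

int main(void)
{
	printf("fw caps = 0x%02x\n",
	       (unsigned)build_fw_ht_caps(HT_CAP_SGI_20 | HT_CAP_GRN_FLD, 1));
	return 0;
}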
+
+int wl1271_acx_set_ht_information(struct wl1271 *wl,
+                                  u16 ht_operation_mode)
+{
+       struct wl1271_acx_ht_information *acx;
+       int ret = 0;
+
+       wl1271_debug(DEBUG_ACX, "acx ht information setting");
+
+       acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+       if (!acx) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       acx->ht_protection =
+               (u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION);
+       acx->rifs_mode = 0;
+       acx->gf_protection = 0;
+       acx->ht_tx_burst_limit = 0;
+       acx->dual_cts_protection = 0;
+
+       ret = wl1271_cmd_configure(wl, ACX_HT_BSS_OPERATION, acx, sizeof(*acx));
+
+       if (ret < 0) {
+               wl1271_warning("acx ht information setting failed: %d", ret);
+               goto out;
+       }
+
+out:
+       kfree(acx);
+       return ret;
+}
+
 int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime)
 {
        struct wl1271_acx_fw_tsf_information *tsf_info;
index ebb341d36e8c01a88f2fe423908c2a0994e6be1d..b7c490845f3e15c19a4e5562c2261308e0150268 100644 (file)
@@ -61,7 +61,8 @@
                                            WL1271_ACX_INTR_HW_AVAILABLE  | \
                                            WL1271_ACX_INTR_DATA)
 
-#define WL1271_INTR_MASK                   (WL1271_ACX_INTR_EVENT_A      | \
+#define WL1271_INTR_MASK                   (WL1271_ACX_INTR_WATCHDOG     | \
+                                           WL1271_ACX_INTR_EVENT_A      | \
                                            WL1271_ACX_INTR_EVENT_B      | \
                                            WL1271_ACX_INTR_HW_AVAILABLE | \
                                            WL1271_ACX_INTR_DATA)
@@ -964,6 +965,87 @@ struct wl1271_acx_rssi_snr_avg_weights {
        u8 snr_data;
 };
 
+/*
+ * ACX_PEER_HT_CAP
+ * Configure HT capabilities - declare the capabilities of the peer
+ * we are connected to.
+ */
+struct wl1271_acx_ht_capabilities {
+       struct acx_header header;
+
+       /*
+        * bit 0 - Allow HT Operation
+        * bit 1 - Allow Greenfield format in TX
+        * bit 2 - Allow Short GI in TX
+        * bit 3 - Allow L-SIG TXOP Protection in TX
+        * bit 4 - Allow HT Control fields in TX.
+        *         Note, driver will still leave space for HT control in packets
+        *         regardless of the value of this field. FW will be responsible
+        *         for dropping the HT field from any frame when this bit is set to 0.
+        * bit 5 - Allow RD initiation in TXOP. FW is allowed to initiate RD.
+        *         Exact policy setting for this feature is TBD.
+        *         Note, this bit can only be set to 1 if bit 3 is set to 1.
+        */
+       __le32 ht_capabilites;
+
+       /*
+        * Indicates to which peer these capabilities apply.
+        * For infrastructure use ff:ff:ff:ff:ff:ff that indicates relevance
+        * for all peers.
+        * Only valid for IBSS/DLS operation.
+        */
+       u8 mac_address[ETH_ALEN];
+
+       /*
+        * This is the maximum A-MPDU length supported by the AP. The FW may not
+        * exceed this length when sending A-MPDUs
+        */
+       u8 ampdu_max_length;
+
+       /* This is the minimal spacing required when sending A-MPDUs to the AP*/
+       u8 ampdu_min_spacing;
+} __packed;
+
+/* HT Capabilities FW Bit Mask Mapping */
+#define WL1271_ACX_FW_CAP_HT_OPERATION                 BIT(0)
+#define WL1271_ACX_FW_CAP_GREENFIELD_FRAME_FORMAT      BIT(1)
+#define WL1271_ACX_FW_CAP_SHORT_GI_FOR_20MHZ_PACKETS   BIT(2)
+#define WL1271_ACX_FW_CAP_LSIG_TXOP_PROTECTION         BIT(3)
+#define WL1271_ACX_FW_CAP_HT_CONTROL_FIELDS            BIT(4)
+#define WL1271_ACX_FW_CAP_RD_INITIATION                BIT(5)
+
+
+/*
+ * ACX_HT_BSS_OPERATION
+ * Configure HT capabilities - AP rules for behavior in the BSS.
+ */
+struct wl1271_acx_ht_information {
+       struct acx_header header;
+
+       /* Values: 0 - RIFS not allowed, 1 - RIFS allowed */
+       u8 rifs_mode;
+
+       /* Values: 0 - 3, as defined in the spec */
+       u8 ht_protection;
+
+       /* Values: 0 - GF protection not required, 1 - GF protection required */
+       u8 gf_protection;
+
+       /*
+        * Values: 0 - TX Burst Limit not required,
+        *         1 - TX Burst Limit required
+        */
+       u8 ht_tx_burst_limit;
+
+       /*
+        * Values: 0 - Dual CTS protection not required,
+        *         1 - Dual CTS Protection required
+        * Note: When this value is set to 1, the FW will protect all TXOPs
+        * with RTS frames and will not use CTS-to-self, regardless of the
+        * value of the ACX_CTS_PROTECTION information element.
+        */
+       u8 dual_cts_protection;
+
+       u8 padding[3];
+} __packed;
+
 struct wl1271_acx_fw_tsf_information {
        struct acx_header header;
 
@@ -1093,6 +1175,11 @@ int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid);
 int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
                                s16 thold, u8 hyst);
 int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl);
+int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
+                                   struct ieee80211_sta_ht_cap *ht_cap,
+                                   bool allow_ht_operation);
+int wl1271_acx_set_ht_information(struct wl1271 *wl,
+                                  u16 ht_operation_mode);
 int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime);
 
 #endif /* __WL1271_ACX_H__ */
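For orientation, the WL1271_ACX_FW_CAP_* bits above correspond to flags mac80211 already exposes in struct ieee80211_sta_ht_cap. The following is only an illustrative sketch of the mapping those bit definitions imply; the real translation is done in wl1271_acx_set_ht_capabilities() in acx.c, which is not part of this hunk:

/*
 * Sketch (not from this patch): fold the peer's mac80211 HT capabilities
 * into the WL1271_ACX_FW_CAP_* bits declared above.
 */
static u32 wl1271_ht_cap_to_fw_bits(struct ieee80211_sta_ht_cap *ht_cap,
				    bool allow_ht_operation)
{
	u32 fw_cap = 0;

	if (allow_ht_operation)
		fw_cap |= WL1271_ACX_FW_CAP_HT_OPERATION;
	if (ht_cap->cap & IEEE80211_HT_CAP_GRN_FLD)
		fw_cap |= WL1271_ACX_FW_CAP_GREENFIELD_FRAME_FORMAT;
	if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
		fw_cap |= WL1271_ACX_FW_CAP_SHORT_GI_FOR_20MHZ_PACKETS;
	if (ht_cap->cap & IEEE80211_HT_CAP_LSIG_TXOP_PROT)
		fw_cap |= WL1271_ACX_FW_CAP_LSIG_TXOP_PROTECTION;

	return fw_cap;
}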
index b910212420985cd3ecf28dfa18dd3584b03b1cb7..5b190728ca554bffd5178f51561f0002ce7c88d0 100644 (file)
@@ -471,20 +471,19 @@ int wl1271_boot(struct wl1271 *wl)
 {
        int ret = 0;
        u32 tmp, clk, pause;
-       int ref_clock = wl->ref_clock;
 
        wl1271_boot_hw_version(wl);
 
-       if (ref_clock == 0 || ref_clock == 2 || ref_clock == 4)
+       if (wl->ref_clock == 0 || wl->ref_clock == 2 || wl->ref_clock == 4)
                /* ref clk: 19.2/38.4/38.4-XTAL */
                clk = 0x3;
-       else if (ref_clock == 1 || ref_clock == 3)
+       else if (wl->ref_clock == 1 || wl->ref_clock == 3)
                /* ref clk: 26/52 */
                clk = 0x5;
        else
                return -EINVAL;
 
-       if (ref_clock != 0) {
+       if (wl->ref_clock != 0) {
                u16 val;
                /* Set clock type (open drain) */
                val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE);
@@ -529,8 +528,7 @@ int wl1271_boot(struct wl1271 *wl)
 
        wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk);
 
-       /* 2 */
-       clk |= (ref_clock << 1) << 4;
+       clk |= (wl->ref_clock << 1) << 4;
        wl1271_write32(wl, DRPW_SCRATCH_START, clk);
 
        wl1271_set_partition(wl, &part_table[PART_WORK]);
index 66c2b90ddfd461df7471aef34e7c3335c84a360b..3468b849852e05493979fc0c29fe62c04310924d 100644 (file)
 #define WL1271_DEBUGFS_STATS_LIFETIME 1000
 
 /* debugfs macros idea from mac80211 */
+#define DEBUGFS_FORMAT_BUFFER_SIZE 100
+static int wl1271_format_buffer(char __user *userbuf, size_t count,
+                                   loff_t *ppos, char *fmt, ...)
+{
+       va_list args;
+       char buf[DEBUGFS_FORMAT_BUFFER_SIZE];
+       int res;
 
-#define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...)             \
+       va_start(args, fmt);
+       res = vscnprintf(buf, sizeof(buf), fmt, args);
+       va_end(args);
+
+       return simple_read_from_buffer(userbuf, count, ppos, buf, res);
+}
+
+#define DEBUGFS_READONLY_FILE(name, fmt, value...)                     \
 static ssize_t name## _read(struct file *file, char __user *userbuf,   \
                            size_t count, loff_t *ppos)                 \
 {                                                                      \
        struct wl1271 *wl = file->private_data;                         \
-       char buf[buflen];                                               \
-       int res;                                                        \
-                                                                       \
-       res = scnprintf(buf, buflen, fmt "\n", ##value);                \
-       return simple_read_from_buffer(userbuf, count, ppos, buf, res); \
+       return wl1271_format_buffer(userbuf, count, ppos,               \
+                                   fmt "\n", ##value);                 \
 }                                                                      \
                                                                        \
 static const struct file_operations name## _ops = {                    \
@@ -69,20 +80,17 @@ static const struct file_operations name## _ops = {                 \
                wl->debugfs.name = NULL;                                \
        } while (0)
 
-#define DEBUGFS_FWSTATS_FILE(sub, name, buflen, fmt)                   \
+#define DEBUGFS_FWSTATS_FILE(sub, name, fmt)                           \
 static ssize_t sub## _ ##name## _read(struct file *file,               \
                                      char __user *userbuf,             \
                                      size_t count, loff_t *ppos)       \
 {                                                                      \
        struct wl1271 *wl = file->private_data;                         \
-       char buf[buflen];                                               \
-       int res;                                                        \
                                                                        \
        wl1271_debugfs_update_stats(wl);                                \
                                                                        \
-       res = scnprintf(buf, buflen, fmt "\n",                          \
-                       wl->stats.fw_stats->sub.name);                  \
-       return simple_read_from_buffer(userbuf, count, ppos, buf, res); \
+       return wl1271_format_buffer(userbuf, count, ppos, fmt "\n",     \
+                                   wl->stats.fw_stats->sub.name);      \
 }                                                                      \
                                                                        \
 static const struct file_operations sub## _ ##name## _ops = {          \
@@ -126,100 +134,99 @@ static int wl1271_open_file_generic(struct inode *inode, struct file *file)
        return 0;
 }
 
-DEBUGFS_FWSTATS_FILE(tx, internal_desc_overflow, 20, "%u");
-
-DEBUGFS_FWSTATS_FILE(rx, out_of_mem, 20, "%u");
-DEBUGFS_FWSTATS_FILE(rx, hdr_overflow, 20, "%u");
-DEBUGFS_FWSTATS_FILE(rx, hw_stuck, 20, "%u");
-DEBUGFS_FWSTATS_FILE(rx, dropped, 20, "%u");
-DEBUGFS_FWSTATS_FILE(rx, fcs_err, 20, "%u");
-DEBUGFS_FWSTATS_FILE(rx, xfr_hint_trig, 20, "%u");
-DEBUGFS_FWSTATS_FILE(rx, path_reset, 20, "%u");
-DEBUGFS_FWSTATS_FILE(rx, reset_counter, 20, "%u");
-
-DEBUGFS_FWSTATS_FILE(dma, rx_requested, 20, "%u");
-DEBUGFS_FWSTATS_FILE(dma, rx_errors, 20, "%u");
-DEBUGFS_FWSTATS_FILE(dma, tx_requested, 20, "%u");
-DEBUGFS_FWSTATS_FILE(dma, tx_errors, 20, "%u");
-
-DEBUGFS_FWSTATS_FILE(isr, cmd_cmplt, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, fiqs, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, rx_headers, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, rx_mem_overflow, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, rx_rdys, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, irqs, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, tx_procs, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, decrypt_done, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, dma0_done, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, dma1_done, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, tx_exch_complete, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, commands, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, rx_procs, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, hw_pm_mode_changes, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, host_acknowledges, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, pci_pm, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, wakeups, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, low_rssi, 20, "%u");
-
-DEBUGFS_FWSTATS_FILE(wep, addr_key_count, 20, "%u");
-DEBUGFS_FWSTATS_FILE(wep, default_key_count, 20, "%u");
+DEBUGFS_FWSTATS_FILE(tx, internal_desc_overflow, "%u");
+
+DEBUGFS_FWSTATS_FILE(rx, out_of_mem, "%u");
+DEBUGFS_FWSTATS_FILE(rx, hdr_overflow, "%u");
+DEBUGFS_FWSTATS_FILE(rx, hw_stuck, "%u");
+DEBUGFS_FWSTATS_FILE(rx, dropped, "%u");
+DEBUGFS_FWSTATS_FILE(rx, fcs_err, "%u");
+DEBUGFS_FWSTATS_FILE(rx, xfr_hint_trig, "%u");
+DEBUGFS_FWSTATS_FILE(rx, path_reset, "%u");
+DEBUGFS_FWSTATS_FILE(rx, reset_counter, "%u");
+
+DEBUGFS_FWSTATS_FILE(dma, rx_requested, "%u");
+DEBUGFS_FWSTATS_FILE(dma, rx_errors, "%u");
+DEBUGFS_FWSTATS_FILE(dma, tx_requested, "%u");
+DEBUGFS_FWSTATS_FILE(dma, tx_errors, "%u");
+
+DEBUGFS_FWSTATS_FILE(isr, cmd_cmplt, "%u");
+DEBUGFS_FWSTATS_FILE(isr, fiqs, "%u");
+DEBUGFS_FWSTATS_FILE(isr, rx_headers, "%u");
+DEBUGFS_FWSTATS_FILE(isr, rx_mem_overflow, "%u");
+DEBUGFS_FWSTATS_FILE(isr, rx_rdys, "%u");
+DEBUGFS_FWSTATS_FILE(isr, irqs, "%u");
+DEBUGFS_FWSTATS_FILE(isr, tx_procs, "%u");
+DEBUGFS_FWSTATS_FILE(isr, decrypt_done, "%u");
+DEBUGFS_FWSTATS_FILE(isr, dma0_done, "%u");
+DEBUGFS_FWSTATS_FILE(isr, dma1_done, "%u");
+DEBUGFS_FWSTATS_FILE(isr, tx_exch_complete, "%u");
+DEBUGFS_FWSTATS_FILE(isr, commands, "%u");
+DEBUGFS_FWSTATS_FILE(isr, rx_procs, "%u");
+DEBUGFS_FWSTATS_FILE(isr, hw_pm_mode_changes, "%u");
+DEBUGFS_FWSTATS_FILE(isr, host_acknowledges, "%u");
+DEBUGFS_FWSTATS_FILE(isr, pci_pm, "%u");
+DEBUGFS_FWSTATS_FILE(isr, wakeups, "%u");
+DEBUGFS_FWSTATS_FILE(isr, low_rssi, "%u");
+
+DEBUGFS_FWSTATS_FILE(wep, addr_key_count, "%u");
+DEBUGFS_FWSTATS_FILE(wep, default_key_count, "%u");
 /* skipping wep.reserved */
-DEBUGFS_FWSTATS_FILE(wep, key_not_found, 20, "%u");
-DEBUGFS_FWSTATS_FILE(wep, decrypt_fail, 20, "%u");
-DEBUGFS_FWSTATS_FILE(wep, packets, 20, "%u");
-DEBUGFS_FWSTATS_FILE(wep, interrupt, 20, "%u");
-
-DEBUGFS_FWSTATS_FILE(pwr, ps_enter, 20, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, elp_enter, 20, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, missing_bcns, 20, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, wake_on_host, 20, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, wake_on_timer_exp, 20, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, tx_with_ps, 20, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, tx_without_ps, 20, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, rcvd_beacons, 20, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, power_save_off, 20, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, enable_ps, 20, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, disable_ps, 20, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, fix_tsf_ps, 20, "%u");
+DEBUGFS_FWSTATS_FILE(wep, key_not_found, "%u");
+DEBUGFS_FWSTATS_FILE(wep, decrypt_fail, "%u");
+DEBUGFS_FWSTATS_FILE(wep, packets, "%u");
+DEBUGFS_FWSTATS_FILE(wep, interrupt, "%u");
+
+DEBUGFS_FWSTATS_FILE(pwr, ps_enter, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, elp_enter, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, missing_bcns, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, wake_on_host, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, wake_on_timer_exp, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, tx_with_ps, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, tx_without_ps, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, rcvd_beacons, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, power_save_off, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, enable_ps, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, disable_ps, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, fix_tsf_ps, "%u");
 /* skipping cont_miss_bcns_spread for now */
-DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_beacons, 20, "%u");
-
-DEBUGFS_FWSTATS_FILE(mic, rx_pkts, 20, "%u");
-DEBUGFS_FWSTATS_FILE(mic, calc_failure, 20, "%u");
-
-DEBUGFS_FWSTATS_FILE(aes, encrypt_fail, 20, "%u");
-DEBUGFS_FWSTATS_FILE(aes, decrypt_fail, 20, "%u");
-DEBUGFS_FWSTATS_FILE(aes, encrypt_packets, 20, "%u");
-DEBUGFS_FWSTATS_FILE(aes, decrypt_packets, 20, "%u");
-DEBUGFS_FWSTATS_FILE(aes, encrypt_interrupt, 20, "%u");
-DEBUGFS_FWSTATS_FILE(aes, decrypt_interrupt, 20, "%u");
-
-DEBUGFS_FWSTATS_FILE(event, heart_beat, 20, "%u");
-DEBUGFS_FWSTATS_FILE(event, calibration, 20, "%u");
-DEBUGFS_FWSTATS_FILE(event, rx_mismatch, 20, "%u");
-DEBUGFS_FWSTATS_FILE(event, rx_mem_empty, 20, "%u");
-DEBUGFS_FWSTATS_FILE(event, rx_pool, 20, "%u");
-DEBUGFS_FWSTATS_FILE(event, oom_late, 20, "%u");
-DEBUGFS_FWSTATS_FILE(event, phy_transmit_error, 20, "%u");
-DEBUGFS_FWSTATS_FILE(event, tx_stuck, 20, "%u");
-
-DEBUGFS_FWSTATS_FILE(ps, pspoll_timeouts, 20, "%u");
-DEBUGFS_FWSTATS_FILE(ps, upsd_timeouts, 20, "%u");
-DEBUGFS_FWSTATS_FILE(ps, upsd_max_sptime, 20, "%u");
-DEBUGFS_FWSTATS_FILE(ps, upsd_max_apturn, 20, "%u");
-DEBUGFS_FWSTATS_FILE(ps, pspoll_max_apturn, 20, "%u");
-DEBUGFS_FWSTATS_FILE(ps, pspoll_utilization, 20, "%u");
-DEBUGFS_FWSTATS_FILE(ps, upsd_utilization, 20, "%u");
-
-DEBUGFS_FWSTATS_FILE(rxpipe, rx_prep_beacon_drop, 20, "%u");
-DEBUGFS_FWSTATS_FILE(rxpipe, descr_host_int_trig_rx_data, 20, "%u");
-DEBUGFS_FWSTATS_FILE(rxpipe, beacon_buffer_thres_host_int_trig_rx_data,
-                    20, "%u");
-DEBUGFS_FWSTATS_FILE(rxpipe, missed_beacon_host_int_trig_rx_data, 20, "%u");
-DEBUGFS_FWSTATS_FILE(rxpipe, tx_xfr_host_int_trig_rx_data, 20, "%u");
-
-DEBUGFS_READONLY_FILE(retry_count, 20, "%u", wl->stats.retry_count);
-DEBUGFS_READONLY_FILE(excessive_retries, 20, "%u",
+DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_beacons, "%u");
+
+DEBUGFS_FWSTATS_FILE(mic, rx_pkts, "%u");
+DEBUGFS_FWSTATS_FILE(mic, calc_failure, "%u");
+
+DEBUGFS_FWSTATS_FILE(aes, encrypt_fail, "%u");
+DEBUGFS_FWSTATS_FILE(aes, decrypt_fail, "%u");
+DEBUGFS_FWSTATS_FILE(aes, encrypt_packets, "%u");
+DEBUGFS_FWSTATS_FILE(aes, decrypt_packets, "%u");
+DEBUGFS_FWSTATS_FILE(aes, encrypt_interrupt, "%u");
+DEBUGFS_FWSTATS_FILE(aes, decrypt_interrupt, "%u");
+
+DEBUGFS_FWSTATS_FILE(event, heart_beat, "%u");
+DEBUGFS_FWSTATS_FILE(event, calibration, "%u");
+DEBUGFS_FWSTATS_FILE(event, rx_mismatch, "%u");
+DEBUGFS_FWSTATS_FILE(event, rx_mem_empty, "%u");
+DEBUGFS_FWSTATS_FILE(event, rx_pool, "%u");
+DEBUGFS_FWSTATS_FILE(event, oom_late, "%u");
+DEBUGFS_FWSTATS_FILE(event, phy_transmit_error, "%u");
+DEBUGFS_FWSTATS_FILE(event, tx_stuck, "%u");
+
+DEBUGFS_FWSTATS_FILE(ps, pspoll_timeouts, "%u");
+DEBUGFS_FWSTATS_FILE(ps, upsd_timeouts, "%u");
+DEBUGFS_FWSTATS_FILE(ps, upsd_max_sptime, "%u");
+DEBUGFS_FWSTATS_FILE(ps, upsd_max_apturn, "%u");
+DEBUGFS_FWSTATS_FILE(ps, pspoll_max_apturn, "%u");
+DEBUGFS_FWSTATS_FILE(ps, pspoll_utilization, "%u");
+DEBUGFS_FWSTATS_FILE(ps, upsd_utilization, "%u");
+
+DEBUGFS_FWSTATS_FILE(rxpipe, rx_prep_beacon_drop, "%u");
+DEBUGFS_FWSTATS_FILE(rxpipe, descr_host_int_trig_rx_data, "%u");
+DEBUGFS_FWSTATS_FILE(rxpipe, beacon_buffer_thres_host_int_trig_rx_data, "%u");
+DEBUGFS_FWSTATS_FILE(rxpipe, missed_beacon_host_int_trig_rx_data, "%u");
+DEBUGFS_FWSTATS_FILE(rxpipe, tx_xfr_host_int_trig_rx_data, "%u");
+
+DEBUGFS_READONLY_FILE(retry_count, "%u", wl->stats.retry_count);
+DEBUGFS_READONLY_FILE(excessive_retries, "%u",
                      wl->stats.excessive_retries);
 
 static ssize_t tx_queue_len_read(struct file *file, char __user *userbuf,
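To make the macro rework concrete, this is roughly what DEBUGFS_FWSTATS_FILE(rx, out_of_mem, "%u") now expands to: the per-file stack buffer is gone and all formatting funnels through wl1271_format_buffer(). Expansion shown for illustration only; the file_operations wiring is unchanged by this patch.

/* Illustrative expansion of DEBUGFS_FWSTATS_FILE(rx, out_of_mem, "%u") */
static ssize_t rx_out_of_mem_read(struct file *file, char __user *userbuf,
				  size_t count, loff_t *ppos)
{
	struct wl1271 *wl = file->private_data;

	wl1271_debugfs_update_stats(wl);

	/* fmt "\n" pastes into "%u" "\n", formatted by the shared helper */
	return wl1271_format_buffer(userbuf, count, ppos, "%u" "\n",
				    wl->stats.fw_stats->rx.out_of_mem);
}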
index 7b3f503829631018630ca21e9faa67f840db10c8..38ccef7d73a5213e855b6e4def49b3cacf50a203 100644 (file)
@@ -134,8 +134,6 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
 
                /* go to extremely low power mode */
                wl1271_ps_elp_sleep(wl);
-               if (ret < 0)
-                       break;
                break;
        case EVENT_EXIT_POWER_SAVE_FAIL:
                wl1271_debug(DEBUG_PSM, "PSM exit failed");
index 48a4b9961ae6e1bea44a4985bca56e46bdf1ee32..f5b1d19bc88d7f1a13ff66563693fb792c533a6d 100644 (file)
@@ -481,9 +481,9 @@ static void wl1271_fw_status(struct wl1271 *wl,
                total += cnt;
        }
 
-       /* if more blocks are available now, schedule some tx work */
-       if (total && !skb_queue_empty(&wl->tx_queue))
-               ieee80211_queue_work(wl->hw, &wl->tx_work);
+       /* if more blocks are available now, tx work can be scheduled */
+       if (total)
+               clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
 
        /* update the host-chipset time offset */
        getnstimeofday(&ts);
@@ -529,6 +529,15 @@ static void wl1271_irq_work(struct work_struct *work)
 
                intr &= WL1271_INTR_MASK;
 
+               if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
+                       wl1271_error("watchdog interrupt received! "
+                                    "starting recovery.");
+                       ieee80211_queue_work(wl->hw, &wl->recovery_work);
+
+                       /* restarting the chip. ignore any other interrupt. */
+                       goto out;
+               }
+
                if (intr & WL1271_ACX_INTR_DATA) {
                        wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
 
@@ -537,6 +546,16 @@ static void wl1271_irq_work(struct work_struct *work)
                            (wl->tx_results_count & 0xff))
                                wl1271_tx_complete(wl);
 
+                       /* Check if any tx blocks were freed */
+                       if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
+                                       !skb_queue_empty(&wl->tx_queue)) {
+                               /*
+                                * In order to avoid starvation of the TX path,
+                                * call the work function directly.
+                                */
+                               wl1271_tx_work_locked(wl);
+                       }
+
                        wl1271_rx(wl, wl->fw_status);
                }
 
@@ -851,12 +870,32 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
        struct ieee80211_sta *sta = txinfo->control.sta;
        unsigned long flags;
 
-       /* peek into the rates configured in the STA entry */
+       /*
+        * Peek into the rates configured in the STA entry.
+        * The rates are set after the connection stage. The first block
+        * handles only the BG rates: the comparison covers the BG portion of
+        * sta_rate_set (HW_BG_RATES_MASK). The second block adds the HT (MCS)
+        * rates when HT is supported.
+        */
        spin_lock_irqsave(&wl->wl_lock, flags);
-       if (sta && sta->supp_rates[conf->channel->band] != wl->sta_rate_set) {
+       if (sta &&
+           (sta->supp_rates[conf->channel->band] !=
+           (wl->sta_rate_set & HW_BG_RATES_MASK))) {
                wl->sta_rate_set = sta->supp_rates[conf->channel->band];
                set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags);
        }
+
+#ifdef CONFIG_WL1271_HT
+       if (sta &&
+           sta->ht_cap.ht_supported &&
+           ((wl->sta_rate_set >> HW_HT_RATES_OFFSET) !=
+             sta->ht_cap.mcs.rx_mask[0])) {
+               /* Clean MCS bits before setting them */
+               wl->sta_rate_set &= HW_BG_RATES_MASK;
+               wl->sta_rate_set |=
+                       (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET);
+               set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags);
+       }
+#endif
        spin_unlock_irqrestore(&wl->wl_lock, flags);
 
        /* queue the packet */
@@ -867,7 +906,8 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
         * before that, the tx_work will not be initialized!
         */
 
-       ieee80211_queue_work(wl->hw, &wl->tx_work);
+       if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
+               ieee80211_queue_work(wl->hw, &wl->tx_work);
 
        /*
         * The workqueue is slow to process the tx_queue and we need stop
@@ -919,18 +959,19 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
        struct wiphy *wiphy = hw->wiphy;
        int retries = WL1271_BOOT_RETRIES;
        int ret = 0;
+       bool booted = false;
 
        wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
                     vif->type, vif->addr);
 
        mutex_lock(&wl->mutex);
        if (wl->vif) {
+               wl1271_debug(DEBUG_MAC80211,
+                            "multiple vifs are not supported yet");
                ret = -EBUSY;
                goto out;
        }
 
-       wl->vif = vif;
-
        switch (vif->type) {
        case NL80211_IFTYPE_STATION:
                wl->bss_type = BSS_TYPE_STA_BSS;
@@ -968,15 +1009,8 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
                if (ret < 0)
                        goto irq_disable;
 
-               wl->state = WL1271_STATE_ON;
-               wl1271_info("firmware booted (%s)", wl->chip.fw_ver);
-
-               /* update hw/fw version info in wiphy struct */
-               wiphy->hw_version = wl->chip.id;
-               strncpy(wiphy->fw_version, wl->chip.fw_ver,
-                       sizeof(wiphy->fw_version));
-
-               goto out;
+               booted = true;
+               break;
 
 irq_disable:
                wl1271_disable_interrupts(wl);
@@ -994,8 +1028,21 @@ power_off:
                wl1271_power_off(wl);
        }
 
-       wl1271_error("firmware boot failed despite %d retries",
-                    WL1271_BOOT_RETRIES);
+       if (!booted) {
+               wl1271_error("firmware boot failed despite %d retries",
+                            WL1271_BOOT_RETRIES);
+               goto out;
+       }
+
+       wl->vif = vif;
+       wl->state = WL1271_STATE_ON;
+       wl1271_info("firmware booted (%s)", wl->chip.fw_ver);
+
+       /* update hw/fw version info in wiphy struct */
+       wiphy->hw_version = wl->chip.id;
+       strncpy(wiphy->fw_version, wl->chip.fw_ver,
+               sizeof(wiphy->fw_version));
+
 out:
        mutex_unlock(&wl->mutex);
 
@@ -1025,6 +1072,7 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl)
                wl->scan.state = WL1271_SCAN_STATE_IDLE;
                kfree(wl->scan.scanned_ch);
                wl->scan.scanned_ch = NULL;
+               wl->scan.req = NULL;
                ieee80211_scan_completed(wl->hw, true);
        }
 
@@ -1312,8 +1360,10 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
 
        mutex_lock(&wl->mutex);
 
-       if (unlikely(wl->state == WL1271_STATE_OFF))
+       if (unlikely(wl->state == WL1271_STATE_OFF)) {
+               ret = -EAGAIN;
                goto out;
+       }
 
        ret = wl1271_ps_elp_wakeup(wl, false);
        if (ret < 0)
@@ -1536,6 +1586,11 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 
        mutex_lock(&wl->mutex);
 
+       if (unlikely(wl->state == WL1271_STATE_OFF)) {
+               ret = -EAGAIN;
+               goto out_unlock;
+       }
+
        ret = wl1271_ps_elp_wakeup(wl, false);
        if (ret < 0)
                goto out_unlock;
@@ -1645,6 +1700,16 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
 
        mutex_lock(&wl->mutex);
 
+       if (wl->state == WL1271_STATE_OFF) {
+               /*
+                * We cannot return -EBUSY here because cfg80211 will expect
+                * a call to ieee80211_scan_completed if we do - in this case
+                * there won't be any call.
+                */
+               ret = -EAGAIN;
+               goto out;
+       }
+
        ret = wl1271_ps_elp_wakeup(wl, false);
        if (ret < 0)
                goto out;
@@ -1666,8 +1731,10 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
 
        mutex_lock(&wl->mutex);
 
-       if (unlikely(wl->state == WL1271_STATE_OFF))
+       if (unlikely(wl->state == WL1271_STATE_OFF)) {
+               ret = -EAGAIN;
                goto out;
+       }
 
        ret = wl1271_ps_elp_wakeup(wl, false);
        if (ret < 0)
@@ -1709,6 +1776,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
 {
        enum wl1271_cmd_ps_mode mode;
        struct wl1271 *wl = hw->priv;
+       struct ieee80211_sta *sta = ieee80211_find_sta(vif, bss_conf->bssid);
        bool do_join = false;
        bool set_assoc = false;
        int ret;
@@ -1717,6 +1785,9 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
 
        mutex_lock(&wl->mutex);
 
+       if (unlikely(wl->state == WL1271_STATE_OFF))
+               goto out;
+
        ret = wl1271_ps_elp_wakeup(wl, false);
        if (ret < 0)
                goto out;
@@ -1927,6 +1998,37 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
                }
        }
 
+       /*
+        * Takes care of: New association with HT enabled,
+        *                HT information change in beacon.
+        */
+       if (sta &&
+           (changed & BSS_CHANGED_HT) &&
+           (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
+               ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true);
+               if (ret < 0) {
+                       wl1271_warning("Set ht cap true failed %d", ret);
+                       goto out_sleep;
+               }
+               ret = wl1271_acx_set_ht_information(wl,
+                                       bss_conf->ht_operation_mode);
+               if (ret < 0) {
+                       wl1271_warning("Set ht information failed %d", ret);
+                       goto out_sleep;
+               }
+       }
+       /*
+        * Takes care of: New association without HT,
+        *                Disassociation.
+        */
+       else if (sta && (changed & BSS_CHANGED_ASSOC)) {
+               ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, false);
+               if (ret < 0) {
+                       wl1271_warning("Set ht cap false failed %d", ret);
+                       goto out_sleep;
+               }
+       }
+
        if (changed & BSS_CHANGED_ARP_FILTER) {
                __be32 addr = bss_conf->arp_addr_list[0];
                WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
@@ -1966,6 +2068,11 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
 
        wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
 
+       if (unlikely(wl->state == WL1271_STATE_OFF)) {
+               ret = -EAGAIN;
+               goto out;
+       }
+
        ret = wl1271_ps_elp_wakeup(wl, false);
        if (ret < 0)
                goto out;
@@ -2009,6 +2116,9 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw)
 
        mutex_lock(&wl->mutex);
 
+       if (unlikely(wl->state == WL1271_STATE_OFF))
+               goto out;
+
        ret = wl1271_ps_elp_wakeup(wl, false);
        if (ret < 0)
                goto out;
@@ -2030,14 +2140,14 @@ static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
 {
        struct wl1271 *wl = hw->priv;
        struct ieee80211_conf *conf = &hw->conf;
+
        if (idx != 0)
                return -ENOENT;
+
        survey->channel = conf->channel;
        survey->filled = SURVEY_INFO_NOISE_DBM;
        survey->noise = wl->noise;
+
        return 0;
 }
 
@@ -2107,14 +2217,14 @@ static struct ieee80211_channel wl1271_channels[] = {
 /* mapping to indexes for wl1271_rates */
 static const u8 wl1271_rate_to_idx_2ghz[] = {
        /* MCS rates are used only with 11n */
-       CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS7 */
-       CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS6 */
-       CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS5 */
-       CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS4 */
-       CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS3 */
-       CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS2 */
-       CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS1 */
-       CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS0 */
+       7,                            /* CONF_HW_RXTX_RATE_MCS7 */
+       6,                            /* CONF_HW_RXTX_RATE_MCS6 */
+       5,                            /* CONF_HW_RXTX_RATE_MCS5 */
+       4,                            /* CONF_HW_RXTX_RATE_MCS4 */
+       3,                            /* CONF_HW_RXTX_RATE_MCS3 */
+       2,                            /* CONF_HW_RXTX_RATE_MCS2 */
+       1,                            /* CONF_HW_RXTX_RATE_MCS1 */
+       0,                            /* CONF_HW_RXTX_RATE_MCS0 */
 
        11,                            /* CONF_HW_RXTX_RATE_54   */
        10,                            /* CONF_HW_RXTX_RATE_48   */
@@ -2134,12 +2244,34 @@ static const u8 wl1271_rate_to_idx_2ghz[] = {
        0                              /* CONF_HW_RXTX_RATE_1    */
 };
 
+/* 11n STA capabilities */
+#define HW_RX_HIGHEST_RATE     72
+
+#ifdef CONFIG_WL1271_HT
+#define WL1271_HT_CAP { \
+       .cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20, \
+       .ht_supported = true, \
+       .ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K, \
+       .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, \
+       .mcs = { \
+               .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, \
+               .rx_highest = cpu_to_le16(HW_RX_HIGHEST_RATE), \
+               .tx_params = IEEE80211_HT_MCS_TX_DEFINED, \
+               }, \
+}
+#else
+#define WL1271_HT_CAP { \
+       .ht_supported = false, \
+}
+#endif
+
 /* can't be const, mac80211 writes to this */
 static struct ieee80211_supported_band wl1271_band_2ghz = {
        .channels = wl1271_channels,
        .n_channels = ARRAY_SIZE(wl1271_channels),
        .bitrates = wl1271_rates,
        .n_bitrates = ARRAY_SIZE(wl1271_rates),
+       .ht_cap = WL1271_HT_CAP,
 };
 
 /* 5 GHz data rates for WL1273 */
@@ -2222,14 +2354,14 @@ static struct ieee80211_channel wl1271_channels_5ghz[] = {
 /* mapping to indexes for wl1271_rates_5ghz */
 static const u8 wl1271_rate_to_idx_5ghz[] = {
        /* MCS rates are used only with 11n */
-       CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS7 */
-       CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS6 */
-       CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS5 */
-       CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS4 */
-       CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS3 */
-       CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS2 */
-       CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS1 */
-       CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS0 */
+       7,                            /* CONF_HW_RXTX_RATE_MCS7 */
+       6,                            /* CONF_HW_RXTX_RATE_MCS6 */
+       5,                            /* CONF_HW_RXTX_RATE_MCS5 */
+       4,                            /* CONF_HW_RXTX_RATE_MCS4 */
+       3,                            /* CONF_HW_RXTX_RATE_MCS3 */
+       2,                            /* CONF_HW_RXTX_RATE_MCS2 */
+       1,                            /* CONF_HW_RXTX_RATE_MCS1 */
+       0,                            /* CONF_HW_RXTX_RATE_MCS0 */
 
        7,                             /* CONF_HW_RXTX_RATE_54   */
        6,                             /* CONF_HW_RXTX_RATE_48   */
@@ -2254,6 +2386,7 @@ static struct ieee80211_supported_band wl1271_band_5ghz = {
        .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
        .bitrates = wl1271_rates_5ghz,
        .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
+       .ht_cap = WL1271_HT_CAP,
 };
 
 static const u8 *wl1271_band_rate_to_idx[] = {
@@ -2281,18 +2414,18 @@ static const struct ieee80211_ops wl1271_ops = {
 };
 
 
-u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate)
+u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band)
 {
        u8 idx;
 
-       BUG_ON(wl->band >= sizeof(wl1271_band_rate_to_idx)/sizeof(u8 *));
+       BUG_ON(band >= sizeof(wl1271_band_rate_to_idx)/sizeof(u8 *));
 
        if (unlikely(rate >= CONF_HW_RXTX_RATE_MAX)) {
                wl1271_error("Illegal RX rate from HW: %d", rate);
                return 0;
        }
 
-       idx = wl1271_band_rate_to_idx[wl->band][rate];
+       idx = wl1271_band_rate_to_idx[band][rate];
        if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
                wl1271_error("Unsupported RX rate from HW: %d", rate);
                return 0;
@@ -2521,6 +2654,7 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
        wl->sg_enabled = true;
        wl->hw_pg_ver = -1;
 
+       memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
        for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
                wl->tx_frames[i] = NULL;
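In wl1271_op_tx() above, sta_rate_set packs the legacy rates and the HT MCS mask into a single word so the two halves can be compared and updated independently. A hedged sketch of the resulting layout; HW_BG_RATES_MASK and HW_HT_RATES_OFFSET are defined in the driver headers outside this diff, and are assumed here to cover bits 0-15 and to equal 16, respectively:

/*
 * Sketch only (constants assumed, not part of the patch):
 *
 *   bits  0..15  legacy B/G rates from sta->supp_rates[band]
 *   bits 16..23  HT MCS 0-7 from sta->ht_cap.mcs.rx_mask[0]
 */
static u32 wl1271_build_sta_rate_set(struct ieee80211_sta *sta,
				     enum ieee80211_band band)
{
	u32 rate_set = sta->supp_rates[band] & HW_BG_RATES_MASK;

	if (sta->ht_cap.ht_supported)
		rate_set |= (u32)sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET;

	return rate_set;
}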
 
index bea133b6e4893e2d61cbeff1e4d1f31d1faa42c4..cacfee56a0d069c24c2f3527b95ab249666e8436 100644 (file)
@@ -48,10 +48,24 @@ static void wl1271_rx_status(struct wl1271 *wl,
                             struct ieee80211_rx_status *status,
                             u8 beacon)
 {
+       enum ieee80211_band desc_band;
+
        memset(status, 0, sizeof(struct ieee80211_rx_status));
 
        status->band = wl->band;
-       status->rate_idx = wl1271_rate_to_idx(wl, desc->rate);
+
+       if ((desc->flags & WL1271_RX_DESC_BAND_MASK) == WL1271_RX_DESC_BAND_BG)
+               desc_band = IEEE80211_BAND_2GHZ;
+       else
+               desc_band = IEEE80211_BAND_5GHZ;
+
+       status->rate_idx = wl1271_rate_to_idx(desc->rate, desc_band);
+
+#ifdef CONFIG_WL1271_HT
+       /* 11n support */
+       if (desc->rate <= CONF_HW_RXTX_RATE_MCS0)
+               status->flag |= RX_FLAG_HT;
+#endif
 
        status->signal = desc->rssi;
 
@@ -170,10 +184,14 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
                while (pkt_offset < buf_size) {
                        pkt_length = wl1271_rx_get_buf_size(status,
                                        drv_rx_counter);
-                       if (wl1271_rx_handle_data(wl,
-                                       wl->aggr_buf + pkt_offset,
-                                       pkt_length) < 0)
-                               break;
+                       /*
+                        * The handle data call can only fail in out-of-memory
+                        * conditions; in that case the received frame will
+                        * just be dropped.
+                        */
+                       wl1271_rx_handle_data(wl,
+                                             wl->aggr_buf + pkt_offset,
+                                             pkt_length);
                        wl->rx_counter++;
                        drv_rx_counter++;
                        drv_rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
index 13a232333b13fd5cb6e5fbbd260b7e5d9195d6c4..6d41981ce53fcb7f16d16e9543d847bb3a76920f 100644 (file)
@@ -116,6 +116,6 @@ struct wl1271_rx_descriptor {
 } __packed;
 
 void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status);
-u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate);
+u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
 
 #endif
index 909bb47995b6907ee8afad05c3acf37500043d4a..e0661a543a3591f32ace38d1ab89a7b642d218c8 100644 (file)
@@ -48,14 +48,15 @@ void wl1271_scan_complete_work(struct work_struct *work)
        wl->scan.state = WL1271_SCAN_STATE_IDLE;
        kfree(wl->scan.scanned_ch);
        wl->scan.scanned_ch = NULL;
-       mutex_unlock(&wl->mutex);
-
+       wl->scan.req = NULL;
        ieee80211_scan_completed(wl->hw, false);
 
        if (wl->scan.failed) {
                wl1271_info("Scan completed due to error.");
                ieee80211_queue_work(wl->hw, &wl->recovery_work);
        }
+       mutex_unlock(&wl->mutex);
+
 }
 
 
index a3aa84386c88b3aef93cbfe4210e796797738974..55ec4428922b2f154afc458ce503259d96161dd0 100644 (file)
@@ -37,6 +37,7 @@ enum wl1271_tm_commands {
        WL1271_TM_CMD_CONFIGURE,
        WL1271_TM_CMD_NVS_PUSH,
        WL1271_TM_CMD_SET_PLT_MODE,
+       WL1271_TM_CMD_RECOVER,
 
        __WL1271_TM_CMD_AFTER_LAST
 };
@@ -248,6 +249,15 @@ static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[])
        return ret;
 }
 
+static int wl1271_tm_cmd_recover(struct wl1271 *wl, struct nlattr *tb[])
+{
+       wl1271_debug(DEBUG_TESTMODE, "testmode cmd recover");
+
+       ieee80211_queue_work(wl->hw, &wl->recovery_work);
+
+       return 0;
+}
+
 int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len)
 {
        struct wl1271 *wl = hw->priv;
@@ -272,6 +282,8 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len)
                return wl1271_tm_cmd_nvs_push(wl, tb);
        case WL1271_TM_CMD_SET_PLT_MODE:
                return wl1271_tm_cmd_set_plt_mode(wl, tb);
+       case WL1271_TM_CMD_RECOVER:
+               return wl1271_tm_cmd_recover(wl, tb);
        default:
                return -EOPNOTSUPP;
        }
index e3dc13c4d01ad0b572267526efae5f666b5bf757..279be5b98d9fe2f00e6439f4961b80b7e9823516 100644 (file)
 #include "wl1271_ps.h"
 #include "wl1271_tx.h"
 
-static int wl1271_tx_id(struct wl1271 *wl, struct sk_buff *skb)
+static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
 {
-       int i;
-       for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
-               if (wl->tx_frames[i] == NULL) {
-                       wl->tx_frames[i] = skb;
-                       wl->tx_frames_cnt++;
-                       return i;
-               }
+       int id;
+
+       id = find_first_zero_bit(wl->tx_frames_map, ACX_TX_DESCRIPTORS);
+       if (id >= ACX_TX_DESCRIPTORS)
+               return -EBUSY;
+
+       __set_bit(id, wl->tx_frames_map);
+       wl->tx_frames[id] = skb;
+       wl->tx_frames_cnt++;
+       return id;
+}
 
-       return -EBUSY;
+static void wl1271_free_tx_id(struct wl1271 *wl, int id)
+{
+       if (__test_and_clear_bit(id, wl->tx_frames_map)) {
+               wl->tx_frames[id] = NULL;
+               wl->tx_frames_cnt--;
+       }
 }
 
 static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
@@ -52,10 +61,10 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
        int id, ret = -EBUSY;
 
        if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
-               return -EBUSY;
+               return -EAGAIN;
 
        /* allocate free identifier for the packet */
-       id = wl1271_tx_id(wl, skb);
+       id = wl1271_alloc_tx_id(wl, skb);
        if (id < 0)
                return id;
 
@@ -79,8 +88,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
                             "tx_allocate: size: %d, blocks: %d, id: %d",
                             total_len, total_blocks, id);
        } else {
-               wl->tx_frames[id] = NULL;
-               wl->tx_frames_cnt--;
+               wl1271_free_tx_id(wl, id);
        }
 
        return ret;
@@ -201,41 +209,67 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
                rate_set >>= 1;
        }
 
+#ifdef CONFIG_WL1271_HT
+       /* MCS rates indication are on bits 16 - 23 */
+       rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates;
+
+       for (bit = 0; bit < 8; bit++) {
+               if (rate_set & 0x1)
+                       enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit);
+               rate_set >>= 1;
+       }
+#endif
+
        return enabled_rates;
 }
 
-void wl1271_tx_work(struct work_struct *work)
+static void handle_tx_low_watermark(struct wl1271 *wl)
+{
+       unsigned long flags;
+
+       if (test_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags) &&
+           skb_queue_len(&wl->tx_queue) <= WL1271_TX_QUEUE_LOW_WATERMARK) {
+               /* firmware buffer has space, restart queues */
+               spin_lock_irqsave(&wl->wl_lock, flags);
+               ieee80211_wake_queues(wl->hw);
+               clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
+               spin_unlock_irqrestore(&wl->wl_lock, flags);
+       }
+}
+
+void wl1271_tx_work_locked(struct wl1271 *wl)
 {
-       struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
        struct sk_buff *skb;
        bool woken_up = false;
        u32 sta_rates = 0;
-       u32 buf_offset;
+       u32 buf_offset = 0;
+       bool sent_packets = false;
        int ret;
 
        /* check if the rates supported by the AP have changed */
        if (unlikely(test_and_clear_bit(WL1271_FLAG_STA_RATES_CHANGED,
                                        &wl->flags))) {
                unsigned long flags;
+
                spin_lock_irqsave(&wl->wl_lock, flags);
                sta_rates = wl->sta_rate_set;
                spin_unlock_irqrestore(&wl->wl_lock, flags);
        }
 
-       mutex_lock(&wl->mutex);
-
        if (unlikely(wl->state == WL1271_STATE_OFF))
                goto out;
 
        /* if rates have changed, re-configure the rate policy */
        if (unlikely(sta_rates)) {
+               ret = wl1271_ps_elp_wakeup(wl, false);
+               if (ret < 0)
+                       goto out;
+               woken_up = true;
+
                wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates);
                wl1271_acx_rate_policies(wl);
        }
 
-       /* Prepare the transfer buffer, by aggregating all
-        * available packets */
-       buf_offset = 0;
        while ((skb = skb_dequeue(&wl->tx_queue))) {
                if (!woken_up) {
                        ret = wl1271_ps_elp_wakeup(wl, false);
@@ -245,13 +279,25 @@ void wl1271_tx_work(struct work_struct *work)
                }
 
                ret = wl1271_prepare_tx_frame(wl, skb, buf_offset);
-               if (ret == -EBUSY) {
+               if (ret == -EAGAIN) {
+                       /*
+                        * Aggregation buffer is full.
+                        * Flush buffer and try again.
+                        */
+                       skb_queue_head(&wl->tx_queue, skb);
+                       wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
+                               buf_offset, true);
+                       sent_packets = true;
+                       buf_offset = 0;
+                       continue;
+               } else if (ret == -EBUSY) {
                        /*
-                        * Either the firmware buffer is full, or the
-                        * aggregation buffer is.
+                        * Firmware buffer is full.
                         * Queue back last skb, and stop aggregating.
                         */
                        skb_queue_head(&wl->tx_queue, skb);
+                       /* No work left, avoid scheduling redundant tx work */
+                       set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
                        goto out_ack;
                } else if (ret < 0) {
                        dev_kfree_skb(skb);
@@ -265,14 +311,25 @@ out_ack:
        if (buf_offset) {
                wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
                                buf_offset, true);
+               sent_packets = true;
+       }
+       if (sent_packets) {
                /* interrupt the firmware with the new packets */
                wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
+               handle_tx_low_watermark(wl);
        }
 
 out:
        if (woken_up)
                wl1271_ps_elp_sleep(wl);
+}
+
+void wl1271_tx_work(struct work_struct *work)
+{
+       struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
 
+       mutex_lock(&wl->mutex);
+       wl1271_tx_work_locked(wl);
        mutex_unlock(&wl->mutex);
 }
 
@@ -298,7 +355,7 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
        if (result->status == TX_SUCCESS) {
                if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
                        info->flags |= IEEE80211_TX_STAT_ACK;
-               rate = wl1271_rate_to_idx(wl, result->rate_class_index);
+               rate = wl1271_rate_to_idx(result->rate_class_index, wl->band);
                retries = result->ack_failures;
        } else if (result->status == TX_RETRY_EXCEEDED) {
                wl->stats.excessive_retries++;
@@ -335,8 +392,7 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
 
        /* return the packet to the stack */
        ieee80211_tx_status(wl->hw, skb);
-       wl->tx_frames[result->id] = NULL;
-       wl->tx_frames_cnt--;
+       wl1271_free_tx_id(wl, result->id);
 }
 
 /* Called upon reception of a TX complete interrupt */
@@ -375,19 +431,6 @@ void wl1271_tx_complete(struct wl1271 *wl)
 
                wl->tx_results_count++;
        }
-
-       if (test_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags) &&
-           skb_queue_len(&wl->tx_queue) <= WL1271_TX_QUEUE_LOW_WATERMARK) {
-               unsigned long flags;
-
-               /* firmware buffer has space, restart queues */
-               wl1271_debug(DEBUG_TX, "tx_complete: waking queues");
-               spin_lock_irqsave(&wl->wl_lock, flags);
-               ieee80211_wake_queues(wl->hw);
-               clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
-               spin_unlock_irqrestore(&wl->wl_lock, flags);
-               ieee80211_queue_work(wl->hw, &wl->tx_work);
-       }
 }
 
 /* caller must hold wl->mutex */
@@ -402,14 +445,19 @@ void wl1271_tx_reset(struct wl1271 *wl)
                ieee80211_tx_status(wl->hw, skb);
        }
 
+       /*
+        * Make sure the driver is in a consistent state, in case this
+        * function is called from a context other than interface removal.
+        */
+       handle_tx_low_watermark(wl);
+
        for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
                if (wl->tx_frames[i] != NULL) {
                        skb = wl->tx_frames[i];
-                       wl->tx_frames[i] = NULL;
+                       wl1271_free_tx_id(wl, i);
                        wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
                        ieee80211_tx_status(wl->hw, skb);
                }
-       wl->tx_frames_cnt = 0;
 }
 
 #define WL1271_TX_FLUSH_TIMEOUT 500000
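The wl1271_alloc_tx_id()/wl1271_free_tx_id() pair above is the standard kernel bitmap-allocator pattern. A self-contained sketch of the same idea, independent of the wl1271 structures; the caller is expected to serialize, as tx.c does under wl->mutex:

#include <linux/bitops.h>
#include <linux/errno.h>

#define NUM_IDS 32

static DECLARE_BITMAP(id_map, NUM_IDS);

/* Allocate the lowest free id, or -EBUSY if all ids are in use. */
static int id_alloc(void)
{
	int id = find_first_zero_bit(id_map, NUM_IDS);

	if (id >= NUM_IDS)
		return -EBUSY;

	__set_bit(id, id_map);
	return id;
}

/* Release an id; a no-op if it was not allocated. */
static void id_free(int id)
{
	__test_and_clear_bit(id, id_map);
}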
index d12a129ad11cc79716e46ef771112c7429faa3c6..9dc6f228c0de9b7696a1eb6883a0f593cec972be 100644 (file)
@@ -140,10 +140,11 @@ static inline int wl1271_tx_get_queue(int queue)
 }
 
 void wl1271_tx_work(struct work_struct *work);
+void wl1271_tx_work_locked(struct wl1271 *wl);
 void wl1271_tx_complete(struct wl1271 *wl);
 void wl1271_tx_reset(struct wl1271 *wl);
 void wl1271_tx_flush(struct wl1271 *wl);
-u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate);
+u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
 u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set);
 
 #endif
index 390d77f762c49fc9c2d9e4889f282d3efba40212..b97aa9c78a96d925e4de5f25504c008f60f3dfad 100644 (file)
@@ -30,6 +30,7 @@ static struct usb_device_id zd1201_table[] = {
        {USB_DEVICE(0x0ace, 0x1201)}, /* ZyDAS ZD1201 Wireless USB Adapter */
        {USB_DEVICE(0x050d, 0x6051)}, /* Belkin F5D6051 usb  adapter */
        {USB_DEVICE(0x0db0, 0x6823)}, /* MSI UB11B usb  adapter */
+       {USB_DEVICE(0x1044, 0x8004)}, /* Gigabyte GN-WLBZ101 */
        {USB_DEVICE(0x1044, 0x8005)}, /* GIGABYTE GN-WLBZ201 usb adapter */
        {}
 };
index 818e1480ca93f1a5e34d13b0e04163b135e68b29..06041cb1c4220e548a08d4229a5a64ca645a0bbb 100644 (file)
@@ -55,6 +55,7 @@ static struct usb_device_id usb_ids[] = {
        { USB_DEVICE(0x129b, 0x1666), .driver_info = DEVICE_ZD1211 },
        { USB_DEVICE(0x13b1, 0x001e), .driver_info = DEVICE_ZD1211 },
        { USB_DEVICE(0x1435, 0x0711), .driver_info = DEVICE_ZD1211 },
+       { USB_DEVICE(0x14ea, 0xab10), .driver_info = DEVICE_ZD1211 },
        { USB_DEVICE(0x14ea, 0xab13), .driver_info = DEVICE_ZD1211 },
        { USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 },
        { USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 },
@@ -92,6 +93,7 @@ static struct usb_device_id usb_ids[] = {
        { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B },
        { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B },
        { USB_DEVICE(0x2019, 0x5303), .driver_info = DEVICE_ZD1211B },
+       { USB_DEVICE(0x2019, 0xed01), .driver_info = DEVICE_ZD1211B },
        /* "Driverless" devices that need ejecting */
        { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
        { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER },
index 14f0955eca68b37ed8f776fd83390339cacf1351..2de52d18152f1fb6b71ff631b576e5252059c4ed 100644 (file)
@@ -515,7 +515,7 @@ static void xemaclite_update_address(struct net_local *drvdata,
  */
 static int xemaclite_set_mac_address(struct net_device *dev, void *address)
 {
-       struct net_local *lp = (struct net_local *) netdev_priv(dev);
+       struct net_local *lp = netdev_priv(dev);
        struct sockaddr *addr = address;
 
        if (netif_running(dev))
@@ -534,7 +534,7 @@ static int xemaclite_set_mac_address(struct net_device *dev, void *address)
  */
 static void xemaclite_tx_timeout(struct net_device *dev)
 {
-       struct net_local *lp = (struct net_local *) netdev_priv(dev);
+       struct net_local *lp = netdev_priv(dev);
        unsigned long flags;
 
        dev_err(&lp->ndev->dev, "Exceeded transmit timeout of %lu ms\n",
@@ -578,7 +578,7 @@ static void xemaclite_tx_timeout(struct net_device *dev)
  */
 static void xemaclite_tx_handler(struct net_device *dev)
 {
-       struct net_local *lp = (struct net_local *) netdev_priv(dev);
+       struct net_local *lp = netdev_priv(dev);
 
        dev->stats.tx_packets++;
        if (lp->deferred_skb) {
@@ -605,7 +605,7 @@ static void xemaclite_tx_handler(struct net_device *dev)
  */
 static void xemaclite_rx_handler(struct net_device *dev)
 {
-       struct net_local *lp = (struct net_local *) netdev_priv(dev);
+       struct net_local *lp = netdev_priv(dev);
        struct sk_buff *skb;
        unsigned int align;
        u32 len;
@@ -661,7 +661,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
 {
        bool tx_complete = 0;
        struct net_device *dev = dev_id;
-       struct net_local *lp = (struct net_local *) netdev_priv(dev);
+       struct net_local *lp = netdev_priv(dev);
        void __iomem *base_addr = lp->base_addr;
        u32 tx_status;
 
@@ -918,7 +918,7 @@ void xemaclite_adjust_link(struct net_device *ndev)
  */
 static int xemaclite_open(struct net_device *dev)
 {
-       struct net_local *lp = (struct net_local *) netdev_priv(dev);
+       struct net_local *lp = netdev_priv(dev);
        int retval;
 
        /* Just to be safe, stop the device first */
@@ -987,7 +987,7 @@ static int xemaclite_open(struct net_device *dev)
  */
 static int xemaclite_close(struct net_device *dev)
 {
-       struct net_local *lp = (struct net_local *) netdev_priv(dev);
+       struct net_local *lp = netdev_priv(dev);
 
        netif_stop_queue(dev);
        xemaclite_disable_interrupts(lp);
@@ -1031,7 +1031,7 @@ static struct net_device_stats *xemaclite_get_stats(struct net_device *dev)
  */
 static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
 {
-       struct net_local *lp = (struct net_local *) netdev_priv(dev);
+       struct net_local *lp = netdev_priv(dev);
        struct sk_buff *new_skb;
        unsigned int len;
        unsigned long flags;
@@ -1068,7 +1068,7 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
 static void xemaclite_remove_ndev(struct net_device *ndev)
 {
        if (ndev) {
-               struct net_local *lp = (struct net_local *) netdev_priv(ndev);
+               struct net_local *lp = netdev_priv(ndev);
 
                if (lp->base_addr)
                        iounmap((void __iomem __force *) (lp->base_addr));
@@ -1245,7 +1245,7 @@ static int __devexit xemaclite_of_remove(struct platform_device *of_dev)
        struct device *dev = &of_dev->dev;
        struct net_device *ndev = dev_get_drvdata(dev);
 
-       struct net_local *lp = (struct net_local *) netdev_priv(ndev);
+       struct net_local *lp = netdev_priv(ndev);
 
        /* Un-register the mii_bus, if configured */
        if (lp->has_mdio) {
index c3a3292045114bf86a285622ad7b75efeef89fa3..ae07b3dfbcc160e9991c1dd8c7d695a581f67f3a 100644 (file)
@@ -124,7 +124,7 @@ MODULE_LICENSE("GPL");
 #define TX_BUF_SIZE 8192
 #define DMA_BUF_SIZE (RX_BUF_SIZE + 16)        /* 8k + 16 bytes for trailers */
 
-#define TX_TIMEOUT     10
+#define TX_TIMEOUT     (HZ/10)
 
 struct znet_private {
        int rx_dma, tx_dma;
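TX_TIMEOUT here is a jiffies value (typically assigned to dev->watchdog_timeo), so a bare 10 scales with the configured tick rate: on a HZ=1000 kernel it meant only 10 ms, while HZ/10 keeps it at 100 ms for any HZ. An equivalent, more explicit spelling would be (sketch, not part of the patch):

/* HZ-independent alternative: 100 ms expressed in jiffies */
#define TX_TIMEOUT	msecs_to_jiffies(100)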
index 6536a041d90dd671966bf478a474aefe2ec926ef..f6c8c81a00252d50de120e949fd4084cd4e1389e 100644 (file)
@@ -59,6 +59,7 @@ static int ssb_pcihost_probe(struct pci_dev *dev,
        struct ssb_bus *ssb;
        int err = -ENOMEM;
        const char *name;
+       u32 val;
 
        ssb = kzalloc(sizeof(*ssb), GFP_KERNEL);
        if (!ssb)
@@ -74,6 +75,12 @@ static int ssb_pcihost_probe(struct pci_dev *dev,
                goto err_pci_disable;
        pci_set_master(dev);
 
+       /* Disable the RETRY_TIMEOUT register (0x41) to keep
+        * PCI Tx retries from interfering with C3 CPU state */
+       pci_read_config_dword(dev, 0x40, &val);
+       if ((val & 0x0000ff00) != 0)
+               pci_write_config_dword(dev, 0x40, val & 0xffff00ff);
+
        err = ssb_bus_pcibus_register(ssb, dev);
        if (err)
                goto err_pci_release_regions;
index 827cc95711ef82563e0f7a1718f364ce8adfd451..2184c6b97aebb699206e2eb7426bd716b8f0e54f 100644 (file)
@@ -109,6 +109,17 @@ static inline __u8 ror8(__u8 word, unsigned int shift)
        return (word >> shift) | (word << (8 - shift));
 }
 
+/**
+ * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
+ * @value: value to sign extend
+ * @index: 0 based bit index (0<=index<32) to sign bit
+ */
+static inline __s32 sign_extend32(__u32 value, int index)
+{
+       __u8 shift = 31 - index;
+       return (__s32)(value << shift) >> shift;
+}
+
 static inline unsigned fls_long(unsigned long l)
 {
        if (sizeof(l) == 4)
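A couple of worked values make the index argument of sign_extend32() concrete; index is the 0-based position of the bit treated as the sign bit:

/* Illustrative values only */
static void sign_extend32_examples(void)
{
	s32 a = sign_extend32(0xa5, 7);   /* 8-bit field:  -91 (0xffffffa5) */
	s32 b = sign_extend32(0x25, 7);   /* sign bit clear: 37, unchanged  */
	s32 c = sign_extend32(0x800, 11); /* 12-bit sample: -2048           */
}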
index 749f01ccd26ef4ea23e06241b6f0c51f84efbd65..eed52bcd35d0d7e2e6e0e36e12ac773ada2dbb6d 100644 (file)
@@ -462,6 +462,7 @@ struct dccp_ackvec;
  * @dccps_hc_rx_insert_options - receiver wants to add options when acking
  * @dccps_hc_tx_insert_options - sender wants to add options when sending
  * @dccps_server_timewait - server holds timewait state on close (RFC 4340, 8.3)
+ * @dccps_sync_scheduled - flag which signals "send out-of-band message soon"
  * @dccps_xmitlet - tasklet scheduled by the TX CCID to dequeue data packets
  * @dccps_xmit_timer - used by the TX CCID to delay sending (rate-based pacing)
  * @dccps_syn_rtt - RTT sample from Request/Response exchange (in usecs)
@@ -503,6 +504,7 @@ struct dccp_sock {
        __u8                            dccps_hc_rx_insert_options:1;
        __u8                            dccps_hc_tx_insert_options:1;
        __u8                            dccps_server_timewait:1;
+       __u8                            dccps_sync_scheduled:1;
        struct tasklet_struct           dccps_xmitlet;
        struct timer_list               dccps_xmit_timer;
 };
index 69b43dbea6c6ac444dfbf8c0daf1650942a9a9b1..447a775878fb94db30df9ac047327fcc8746079b 100644 (file)
@@ -91,54 +91,6 @@ struct sock_fprog {  /* Required for SO_ATTACH_FILTER. */
 #define         BPF_TAX         0x00
 #define         BPF_TXA         0x80
 
-enum {
-       BPF_S_RET_K = 0,
-       BPF_S_RET_A,
-       BPF_S_ALU_ADD_K,
-       BPF_S_ALU_ADD_X,
-       BPF_S_ALU_SUB_K,
-       BPF_S_ALU_SUB_X,
-       BPF_S_ALU_MUL_K,
-       BPF_S_ALU_MUL_X,
-       BPF_S_ALU_DIV_X,
-       BPF_S_ALU_AND_K,
-       BPF_S_ALU_AND_X,
-       BPF_S_ALU_OR_K,
-       BPF_S_ALU_OR_X,
-       BPF_S_ALU_LSH_K,
-       BPF_S_ALU_LSH_X,
-       BPF_S_ALU_RSH_K,
-       BPF_S_ALU_RSH_X,
-       BPF_S_ALU_NEG,
-       BPF_S_LD_W_ABS,
-       BPF_S_LD_H_ABS,
-       BPF_S_LD_B_ABS,
-       BPF_S_LD_W_LEN,
-       BPF_S_LD_W_IND,
-       BPF_S_LD_H_IND,
-       BPF_S_LD_B_IND,
-       BPF_S_LD_IMM,
-       BPF_S_LDX_W_LEN,
-       BPF_S_LDX_B_MSH,
-       BPF_S_LDX_IMM,
-       BPF_S_MISC_TAX,
-       BPF_S_MISC_TXA,
-       BPF_S_ALU_DIV_K,
-       BPF_S_LD_MEM,
-       BPF_S_LDX_MEM,
-       BPF_S_ST,
-       BPF_S_STX,
-       BPF_S_JMP_JA,
-       BPF_S_JMP_JEQ_K,
-       BPF_S_JMP_JEQ_X,
-       BPF_S_JMP_JGE_K,
-       BPF_S_JMP_JGE_X,
-       BPF_S_JMP_JGT_K,
-       BPF_S_JMP_JGT_X,
-       BPF_S_JMP_JSET_K,
-       BPF_S_JMP_JSET_X,
-};
-
 #ifndef BPF_MAXINSNS
 #define BPF_MAXINSNS 4096
 #endif
@@ -195,7 +147,7 @@ struct sock;
 
 extern int sk_filter(struct sock *sk, struct sk_buff *skb);
 extern unsigned int sk_run_filter(struct sk_buff *skb,
-                                 struct sock_filter *filter, int flen);
+                                 const struct sock_filter *filter);
 extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
 extern int sk_detach_filter(struct sock *sk);
 extern int sk_chk_filter(struct sock_filter *filter, int flen);
index 0d241a5c4909b8bdc99f107b1ba4ac60de9996ae..f7e73c338c40f630f5b0e9bf204c3ab82f00c356 100644 (file)
@@ -102,7 +102,9 @@ struct __fdb_entry {
 #include <linux/netdevice.h>
 
 extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
-extern int (*br_should_route_hook)(struct sk_buff *skb);
+
+typedef int br_should_route_hook_t(struct sk_buff *skb);
+extern br_should_route_hook_t __rcu *br_should_route_hook;
 
 #endif
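The hook is now an RCU-managed pointer: a brouter module publishes its function with rcu_assign_pointer() and the bridge samples it under rcu_read_lock(), as br_handle_frame() does later in this diff. A hedged sketch of the writer side (my_should_route and the init/exit names are illustrative only):

static int my_should_route(struct sk_buff *skb)
{
	return 0;	/* 0 = bridge normally, nonzero = hand skb back for routing */
}

static int __init my_brouter_init(void)
{
	rcu_assign_pointer(br_should_route_hook, my_should_route);
	return 0;
}

static void __exit my_brouter_exit(void)
{
	rcu_assign_pointer(br_should_route_hook, NULL);
	synchronize_net();	/* let in-flight readers finish before unload */
}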
 
index 2fc66dd783eefd18119cbd29f5d5b7fe54a15ec4..2e02e4d7b11e0daad83193478215fa0d7932a02a 100644 (file)
@@ -80,6 +80,24 @@ struct rtnl_link_ifmap {
        __u8    port;
 };
 
+/*
+ * IFLA_AF_SPEC
+ *   Contains nested attributes for address family specific attributes.
+ *   Each address family may create a attribute with the address family
+ *   number as type and create its own attribute structure in it.
+ *
+ *   Example:
+ *   [IFLA_AF_SPEC] = {
+ *       [AF_INET] = {
+ *           [IFLA_INET_CONF] = ...,
+ *       },
+ *       [AF_INET6] = {
+ *           [IFLA_INET6_FLAGS] = ...,
+ *           [IFLA_INET6_CONF] = ...,
+ *       }
+ *   }
+ */
+
 enum {
        IFLA_UNSPEC,
        IFLA_ADDRESS,
@@ -116,6 +134,7 @@ enum {
        IFLA_STATS64,
        IFLA_VF_PORTS,
        IFLA_PORT_SELF,
+       IFLA_AF_SPEC,
        __IFLA_MAX
 };
 
@@ -128,6 +147,14 @@ enum {
 #define IFLA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifinfomsg))
 #endif
 
+enum {
+       IFLA_INET_UNSPEC,
+       IFLA_INET_CONF,
+       __IFLA_INET_MAX,
+};
+
+#define IFLA_INET_MAX (__IFLA_INET_MAX - 1)
+
 /* ifi_flags.
 
    IFF_* flags.
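For orientation, IFLA_AF_SPEC is consumed as a nested walk: each inner attribute's type is an address family number (AF_INET, AF_INET6, ...) and its payload is that family's own attribute block, such as IFLA_INET_CONF above. A minimal hedged sketch of such a walk (the function name is illustrative):

static void walk_af_spec(const struct nlattr *af_spec)
{
	const struct nlattr *af;
	int rem;

	/* nla_type(af) is the address family; nla_data()/nla_len() cover
	 * that family's nested attributes. */
	nla_for_each_nested(af, af_spec, rem)
		pr_debug("IFLA_AF_SPEC: family %d, %d bytes\n",
			 nla_type(af), nla_len(af));
}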
index 8a2fd66a8b5f8b51bd5f9475dc04580c0ccd6757..e28b2e4959d44fb269a06c443b8066a1f49d58cb 100644 (file)
@@ -25,19 +25,25 @@ struct macvlan_port;
 struct macvtap_queue;
 
 /**
- *     struct macvlan_rx_stats - MACVLAN percpu rx stats
+ *     struct macvlan_pcpu_stats - MACVLAN percpu stats
  *     @rx_packets: number of received packets
  *     @rx_bytes: number of received bytes
  *     @rx_multicast: number of received multicast packets
+ *     @tx_packets: number of transmitted packets
+ *     @tx_bytes: number of transmitted bytes
  *     @syncp: synchronization point for 64bit counters
- *     @rx_errors: number of errors
+ *     @rx_errors: number of rx errors
+ *     @tx_dropped: number of tx dropped packets
  */
-struct macvlan_rx_stats {
+struct macvlan_pcpu_stats {
        u64                     rx_packets;
        u64                     rx_bytes;
        u64                     rx_multicast;
+       u64                     tx_packets;
+       u64                     tx_bytes;
        struct u64_stats_sync   syncp;
-       unsigned long           rx_errors;
+       u32                     rx_errors;
+       u32                     tx_dropped;
 };
 
 /*
@@ -52,7 +58,7 @@ struct macvlan_dev {
        struct hlist_node       hlist;
        struct macvlan_port     *port;
        struct net_device       *lowerdev;
-       struct macvlan_rx_stats __percpu *rx_stats;
+       struct macvlan_pcpu_stats __percpu *pcpu_stats;
        enum macvlan_mode       mode;
        int (*receive)(struct sk_buff *skb);
        int (*forward)(struct net_device *dev, struct sk_buff *skb);
@@ -64,18 +70,18 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
                                    unsigned int len, bool success,
                                    bool multicast)
 {
-       struct macvlan_rx_stats *rx_stats;
-
-       rx_stats = this_cpu_ptr(vlan->rx_stats);
        if (likely(success)) {
-               u64_stats_update_begin(&rx_stats->syncp);
-               rx_stats->rx_packets++;;
-               rx_stats->rx_bytes += len;
+               struct macvlan_pcpu_stats *pcpu_stats;
+
+               pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
+               u64_stats_update_begin(&pcpu_stats->syncp);
+               pcpu_stats->rx_packets++;
+               pcpu_stats->rx_bytes += len;
                if (multicast)
-                       rx_stats->rx_multicast++;
-               u64_stats_update_end(&rx_stats->syncp);
+                       pcpu_stats->rx_multicast++;
+               u64_stats_update_end(&pcpu_stats->syncp);
        } else {
-               rx_stats->rx_errors++;
+               this_cpu_inc(vlan->pcpu_stats->rx_errors);
        }
 }
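macvlan_count_rx() above is the writer half of the per-cpu stats scheme: the 64-bit counters are bumped inside a u64_stats_sync section, while the error counter stays a plain u32 so it can be incremented with this_cpu_inc() outside it. The reader half folds every CPU's copy and retries if a writer raced; a hedged sketch of that fold (the function name is illustrative, in practice it lives in the driver's ndo_get_stats64):

static void fold_macvlan_rx_stats(const struct macvlan_dev *vlan,
				  struct rtnl_link_stats64 *out)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct macvlan_pcpu_stats *p;
		u64 packets, bytes;
		unsigned int start;

		p = per_cpu_ptr(vlan->pcpu_stats, cpu);
		do {
			start   = u64_stats_fetch_begin_bh(&p->syncp);
			packets = p->rx_packets;
			bytes   = p->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&p->syncp, start));

		out->rx_packets += packets;
		out->rx_bytes   += bytes;
		out->rx_errors  += p->rx_errors;	/* u32, not under syncp */
	}
}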
 
index 93fc2449af10e8dded6cbed23b4d979e4b5d10ef..c4987f265109411f2339a4fd819b4fd0d4513c67 100644 (file)
@@ -167,10 +167,10 @@ struct ip_sf_socklist {
  */
 
 struct ip_mc_socklist {
-       struct ip_mc_socklist   *next;
+       struct ip_mc_socklist __rcu *next_rcu;
        struct ip_mreqn         multi;
        unsigned int            sfmode;         /* MCAST_{INCLUDE,EXCLUDE} */
-       struct ip_sf_socklist   *sflist;
+       struct ip_sf_socklist __rcu     *sflist;
        struct rcu_head         rcu;
 };
 
@@ -186,11 +186,14 @@ struct ip_sf_list {
 struct ip_mc_list {
        struct in_device        *interface;
        __be32                  multiaddr;
+       unsigned int            sfmode;
        struct ip_sf_list       *sources;
        struct ip_sf_list       *tomb;
-       unsigned int            sfmode;
        unsigned long           sfcount[2];
-       struct ip_mc_list       *next;
+       union {
+               struct ip_mc_list *next;
+               struct ip_mc_list __rcu *next_rcu;
+       };
        struct timer_list       timer;
        int                     users;
        atomic_t                refcnt;
@@ -201,6 +204,7 @@ struct ip_mc_list {
        char                    loaded;
        unsigned char           gsquery;        /* check source marks? */
        unsigned char           crcount;
+       struct rcu_head         rcu;
 };
 
 /* V3 exponential field decoding */
@@ -234,7 +238,7 @@ extern void ip_mc_unmap(struct in_device *);
 extern void ip_mc_remap(struct in_device *);
 extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr);
 extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr);
-extern void ip_mc_rejoin_group(struct ip_mc_list *im);
+extern void ip_mc_rejoin_groups(struct in_device *in_dev);
 
 #endif
 #endif
index ccd5b07d678deb8a61ff759ba943dceae6bf1507..2b86eaf11773fd80cb58d4b4e19b2640ae833dae 100644 (file)
@@ -41,10 +41,12 @@ enum
        __IPV4_DEVCONF_MAX
 };
 
+#define IPV4_DEVCONF_MAX (__IPV4_DEVCONF_MAX - 1)
+
 struct ipv4_devconf {
        void    *sysctl;
-       int     data[__IPV4_DEVCONF_MAX - 1];
-       DECLARE_BITMAP(state, __IPV4_DEVCONF_MAX - 1);
+       int     data[IPV4_DEVCONF_MAX];
+       DECLARE_BITMAP(state, IPV4_DEVCONF_MAX);
 };
 
 struct in_device {
@@ -52,9 +54,8 @@ struct in_device {
        atomic_t                refcnt;
        int                     dead;
        struct in_ifaddr        *ifa_list;      /* IP ifaddr chain              */
-       rwlock_t                mc_list_lock;
-       struct ip_mc_list       *mc_list;       /* IP multicast filter chain    */
-       int                     mc_count;                 /* Number of installed mcasts */
+       struct ip_mc_list __rcu *mc_list;       /* IP multicast filter chain    */
+       int                     mc_count;       /* Number of installed mcasts   */
        spinlock_t              mc_tomb_lock;
        struct ip_mc_list       *mc_tomb;
        unsigned long           mr_v1_seen;
@@ -91,7 +92,7 @@ static inline void ipv4_devconf_set(struct in_device *in_dev, int index,
 
 static inline void ipv4_devconf_setall(struct in_device *in_dev)
 {
-       bitmap_fill(in_dev->cnf.state, __IPV4_DEVCONF_MAX - 1);
+       bitmap_fill(in_dev->cnf.state, IPV4_DEVCONF_MAX);
 }
 
 #define IN_DEV_CONF_GET(in_dev, attr) \
index d8fd2c23a1b994ec10e0a204820d6c7daf7dca0e..b45c1b8b1d19961b73a146e0e43b732705a2b322 100644 (file)
@@ -592,8 +592,7 @@ struct netdev_rx_queue {
        struct rps_map __rcu            *rps_map;
        struct rps_dev_flow_table __rcu *rps_flow_table;
        struct kobject                  kobj;
-       struct netdev_rx_queue          *first;
-       atomic_t                        count;
+       struct net_device               *dev;
 } ____cacheline_aligned_in_smp;
 #endif /* CONFIG_RPS */
 
@@ -951,7 +950,7 @@ struct net_device {
 #endif
        void                    *atalk_ptr;     /* AppleTalk link       */
        struct in_device __rcu  *ip_ptr;        /* IPv4 specific data   */
-       void                    *dn_ptr;        /* DECnet specific data */
+       struct dn_dev __rcu     *dn_ptr;        /* DECnet specific data */
        struct inet6_dev __rcu  *ip6_ptr;       /* IPv6 specific data */
        void                    *ec_ptr;        /* Econet specific data */
        void                    *ax25_ptr;      /* AX.25 specific data */
@@ -995,8 +994,8 @@ struct net_device {
        unsigned int            real_num_rx_queues;
 #endif
 
-       rx_handler_func_t       *rx_handler;
-       void                    *rx_handler_data;
+       rx_handler_func_t __rcu *rx_handler;
+       void __rcu              *rx_handler_data;
 
        struct netdev_queue __rcu *ingress_queue;
 
@@ -2239,6 +2238,8 @@ unsigned long netdev_fix_features(unsigned long features, const char *name);
 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
                                        struct net_device *dev);
 
+int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev);
+
 static inline int net_gso_ok(int features, int gso_type)
 {
        int feature = gso_type << NETIF_F_GSO_SHIFT;
@@ -2254,10 +2255,7 @@ static inline int skb_gso_ok(struct sk_buff *skb, int features)
 static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
 {
        if (skb_is_gso(skb)) {
-               int features = dev->features;
-
-               if (skb->protocol == htons(ETH_P_8021Q) || skb->vlan_tci)
-                       features &= dev->vlan_features;
+               int features = netif_get_vlan_features(skb, dev);
 
                return (!skb_gso_ok(skb, features) ||
                        unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
index 03317c8d4077a6488148b646ee9327b05a7dcec1..1893837b39660821351c4e744a166f0983244f7d 100644 (file)
@@ -33,6 +33,8 @@
 
 #define NF_QUEUE_NR(x) ((((x) << NF_VERDICT_BITS) & NF_VERDICT_QMASK) | NF_QUEUE)
 
+#define NF_DROP_ERR(x) (((-x) << NF_VERDICT_BITS) | NF_DROP)
+
 /* only for userspace compatibility */
 #ifndef __KERNEL__
 /* Generic cache responses from hook functions.
index 0edb2566c14ca6602a188c422fe0a10fdc17bdff..fb877b5621b7cc5f8b6e57d45a45ce0375c07492 100644 (file)
@@ -1307,7 +1307,11 @@ enum nl80211_bitrate_attr {
 *     wireless core it thinks it knows the regulatory domain we should be in.
  * @NL80211_REGDOM_SET_BY_COUNTRY_IE: the wireless core has received an
  *     802.11 country information element with regulatory information it
- *     thinks we should consider.
+ *     thinks we should consider. cfg80211 only processes the country
+ *     code from the IE, and relies on the regulatory domain information
+ *     structure passed by userspace (CRDA) from our wireless-regdb.
+ *     If a channel is enabled but the country code indicates it should
+ *     be disabled, we disable the channel and re-enable it upon disassociation.
  */
 enum nl80211_reg_initiator {
        NL80211_REGDOM_SET_BY_CORE,
index 08c32e4f261aca004ac06d895a1a8bbd73cfd887..c6c608482cba493bbe8bf2c2dd5779420f4552db 100644 (file)
@@ -354,37 +354,6 @@ static inline bool rfkill_blocked(struct rfkill *rfkill)
 }
 #endif /* RFKILL || RFKILL_MODULE */
 
-
-#ifdef CONFIG_RFKILL_LEDS
-/**
- * rfkill_get_led_trigger_name - Get the LED trigger name for the button's LED.
- * This function might return a NULL pointer if registering of the
- * LED trigger failed. Use this as "default_trigger" for the LED.
- */
-const char *rfkill_get_led_trigger_name(struct rfkill *rfkill);
-
-/**
- * rfkill_set_led_trigger_name -- set the LED trigger name
- * @rfkill: rfkill struct
- * @name: LED trigger name
- *
- * This function sets the LED trigger name of the radio LED
- * trigger that rfkill creates. It is optional, but if called
- * must be called before rfkill_register() to be effective.
- */
-void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name);
-#else
-static inline const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
-{
-       return NULL;
-}
-
-static inline void
-rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
-{
-}
-#endif
-
 #endif /* __KERNEL__ */
 
 #endif /* RFKILL_H */
index 4f902e1908aaa191095c2f90741f96679b4b3113..bebb8efea0a66fd67bd19600bc08d1ad8d235d8a 100644 (file)
 #ifndef _LINUX_WL12XX_H
 #define _LINUX_WL12XX_H
 
+/* The board reference clock values */
+enum {
+       WL12XX_REFCLOCK_19 = 0, /* 19.2 MHz */
+       WL12XX_REFCLOCK_26 = 1, /* 26 MHz */
+       WL12XX_REFCLOCK_38 = 2, /* 38.4 MHz */
+       WL12XX_REFCLOCK_54 = 3, /* 54 MHz */
+};
+
 struct wl12xx_platform_data {
        void (*set_power)(bool enable);
        /* SDIO only: IRQ number if WLAN_IRQ line is used, 0 for SDIO IRQs */
index 9402543fc20d2dc65cec9f0100397aee71b4dbf9..e54f6396fa4c0d1065ab219c0b3630be27cd03e6 100644 (file)
@@ -51,7 +51,7 @@ struct cfctrl_rsp {
        void (*restart_rsp)(void);
        void (*radioset_rsp)(void);
        void (*reject_rsp)(struct cflayer *layer, u8 linkid,
-                               struct cflayer *client_layer);;
+                               struct cflayer *client_layer);
 };
 
 /* Link Setup Parameters for CAIF-Links. */
index 97b8b7c9b63cf9854e3b8c2e83154c8ecb0bea06..772dea243e5dfd15affc97520e4735a931ce1af3 100644 (file)
@@ -1321,13 +1321,14 @@ struct cfg80211_ops {
  *     initiator is %REGDOM_SET_BY_CORE).
  * @WIPHY_FLAG_STRICT_REGULATORY: tells us the driver for this device will
  *     ignore regulatory domain settings until it gets its own regulatory
- *     domain via its regulatory_hint(). After its gets its own regulatory
- *     domain it will only allow further regulatory domain settings to
- *     further enhance compliance. For example if channel 13 and 14 are
- *     disabled by this regulatory domain no user regulatory domain can
- *     enable these channels at a later time. This can be used for devices
- *     which do not have calibration information gauranteed for frequencies
- *     or settings outside of its regulatory domain.
+ *     domain via its regulatory_hint() unless the regulatory hint is
+ *     from a country IE. After it gets its own regulatory domain it will
+ *     only allow further regulatory domain settings to further enhance
+ *     compliance. For example if channel 13 and 14 are disabled by this
+ *     regulatory domain no user regulatory domain can enable these channels
+ *     at a later time. This can be used for devices which do not have
+ *     calibration information guaranteed for frequencies or settings
+ *     outside of its regulatory domain.
  * @WIPHY_FLAG_DISABLE_BEACON_HINTS: enable this if your driver needs to ensure
  *     that passive scan flags and beaconing flags may not be lifted by
  *     cfg80211 due to regulatory beacon hints. For more information on beacon
index 0916bbf3bdff065fbc91d191c05aeecd3019ad40..b9e32db03f2040e76d7d73295406772de0029930 100644 (file)
@@ -5,13 +5,14 @@
 struct dn_dev;
 
 struct dn_ifaddr {
-       struct dn_ifaddr *ifa_next;
+       struct dn_ifaddr __rcu *ifa_next;
        struct dn_dev    *ifa_dev;
        __le16            ifa_local;
        __le16            ifa_address;
        __u8              ifa_flags;
        __u8              ifa_scope;
        char              ifa_label[IFNAMSIZ];
+       struct rcu_head   rcu;
 };
 
 #define DN_DEV_S_RU  0 /* Run - working normally   */
@@ -83,7 +84,7 @@ struct dn_dev_parms {
 
 
 struct dn_dev {
-       struct dn_ifaddr *ifa_list;
+       struct dn_ifaddr __rcu *ifa_list;
        struct net_device *dev;
        struct dn_dev_parms parms;
        char use_long;
@@ -171,19 +172,27 @@ extern int unregister_dnaddr_notifier(struct notifier_block *nb);
 
 static inline int dn_dev_islocal(struct net_device *dev, __le16 addr)
 {
-       struct dn_dev *dn_db = dev->dn_ptr;
+       struct dn_dev *dn_db;
        struct dn_ifaddr *ifa;
+       int res = 0;
 
+       rcu_read_lock();
+       dn_db = rcu_dereference(dev->dn_ptr);
        if (dn_db == NULL) {
                printk(KERN_DEBUG "dn_dev_islocal: Called for non DECnet device\n");
-               return 0;
+               goto out;
        }
 
-       for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next)
-               if ((addr ^ ifa->ifa_local) == 0)
-                       return 1;
-
-       return 0;
+       for (ifa = rcu_dereference(dn_db->ifa_list);
+            ifa != NULL;
+            ifa = rcu_dereference(ifa->ifa_next))
+               if ((addr ^ ifa->ifa_local) == 0) {
+                       res = 1;
+                       break;
+               }
+out:
+       rcu_read_unlock();
+       return res;
 }
 
 #endif /* _NET_DN_DEV_H */
index ccadab3aa3f6c5948e484062c82c022b5dbf132b..9b185df265fb5ed2fc5e7d690368d7dd5405b011 100644 (file)
@@ -80,6 +80,16 @@ struct dn_route {
        unsigned rt_type;
 };
 
+static inline bool dn_is_input_route(struct dn_route *rt)
+{
+       return rt->fl.iif != 0;
+}
+
+static inline bool dn_is_output_route(struct dn_route *rt)
+{
+       return rt->fl.iif == 0;
+}
+
 extern void dn_route_init(void);
 extern void dn_route_cleanup(void);
 
index ffe9cb719c0e526302bb2d4635c6f402ce31c867..a5bd72646d6510f18ff085674283f55386e118bc 100644 (file)
@@ -94,10 +94,10 @@ struct dst_entry {
        int                     __use;
        unsigned long           lastuse;
        union {
-               struct dst_entry *next;
-               struct rtable __rcu *rt_next;
-               struct rt6_info   *rt6_next;
-               struct dn_route  *dn_next;
+               struct dst_entry        *next;
+               struct rtable __rcu     *rt_next;
+               struct rt6_info         *rt6_next;
+               struct dn_route __rcu   *dn_next;
        };
 };
 
index 0ac3fb5e0973460f3046cdf1f00a0e1114a13dff..7196e6864b8d8e282e0a97c2d614372a77ba53b4 100644 (file)
@@ -67,6 +67,7 @@ struct flowi {
                } dnports;
 
                __be32          spi;
+               __be32          gre_key;
 
                struct {
                        __u8    type;
@@ -78,6 +79,7 @@ struct flowi {
 #define fl_icmp_code   uli_u.icmpt.code
 #define fl_ipsec_spi   uli_u.spi
 #define fl_mh_type     uli_u.mht.type
+#define fl_gre_key     uli_u.gre_key
        __u32           secid;  /* used by xfrm; see secid.txt */
 } __attribute__((__aligned__(BITS_PER_LONG/8)));
 
index 1989cfd7405fccfc6839f2742eb45b4b2d7f8712..8945f9fb192ab536d0e27f0b9617046f5086b9bc 100644 (file)
@@ -141,7 +141,7 @@ struct inet_sock {
                                nodefrag:1;
        int                     mc_index;
        __be32                  mc_addr;
-       struct ip_mc_socklist   *mc_list;
+       struct ip_mc_socklist __rcu     *mc_list;
        struct {
                unsigned int            flags;
                unsigned int            fragsize;
index 6beb1ffc2b7fcfe9d360717ccde3f70bd8ac0e3d..4014b623880cf529e3bb5b184a5b31dee650a8b5 100644 (file)
@@ -96,16 +96,16 @@ struct neighbour {
        struct neigh_parms      *parms;
        unsigned long           confirmed;
        unsigned long           updated;
-       __u8                    flags;
-       __u8                    nud_state;
-       __u8                    type;
-       __u8                    dead;
+       rwlock_t                lock;
        atomic_t                refcnt;
        struct sk_buff_head     arp_queue;
        struct timer_list       timer;
        unsigned long           used;
        atomic_t                probes;
-       rwlock_t                lock;
+       __u8                    flags;
+       __u8                    nud_state;
+       __u8                    type;
+       __u8                    dead;
        seqlock_t               ha_lock;
        unsigned char           ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))];
        struct hh_cache         *hh;
index 9801c55de5d64e5357006f9cfb129bce139572dd..373f1a900cf4784abf1f4e71791f501e222dfe3f 100644 (file)
@@ -225,13 +225,15 @@ extern int                nlmsg_notify(struct sock *sk, struct sk_buff *skb,
                                     u32 pid, unsigned int group, int report,
                                     gfp_t flags);
 
-extern int             nla_validate(struct nlattr *head, int len, int maxtype,
+extern int             nla_validate(const struct nlattr *head,
+                                    int len, int maxtype,
                                     const struct nla_policy *policy);
-extern int             nla_parse(struct nlattr *tb[], int maxtype,
-                                 struct nlattr *head, int len,
+extern int             nla_parse(struct nlattr **tb, int maxtype,
+                                 const struct nlattr *head, int len,
                                  const struct nla_policy *policy);
 extern int             nla_policy_len(const struct nla_policy *, int);
-extern struct nlattr * nla_find(struct nlattr *head, int len, int attrtype);
+extern struct nlattr * nla_find(const struct nlattr *head,
+                                int len, int attrtype);
 extern size_t          nla_strlcpy(char *dst, const struct nlattr *nla,
                                    size_t dstsize);
 extern int             nla_memcpy(void *dest, const struct nlattr *src, int count);
@@ -346,7 +348,8 @@ static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
  * Returns the next netlink message in the message stream and
  * decrements remaining by the size of the current message.
  */
-static inline struct nlmsghdr *nlmsg_next(struct nlmsghdr *nlh, int *remaining)
+static inline struct nlmsghdr *
+nlmsg_next(const struct nlmsghdr *nlh, int *remaining)
 {
        int totlen = NLMSG_ALIGN(nlh->nlmsg_len);
 
@@ -398,7 +401,8 @@ static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh,
  * @maxtype: maximum attribute type to be expected
  * @policy: validation policy
  */
-static inline int nlmsg_validate(struct nlmsghdr *nlh, int hdrlen, int maxtype,
+static inline int nlmsg_validate(const struct nlmsghdr *nlh,
+                                int hdrlen, int maxtype,
                                 const struct nla_policy *policy)
 {
        if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
@@ -727,7 +731,8 @@ static inline struct nlattr *nla_next(const struct nlattr *nla, int *remaining)
  *
  * Returns the first attribute which matches the specified type.
  */
-static inline struct nlattr *nla_find_nested(struct nlattr *nla, int attrtype)
+static inline struct nlattr *
+nla_find_nested(const struct nlattr *nla, int attrtype)
 {
        return nla_find(nla_data(nla), nla_len(nla), attrtype);
 }
@@ -1032,7 +1037,7 @@ static inline void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start)
  *
  * Returns 0 on success or a negative error code.
  */
-static inline int nla_validate_nested(struct nlattr *start, int maxtype,
+static inline int nla_validate_nested(const struct nlattr *start, int maxtype,
                                      const struct nla_policy *policy)
 {
        return nla_validate(nla_data(start), nla_len(start), maxtype, policy);
index 7e5e73bfa4dec8e2d45c834507f74d86484b8715..b8c1f7703fc6d7695ed0eba73552ffd349e7b484 100644 (file)
@@ -55,8 +55,6 @@ struct rtable {
        /* Cache lookup keys */
        struct flowi            fl;
 
-       struct in_device        *idev;
-       
        int                     rt_genid;
        unsigned                rt_flags;
        __u16                   rt_type;
@@ -73,6 +71,16 @@ struct rtable {
        struct inet_peer        *peer; /* long-living peer info */
 };
 
+static inline bool rt_is_input_route(struct rtable *rt)
+{
+       return rt->fl.iif != 0;
+}
+
+static inline bool rt_is_output_route(struct rtable *rt)
+{
+       return rt->fl.iif == 0;
+}
+
 struct ip_rt_acct {
        __u32   o_bytes;
        __u32   o_packets;
@@ -161,14 +169,12 @@ static inline int ip_route_connect(struct rtable **rp, __be32 dst,
 {
        struct flowi fl = { .oif = oif,
                            .mark = sk->sk_mark,
-                           .nl_u = { .ip4_u = { .daddr = dst,
-                                                .saddr = src,
-                                                .tos   = tos } },
+                           .fl4_dst = dst,
+                           .fl4_src = src,
+                           .fl4_tos = tos,
                            .proto = protocol,
-                           .uli_u = { .ports =
-                                      { .sport = sport,
-                                        .dport = dport } } };
-
+                           .fl_ip_sport = sport,
+                           .fl_ip_dport = dport };
        int err;
        struct net *net = sock_net(sk);
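The converted initializer above relies on the flattened flowi accessor macros (fl4_dst, fl4_src, fl4_tos, fl_ip_sport, fl_ip_dport) instead of the nested nl_u/uli_u designators. A hedged sketch of a standalone output-route lookup written the same way (addresses, ports and the function name are illustrative):

static void example_route_lookup(void)
{
	struct flowi fl = {
		.fl4_dst     = htonl(0xc0a80001),	/* 192.168.0.1 */
		.fl4_tos     = 0,
		.proto	     = IPPROTO_UDP,
		.fl_ip_sport = htons(12345),
		.fl_ip_dport = htons(53),
	};
	struct rtable *rt;

	if (ip_route_output_key(&init_net, &rt, &fl) == 0)
		ip_rt_put(rt);	/* lookup succeeded; release the route */
}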
 
index e013c68bfb0047330f27d50cdb5714957b997403..35be0bbcd7da03d4587098b871923d42e1618d6c 100644 (file)
@@ -83,6 +83,37 @@ extern void  __rtnl_link_unregister(struct rtnl_link_ops *ops);
 extern int     rtnl_link_register(struct rtnl_link_ops *ops);
 extern void    rtnl_link_unregister(struct rtnl_link_ops *ops);
 
+/**
+ *     struct rtnl_af_ops - rtnetlink address family operations
+ *
+ *     @list: Used internally
+ *     @family: Address family
+ *     @fill_link_af: Function to fill IFLA_AF_SPEC with address family
+ *                    specific netlink attributes.
+ *     @get_link_af_size: Function to calculate size of address family specific
+ *                        netlink attributes, exclusive of the container attribute.
+ *     @parse_link_af: Function to parse an IFLA_AF_SPEC attribute and modify
+ *                     net_device accordingly.
+ */
+struct rtnl_af_ops {
+       struct list_head        list;
+       int                     family;
+
+       int                     (*fill_link_af)(struct sk_buff *skb,
+                                               const struct net_device *dev);
+       size_t                  (*get_link_af_size)(const struct net_device *dev);
+
+       int                     (*parse_link_af)(struct net_device *dev,
+                                                const struct nlattr *attr);
+};
+
+extern int     __rtnl_af_register(struct rtnl_af_ops *ops);
+extern void    __rtnl_af_unregister(struct rtnl_af_ops *ops);
+
+extern int     rtnl_af_register(struct rtnl_af_ops *ops);
+extern void    rtnl_af_unregister(struct rtnl_af_ops *ops);
+
+
 extern struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
 extern struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
        char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[]);
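Putting rtnl_af_ops to work: an address family registers one ops structure and rtnetlink nests its fill_link_af() output under IFLA_AF_SPEC, keyed by ->family. A hedged sketch modelled on IPv4 devconf export (the my_* names are illustrative; the real IPv4 wiring, if any, lives in devinet.c rather than here):

static int my_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
{
	struct in_device *in_dev = __in_dev_get_rtnl(dev);
	struct nlattr *nla;
	int i;

	if (!in_dev)
		return -ENODATA;

	nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4);
	if (!nla)
		return -EMSGSIZE;

	for (i = 0; i < IPV4_DEVCONF_MAX; i++)
		((u32 *) nla_data(nla))[i] = in_dev->cnf.data[i];

	return 0;
}

static size_t my_get_link_af_size(const struct net_device *dev)
{
	/* one IFLA_INET_CONF blob, excluding the IFLA_AF_SPEC container */
	return nla_total_size(IPV4_DEVCONF_MAX * 4);
}

static struct rtnl_af_ops my_af_ops __read_mostly = {
	.family		  = AF_INET,
	.fill_link_af	  = my_fill_link_af,
	.get_link_af_size = my_get_link_af_size,
};

/* registered once at init time: rtnl_af_register(&my_af_ops); */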
index a6338d039857bf6b00bcff087f974ebf46ae0875..5557dfb3dd68056575156009ec7220db51514627 100644 (file)
@@ -57,7 +57,7 @@
 #include <linux/rculist_nulls.h>
 #include <linux/poll.h>
 
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <net/dst.h>
 #include <net/checksum.h>
 
@@ -241,59 +241,67 @@ struct sock {
 #define sk_bind_node           __sk_common.skc_bind_node
 #define sk_prot                        __sk_common.skc_prot
 #define sk_net                 __sk_common.skc_net
-       kmemcheck_bitfield_begin(flags);
-       unsigned int            sk_shutdown  : 2,
-                               sk_no_check  : 2,
-                               sk_userlocks : 4,
-                               sk_protocol  : 8,
-                               sk_type      : 16;
-       kmemcheck_bitfield_end(flags);
-       int                     sk_rcvbuf;
        socket_lock_t           sk_lock;
+       struct sk_buff_head     sk_receive_queue;
        /*
         * The backlog queue is special, it is always used with
         * the per-socket spinlock held and requires low latency
         * access. Therefore we special case its implementation.
+        * Note : rmem_alloc is in this structure to fill a hole
+        * on 64bit arches, not because it's logically part of
+        * backlog.
         */
        struct {
-               struct sk_buff *head;
-               struct sk_buff *tail;
-               int len;
+               atomic_t        rmem_alloc;
+               int             len;
+               struct sk_buff  *head;
+               struct sk_buff  *tail;
        } sk_backlog;
+#define sk_rmem_alloc sk_backlog.rmem_alloc
+       int                     sk_forward_alloc;
+#ifdef CONFIG_RPS
+       __u32                   sk_rxhash;
+#endif
+       atomic_t                sk_drops;
+       int                     sk_rcvbuf;
+
+       struct sk_filter __rcu  *sk_filter;
        struct socket_wq        *sk_wq;
-       struct dst_entry        *sk_dst_cache;
+
+#ifdef CONFIG_NET_DMA
+       struct sk_buff_head     sk_async_wait_queue;
+#endif
+
 #ifdef CONFIG_XFRM
        struct xfrm_policy      *sk_policy[2];
 #endif
+       unsigned long           sk_flags;
+       struct dst_entry        *sk_dst_cache;
        spinlock_t              sk_dst_lock;
-       atomic_t                sk_rmem_alloc;
        atomic_t                sk_wmem_alloc;
        atomic_t                sk_omem_alloc;
        int                     sk_sndbuf;
-       struct sk_buff_head     sk_receive_queue;
        struct sk_buff_head     sk_write_queue;
-#ifdef CONFIG_NET_DMA
-       struct sk_buff_head     sk_async_wait_queue;
-#endif
+       kmemcheck_bitfield_begin(flags);
+       unsigned int            sk_shutdown  : 2,
+                               sk_no_check  : 2,
+                               sk_userlocks : 4,
+                               sk_protocol  : 8,
+                               sk_type      : 16;
+       kmemcheck_bitfield_end(flags);
        int                     sk_wmem_queued;
-       int                     sk_forward_alloc;
        gfp_t                   sk_allocation;
        int                     sk_route_caps;
        int                     sk_route_nocaps;
        int                     sk_gso_type;
        unsigned int            sk_gso_max_size;
        int                     sk_rcvlowat;
-#ifdef CONFIG_RPS
-       __u32                   sk_rxhash;
-#endif
-       unsigned long           sk_flags;
        unsigned long           sk_lingertime;
        struct sk_buff_head     sk_error_queue;
        struct proto            *sk_prot_creator;
        rwlock_t                sk_callback_lock;
        int                     sk_err,
                                sk_err_soft;
-       atomic_t                sk_drops;
        unsigned short          sk_ack_backlog;
        unsigned short          sk_max_ack_backlog;
        __u32                   sk_priority;
@@ -301,7 +309,6 @@ struct sock {
        const struct cred       *sk_peer_cred;
        long                    sk_rcvtimeo;
        long                    sk_sndtimeo;
-       struct sk_filter __rcu  *sk_filter;
        void                    *sk_protinfo;
        struct timer_list       sk_timer;
        ktime_t                 sk_stamp;
index bcfb6b24b019cf398bb3674aa13cc29d5b423dc6..54b283229488f6d8a68d33e209a4274a14a57913 100644 (file)
@@ -805,6 +805,9 @@ __be16 xfrm_flowi_sport(struct flowi *fl)
        case IPPROTO_MH:
                port = htons(fl->fl_mh_type);
                break;
+       case IPPROTO_GRE:
+               port = htonl(fl->fl_gre_key) >> 16;
+               break;
        default:
                port = 0;       /*XXX*/
        }
@@ -826,6 +829,9 @@ __be16 xfrm_flowi_dport(struct flowi *fl)
        case IPPROTO_ICMPV6:
                port = htons(fl->fl_icmp_code);
                break;
+       case IPPROTO_GRE:
+               port = htonl(fl->fl_gre_key) & 0xffff;
+               break;
        default:
                port = 0;       /*XXX*/
        }
index c4706eb98d3dfc922ec19fdbb0c23a36fab47a58..00e8a02681a6f219251622337375b15fb3bc4acc 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/types.h>
 #include <net/netlink.h>
 
-static u16 nla_attr_minlen[NLA_TYPE_MAX+1] __read_mostly = {
+static const u16 nla_attr_minlen[NLA_TYPE_MAX+1] = {
        [NLA_U8]        = sizeof(u8),
        [NLA_U16]       = sizeof(u16),
        [NLA_U32]       = sizeof(u32),
@@ -23,7 +23,7 @@ static u16 nla_attr_minlen[NLA_TYPE_MAX+1] __read_mostly = {
        [NLA_NESTED]    = NLA_HDRLEN,
 };
 
-static int validate_nla(struct nlattr *nla, int maxtype,
+static int validate_nla(const struct nlattr *nla, int maxtype,
                        const struct nla_policy *policy)
 {
        const struct nla_policy *pt;
@@ -115,10 +115,10 @@ static int validate_nla(struct nlattr *nla, int maxtype,
  *
  * Returns 0 on success or a negative error code.
  */
-int nla_validate(struct nlattr *head, int len, int maxtype,
+int nla_validate(const struct nlattr *head, int len, int maxtype,
                 const struct nla_policy *policy)
 {
-       struct nlattr *nla;
+       const struct nlattr *nla;
        int rem, err;
 
        nla_for_each_attr(nla, head, len, rem) {
@@ -173,10 +173,10 @@ nla_policy_len(const struct nla_policy *p, int n)
  *
  * Returns 0 on success or a negative error code.
  */
-int nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len,
-             const struct nla_policy *policy)
+int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
+             int len, const struct nla_policy *policy)
 {
-       struct nlattr *nla;
+       const struct nlattr *nla;
        int rem, err;
 
        memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
@@ -191,7 +191,7 @@ int nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len,
                                        goto errout;
                        }
 
-                       tb[type] = nla;
+                       tb[type] = (struct nlattr *)nla;
                }
        }
 
@@ -212,14 +212,14 @@ errout:
  *
  * Returns the first attribute in the stream matching the specified type.
  */
-struct nlattr *nla_find(struct nlattr *head, int len, int attrtype)
+struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype)
 {
-       struct nlattr *nla;
+       const struct nlattr *nla;
        int rem;
 
        nla_for_each_attr(nla, head, len, rem)
                if (nla_type(nla) == attrtype)
-                       return nla;
+                       return (struct nlattr *)nla;
 
        return NULL;
 }
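The constified prototypes keep the familiar calling convention: pass an attribute table, the maximum type, and a policy, and get back validated pointers into the (now const) attribute stream. A minimal hedged sketch; the MY_ATTR_* names and the policy are invented for illustration:

enum { MY_ATTR_UNSPEC, MY_ATTR_MTU, MY_ATTR_NAME, __MY_ATTR_MAX };
#define MY_ATTR_MAX (__MY_ATTR_MAX - 1)

static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
	[MY_ATTR_MTU]  = { .type = NLA_U32 },
	[MY_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = 15 },
};

static int my_parse(const struct nlattr *head, int len)
{
	struct nlattr *tb[MY_ATTR_MAX + 1];
	int err;

	err = nla_parse(tb, MY_ATTR_MAX, head, len, my_policy);
	if (err < 0)
		return err;

	if (tb[MY_ATTR_MTU])
		pr_debug("mtu=%u\n", nla_get_u32(tb[MY_ATTR_MTU]));

	return 0;
}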
index 52077ca22072d6eeab3e12db0030fb5f3c2db68c..dc1071327d87a929893a8b02b8cfbf69e80dc562 100644 (file)
@@ -272,13 +272,11 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
                snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
        }
 
-       new_dev = alloc_netdev_mq(sizeof(struct vlan_dev_info), name,
-                                 vlan_setup, real_dev->num_tx_queues);
+       new_dev = alloc_netdev(sizeof(struct vlan_dev_info), name, vlan_setup);
 
        if (new_dev == NULL)
                return -ENOBUFS;
 
-       netif_copy_real_num_queues(new_dev, real_dev);
        dev_net_set(new_dev, net);
        /* need 4 bytes for extra VLAN header info,
         * hope the underlying device can handle it.
@@ -334,6 +332,12 @@ static void vlan_transfer_features(struct net_device *dev,
        vlandev->features &= ~dev->vlan_features;
        vlandev->features |= dev->features & dev->vlan_features;
        vlandev->gso_max_size = dev->gso_max_size;
+
+       if (dev->features & NETIF_F_HW_VLAN_TX)
+               vlandev->hard_header_len = dev->hard_header_len;
+       else
+               vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;
+
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
        vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
 #endif
index db01b3181fdc4a9cd950dfaf60df1abecdec294f..5687c9b95f33b7f70234e0d3ef7220fa5f31f1b1 100644 (file)
@@ -19,19 +19,25 @@ struct vlan_priority_tci_mapping {
 
 
 /**
- *     struct vlan_rx_stats - VLAN percpu rx stats
+ *     struct vlan_pcpu_stats - VLAN percpu rx/tx stats
  *     @rx_packets: number of received packets
  *     @rx_bytes: number of received bytes
  *     @rx_multicast: number of received multicast packets
+ *     @tx_packets: number of transmitted packets
+ *     @tx_bytes: number of transmitted bytes
  *     @syncp: synchronization point for 64bit counters
- *     @rx_errors: number of errors
+ *     @rx_errors: number of rx errors
+ *     @tx_dropped: number of tx drops
  */
-struct vlan_rx_stats {
+struct vlan_pcpu_stats {
        u64                     rx_packets;
        u64                     rx_bytes;
        u64                     rx_multicast;
+       u64                     tx_packets;
+       u64                     tx_bytes;
        struct u64_stats_sync   syncp;
-       unsigned long           rx_errors;
+       u32                     rx_errors;
+       u32                     tx_dropped;
 };
 
 /**
@@ -45,9 +51,7 @@ struct vlan_rx_stats {
  *     @real_dev: underlying netdevice
  *     @real_dev_addr: address of underlying netdevice
  *     @dent: proc dir entry
- *     @cnt_inc_headroom_on_tx: statistic - number of skb expansions on TX
- *     @cnt_encap_on_xmit: statistic - number of skb encapsulations on TX
- *     @vlan_rx_stats: ptr to percpu rx stats
+ *     @vlan_pcpu_stats: ptr to percpu rx/tx stats
  */
 struct vlan_dev_info {
        unsigned int                            nr_ingress_mappings;
@@ -62,9 +66,7 @@ struct vlan_dev_info {
        unsigned char                           real_dev_addr[ETH_ALEN];
 
        struct proc_dir_entry                   *dent;
-       unsigned long                           cnt_inc_headroom_on_tx;
-       unsigned long                           cnt_encap_on_xmit;
-       struct vlan_rx_stats __percpu           *vlan_rx_stats;
+       struct vlan_pcpu_stats __percpu         *vlan_pcpu_stats;
 };
 
 static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
index 69b2f79800a52c3e07e0f7c668daa1028025931e..ce8e3ab3e7a5ab6d0f0000e699fbca27f4629d9b 100644 (file)
@@ -9,7 +9,7 @@ bool vlan_hwaccel_do_receive(struct sk_buff **skbp)
        struct sk_buff *skb = *skbp;
        u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
        struct net_device *vlan_dev;
-       struct vlan_rx_stats *rx_stats;
+       struct vlan_pcpu_stats *rx_stats;
 
        vlan_dev = vlan_find_dev(skb->dev, vlan_id);
        if (!vlan_dev) {
@@ -26,7 +26,7 @@ bool vlan_hwaccel_do_receive(struct sk_buff **skbp)
        skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
        skb->vlan_tci = 0;
 
-       rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_rx_stats);
+       rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_pcpu_stats);
 
        u64_stats_update_begin(&rx_stats->syncp);
        rx_stats->rx_packets++;
index 14e3d1fa07a0f70df457515f3aede2d31e77c9cb..be737539f34d1be9849a142c2ed176e48c537298 100644 (file)
@@ -141,7 +141,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
                  struct packet_type *ptype, struct net_device *orig_dev)
 {
        struct vlan_hdr *vhdr;
-       struct vlan_rx_stats *rx_stats;
+       struct vlan_pcpu_stats *rx_stats;
        struct net_device *vlan_dev;
        u16 vlan_id;
        u16 vlan_tci;
@@ -177,7 +177,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
        } else {
                skb->dev = vlan_dev;
 
-               rx_stats = this_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats);
+               rx_stats = this_cpu_ptr(vlan_dev_info(skb->dev)->vlan_pcpu_stats);
 
                u64_stats_update_begin(&rx_stats->syncp);
                rx_stats->rx_packets++;
@@ -274,9 +274,6 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
        u16 vlan_tci = 0;
        int rc;
 
-       if (WARN_ON(skb_headroom(skb) < dev->hard_header_len))
-               return -ENOSPC;
-
        if (!(vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR)) {
                vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN);
 
@@ -313,8 +310,6 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
 static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
                                            struct net_device *dev)
 {
-       int i = skb_get_queue_mapping(skb);
-       struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
        struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
        unsigned int len;
        int ret;
@@ -326,71 +321,31 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
         */
        if (veth->h_vlan_proto != htons(ETH_P_8021Q) ||
            vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR) {
-               unsigned int orig_headroom = skb_headroom(skb);
                u16 vlan_tci;
-
-               vlan_dev_info(dev)->cnt_encap_on_xmit++;
-
                vlan_tci = vlan_dev_info(dev)->vlan_id;
                vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
-               skb = __vlan_put_tag(skb, vlan_tci);
-               if (!skb) {
-                       txq->tx_dropped++;
-                       return NETDEV_TX_OK;
-               }
-
-               if (orig_headroom < VLAN_HLEN)
-                       vlan_dev_info(dev)->cnt_inc_headroom_on_tx++;
+               skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
        }
 
-
        skb_set_dev(skb, vlan_dev_info(dev)->real_dev);
        len = skb->len;
        ret = dev_queue_xmit(skb);
 
        if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
-               txq->tx_packets++;
-               txq->tx_bytes += len;
-       } else
-               txq->tx_dropped++;
+               struct vlan_pcpu_stats *stats;
 
-       return ret;
-}
-
-static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
-                                                   struct net_device *dev)
-{
-       int i = skb_get_queue_mapping(skb);
-       struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-       u16 vlan_tci;
-       unsigned int len;
-       int ret;
-
-       vlan_tci = vlan_dev_info(dev)->vlan_id;
-       vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
-       skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
-
-       skb->dev = vlan_dev_info(dev)->real_dev;
-       len = skb->len;
-       ret = dev_queue_xmit(skb);
-
-       if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
-               txq->tx_packets++;
-               txq->tx_bytes += len;
-       } else
-               txq->tx_dropped++;
+               stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats);
+               u64_stats_update_begin(&stats->syncp);
+               stats->tx_packets++;
+               stats->tx_bytes += len;
+               u64_stats_update_end(&stats->syncp);
+       } else {
+               this_cpu_inc(vlan_dev_info(dev)->vlan_pcpu_stats->tx_dropped);
+       }
 
        return ret;
 }
 
-static u16 vlan_dev_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
-       struct net_device *rdev = vlan_dev_info(dev)->real_dev;
-       const struct net_device_ops *ops = rdev->netdev_ops;
-
-       return ops->ndo_select_queue(rdev, skb);
-}
-
 static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
 {
        /* TODO: gotta make sure the underlying layer can handle it,
@@ -719,8 +674,7 @@ static const struct header_ops vlan_header_ops = {
        .parse   = eth_header_parse,
 };
 
-static const struct net_device_ops vlan_netdev_ops, vlan_netdev_accel_ops,
-                   vlan_netdev_ops_sq, vlan_netdev_accel_ops_sq;
+static const struct net_device_ops vlan_netdev_ops;
 
 static int vlan_dev_init(struct net_device *dev)
 {
@@ -738,6 +692,7 @@ static int vlan_dev_init(struct net_device *dev)
                      (1<<__LINK_STATE_PRESENT);
 
        dev->features |= real_dev->features & real_dev->vlan_features;
+       dev->features |= NETIF_F_LLTX;
        dev->gso_max_size = real_dev->gso_max_size;
 
        /* ipv6 shared card related stuff */
@@ -755,26 +710,20 @@ static int vlan_dev_init(struct net_device *dev)
        if (real_dev->features & NETIF_F_HW_VLAN_TX) {
                dev->header_ops      = real_dev->header_ops;
                dev->hard_header_len = real_dev->hard_header_len;
-               if (real_dev->netdev_ops->ndo_select_queue)
-                       dev->netdev_ops = &vlan_netdev_accel_ops_sq;
-               else
-                       dev->netdev_ops = &vlan_netdev_accel_ops;
        } else {
                dev->header_ops      = &vlan_header_ops;
                dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
-               if (real_dev->netdev_ops->ndo_select_queue)
-                       dev->netdev_ops = &vlan_netdev_ops_sq;
-               else
-                       dev->netdev_ops = &vlan_netdev_ops;
        }
 
+       dev->netdev_ops = &vlan_netdev_ops;
+
        if (is_vlan_dev(real_dev))
                subclass = 1;
 
        vlan_dev_set_lockdep_class(dev, subclass);
 
-       vlan_dev_info(dev)->vlan_rx_stats = alloc_percpu(struct vlan_rx_stats);
-       if (!vlan_dev_info(dev)->vlan_rx_stats)
+       vlan_dev_info(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
+       if (!vlan_dev_info(dev)->vlan_pcpu_stats)
                return -ENOMEM;
 
        return 0;
@@ -786,8 +735,8 @@ static void vlan_dev_uninit(struct net_device *dev)
        struct vlan_dev_info *vlan = vlan_dev_info(dev);
        int i;
 
-       free_percpu(vlan->vlan_rx_stats);
-       vlan->vlan_rx_stats = NULL;
+       free_percpu(vlan->vlan_pcpu_stats);
+       vlan->vlan_pcpu_stats = NULL;
        for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
                while ((pm = vlan->egress_priority_map[i]) != NULL) {
                        vlan->egress_priority_map[i] = pm->next;
@@ -825,33 +774,37 @@ static u32 vlan_ethtool_get_flags(struct net_device *dev)
 
 static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
-       dev_txq_stats_fold(dev, stats);
 
-       if (vlan_dev_info(dev)->vlan_rx_stats) {
-               struct vlan_rx_stats *p, accum = {0};
+       if (vlan_dev_info(dev)->vlan_pcpu_stats) {
+               struct vlan_pcpu_stats *p;
+               u32 rx_errors = 0, tx_dropped = 0;
                int i;
 
                for_each_possible_cpu(i) {
-                       u64 rxpackets, rxbytes, rxmulticast;
+                       u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
                        unsigned int start;
 
-                       p = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, i);
+                       p = per_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats, i);
                        do {
                                start = u64_stats_fetch_begin_bh(&p->syncp);
                                rxpackets       = p->rx_packets;
                                rxbytes         = p->rx_bytes;
                                rxmulticast     = p->rx_multicast;
+                               txpackets       = p->tx_packets;
+                               txbytes         = p->tx_bytes;
                        } while (u64_stats_fetch_retry_bh(&p->syncp, start));
-                       accum.rx_packets += rxpackets;
-                       accum.rx_bytes   += rxbytes;
-                       accum.rx_multicast += rxmulticast;
-                       /* rx_errors is ulong, not protected by syncp */
-                       accum.rx_errors  += p->rx_errors;
+
+                       stats->rx_packets       += rxpackets;
+                       stats->rx_bytes         += rxbytes;
+                       stats->multicast        += rxmulticast;
+                       stats->tx_packets       += txpackets;
+                       stats->tx_bytes         += txbytes;
+                       /* rx_errors & tx_dropped are u32 */
+                       rx_errors       += p->rx_errors;
+                       tx_dropped      += p->tx_dropped;
                }
-               stats->rx_packets = accum.rx_packets;
-               stats->rx_bytes   = accum.rx_bytes;
-               stats->rx_errors  = accum.rx_errors;
-               stats->multicast  = accum.rx_multicast;
+               stats->rx_errors  = rx_errors;
+               stats->tx_dropped = tx_dropped;
        }
        return stats;
 }
@@ -908,80 +861,6 @@ static const struct net_device_ops vlan_netdev_ops = {
 #endif
 };
 
-static const struct net_device_ops vlan_netdev_accel_ops = {
-       .ndo_change_mtu         = vlan_dev_change_mtu,
-       .ndo_init               = vlan_dev_init,
-       .ndo_uninit             = vlan_dev_uninit,
-       .ndo_open               = vlan_dev_open,
-       .ndo_stop               = vlan_dev_stop,
-       .ndo_start_xmit =  vlan_dev_hwaccel_hard_start_xmit,
-       .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_mac_address    = vlan_dev_set_mac_address,
-       .ndo_set_rx_mode        = vlan_dev_set_rx_mode,
-       .ndo_set_multicast_list = vlan_dev_set_rx_mode,
-       .ndo_change_rx_flags    = vlan_dev_change_rx_flags,
-       .ndo_do_ioctl           = vlan_dev_ioctl,
-       .ndo_neigh_setup        = vlan_dev_neigh_setup,
-       .ndo_get_stats64        = vlan_dev_get_stats64,
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
-       .ndo_fcoe_ddp_setup     = vlan_dev_fcoe_ddp_setup,
-       .ndo_fcoe_ddp_done      = vlan_dev_fcoe_ddp_done,
-       .ndo_fcoe_enable        = vlan_dev_fcoe_enable,
-       .ndo_fcoe_disable       = vlan_dev_fcoe_disable,
-       .ndo_fcoe_get_wwn       = vlan_dev_fcoe_get_wwn,
-#endif
-};
-
-static const struct net_device_ops vlan_netdev_ops_sq = {
-       .ndo_select_queue       = vlan_dev_select_queue,
-       .ndo_change_mtu         = vlan_dev_change_mtu,
-       .ndo_init               = vlan_dev_init,
-       .ndo_uninit             = vlan_dev_uninit,
-       .ndo_open               = vlan_dev_open,
-       .ndo_stop               = vlan_dev_stop,
-       .ndo_start_xmit =  vlan_dev_hard_start_xmit,
-       .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_mac_address    = vlan_dev_set_mac_address,
-       .ndo_set_rx_mode        = vlan_dev_set_rx_mode,
-       .ndo_set_multicast_list = vlan_dev_set_rx_mode,
-       .ndo_change_rx_flags    = vlan_dev_change_rx_flags,
-       .ndo_do_ioctl           = vlan_dev_ioctl,
-       .ndo_neigh_setup        = vlan_dev_neigh_setup,
-       .ndo_get_stats64        = vlan_dev_get_stats64,
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
-       .ndo_fcoe_ddp_setup     = vlan_dev_fcoe_ddp_setup,
-       .ndo_fcoe_ddp_done      = vlan_dev_fcoe_ddp_done,
-       .ndo_fcoe_enable        = vlan_dev_fcoe_enable,
-       .ndo_fcoe_disable       = vlan_dev_fcoe_disable,
-       .ndo_fcoe_get_wwn       = vlan_dev_fcoe_get_wwn,
-#endif
-};
-
-static const struct net_device_ops vlan_netdev_accel_ops_sq = {
-       .ndo_select_queue       = vlan_dev_select_queue,
-       .ndo_change_mtu         = vlan_dev_change_mtu,
-       .ndo_init               = vlan_dev_init,
-       .ndo_uninit             = vlan_dev_uninit,
-       .ndo_open               = vlan_dev_open,
-       .ndo_stop               = vlan_dev_stop,
-       .ndo_start_xmit =  vlan_dev_hwaccel_hard_start_xmit,
-       .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_mac_address    = vlan_dev_set_mac_address,
-       .ndo_set_rx_mode        = vlan_dev_set_rx_mode,
-       .ndo_set_multicast_list = vlan_dev_set_rx_mode,
-       .ndo_change_rx_flags    = vlan_dev_change_rx_flags,
-       .ndo_do_ioctl           = vlan_dev_ioctl,
-       .ndo_neigh_setup        = vlan_dev_neigh_setup,
-       .ndo_get_stats64        = vlan_dev_get_stats64,
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
-       .ndo_fcoe_ddp_setup     = vlan_dev_fcoe_ddp_setup,
-       .ndo_fcoe_ddp_done      = vlan_dev_fcoe_ddp_done,
-       .ndo_fcoe_enable        = vlan_dev_fcoe_enable,
-       .ndo_fcoe_disable       = vlan_dev_fcoe_disable,
-       .ndo_fcoe_get_wwn       = vlan_dev_fcoe_get_wwn,
-#endif
-};
-
 void vlan_setup(struct net_device *dev)
 {
        ether_setup(dev);
index ddc105734af7ae5664d3199ba8f4467f53a84175..be9a5c19a775aa64751b93795ccd5df1a453297a 100644 (file)
@@ -101,25 +101,6 @@ static int vlan_changelink(struct net_device *dev,
        return 0;
 }
 
-static int vlan_get_tx_queues(struct net *net,
-                             struct nlattr *tb[],
-                             unsigned int *num_tx_queues,
-                             unsigned int *real_num_tx_queues)
-{
-       struct net_device *real_dev;
-
-       if (!tb[IFLA_LINK])
-               return -EINVAL;
-
-       real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
-       if (!real_dev)
-               return -ENODEV;
-
-       *num_tx_queues      = real_dev->num_tx_queues;
-       *real_num_tx_queues = real_dev->real_num_tx_queues;
-       return 0;
-}
-
 static int vlan_newlink(struct net *src_net, struct net_device *dev,
                        struct nlattr *tb[], struct nlattr *data[])
 {
@@ -237,7 +218,6 @@ struct rtnl_link_ops vlan_link_ops __read_mostly = {
        .maxtype        = IFLA_VLAN_MAX,
        .policy         = vlan_policy,
        .priv_size      = sizeof(struct vlan_dev_info),
-       .get_tx_queues  = vlan_get_tx_queues,
        .setup          = vlan_setup,
        .validate       = vlan_validate,
        .newlink        = vlan_newlink,
index 80e280f56686989796264db386f69057a65dc165..d1314cf18adf53dfeaf81e1f97ff7e853ca42b93 100644 (file)
@@ -280,7 +280,6 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
        const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
        struct rtnl_link_stats64 temp;
        const struct rtnl_link_stats64 *stats;
-       static const char fmt[] = "%30s %12lu\n";
        static const char fmt64[] = "%30s %12llu\n";
        int i;
 
@@ -299,10 +298,6 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
        seq_puts(seq, "\n");
        seq_printf(seq, fmt64, "total frames transmitted", stats->tx_packets);
        seq_printf(seq, fmt64, "total bytes transmitted", stats->tx_bytes);
-       seq_printf(seq, fmt, "total headroom inc",
-                  dev_info->cnt_inc_headroom_on_tx);
-       seq_printf(seq, fmt, "total encap on xmit",
-                  dev_info->cnt_encap_on_xmit);
        seq_printf(seq, "Device: %s", dev_info->real_dev->name);
        /* now show all PRIORITY mappings relating to this VLAN */
        seq_printf(seq, "\nINGRESS priority mappings: "
index ad2b232a2055fbc241828832078087c62e4c4315..fce2eae8d47697ba4e4a6ce1deea8e3359112539 100644 (file)
@@ -97,7 +97,7 @@ static LIST_HEAD(br2684_devs);
 
 static inline struct br2684_dev *BRPRIV(const struct net_device *net_dev)
 {
-       return (struct br2684_dev *)netdev_priv(net_dev);
+       return netdev_priv(net_dev);
 }
 
 static inline struct net_device *list_entry_brdev(const struct list_head *le)
index ff956d1115bcee4636a68011a38457d87bea3cdd..d257da50fcfb92417cad07bfb60e0377b2f2f933 100644 (file)
@@ -502,7 +502,8 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
        struct atmarp_entry *entry;
        int error;
        struct clip_vcc *clip_vcc;
-       struct flowi fl = { .nl_u = { .ip4_u = { .daddr = ip, .tos = 1}} };
+       struct flowi fl = { .fl4_dst = ip,
+                           .fl4_tos = 1 };
        struct rtable *rt;
 
        if (vcc->push != clip_push) {
index 181d70c73d708bd730f49e4b61f88ac72e373a20..179e04bc99dd8ee00cd8bb9e1b10efa01bb48de4 100644 (file)
@@ -816,8 +816,7 @@ static int lec_mcast_attach(struct atm_vcc *vcc, int arg)
        if (arg < 0 || arg >= MAX_LEC_ITF || !dev_lec[arg])
                return -EINVAL;
        vcc->proto_data = dev_lec[arg];
-       return lec_mcast_make((struct lec_priv *)netdev_priv(dev_lec[arg]),
-                               vcc);
+       return lec_mcast_make(netdev_priv(dev_lec[arg]), vcc);
 }
 
 /* Initialize device. */
index c8436fa31344000ffebe460a9f496b7cfc339ce8..84bbb82599b2582adcdd0a8ee352a69a1e67c8ec 100644 (file)
@@ -22,8 +22,6 @@
 
 #include "br_private.h"
 
-int (*br_should_route_hook)(struct sk_buff *skb);
-
 static const struct stp_proto br_stp_proto = {
        .rcv    = br_stp_rcv,
 };
@@ -102,8 +100,6 @@ static void __exit br_deinit(void)
        br_fdb_fini();
 }
 
-EXPORT_SYMBOL(br_should_route_hook);
-
 module_init(br_init)
 module_exit(br_deinit)
 MODULE_LICENSE("GPL");
index 90512ccfd3e973c19adade047de7eda77ffd2963..2872393b2939556d1492f195dd6e5247deafcb75 100644 (file)
@@ -238,15 +238,18 @@ struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
 int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
 {
        struct net_bridge_fdb_entry *fdb;
+       struct net_bridge_port *port;
        int ret;
 
-       if (!br_port_exists(dev))
-               return 0;
-
        rcu_read_lock();
-       fdb = __br_fdb_get(br_port_get_rcu(dev)->br, addr);
-       ret = fdb && fdb->dst->dev != dev &&
-               fdb->dst->state == BR_STATE_FORWARDING;
+       port = br_port_get_rcu(dev);
+       if (!port)
+               ret = 0;
+       else {
+               fdb = __br_fdb_get(port->br, addr);
+               ret = fdb && fdb->dst->dev != dev &&
+                       fdb->dst->state == BR_STATE_FORWARDING;
+       }
        rcu_read_unlock();
 
        return ret;
index cbfe87f0f34ae7d81b7e10396018c99113b7a16d..2bd11ec6d1664d29bce00a55e88d4e527d1a03e3 100644 (file)
@@ -223,7 +223,7 @@ static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
        struct net_bridge_port_group *p;
        struct hlist_node *rp;
 
-       rp = rcu_dereference(br->router_list.first);
+       rp = rcu_dereference(hlist_first_rcu(&br->router_list));
        p = mdst ? rcu_dereference(mdst->ports) : NULL;
        while (p || rp) {
                struct net_bridge_port *port, *lport, *rport;
@@ -242,7 +242,7 @@ static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
                if ((unsigned long)lport >= (unsigned long)port)
                        p = rcu_dereference(p->next);
                if ((unsigned long)rport >= (unsigned long)port)
-                       rp = rcu_dereference(rp->next);
+                       rp = rcu_dereference(hlist_next_rcu(rp));
        }
 
        if (!prev)
index 89ad25a76202924eadf9bce8aadf761dcea3e8d5..d9d1e2bac1d6efc826ed3ef826a91cd06bba78ca 100644 (file)
@@ -475,11 +475,8 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
 {
        struct net_bridge_port *p;
 
-       if (!br_port_exists(dev))
-               return -EINVAL;
-
-       p = br_port_get(dev);
-       if (p->br != br)
+       p = br_port_get_rtnl(dev);
+       if (!p || p->br != br)
                return -EINVAL;
 
        del_nbp(p);
index 25207a1f182be33d9b588a3d5fdbc1273c269409..6f6d8e1b776f704ee45e93d4d827f320f8becf62 100644 (file)
 /* Bridge group multicast address 802.1d (pg 51). */
 const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
 
+/* Hook for brouter */
+br_should_route_hook_t __rcu *br_should_route_hook __read_mostly;
+EXPORT_SYMBOL(br_should_route_hook);
+
 static int br_pass_frame_up(struct sk_buff *skb)
 {
        struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
@@ -139,7 +143,7 @@ struct sk_buff *br_handle_frame(struct sk_buff *skb)
 {
        struct net_bridge_port *p;
        const unsigned char *dest = eth_hdr(skb)->h_dest;
-       int (*rhook)(struct sk_buff *skb);
+       br_should_route_hook_t *rhook;
 
        if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
                return skb;
@@ -173,8 +177,8 @@ forward:
        switch (p->state) {
        case BR_STATE_FORWARDING:
                rhook = rcu_dereference(br_should_route_hook);
-               if (rhook != NULL) {
-                       if (rhook(skb))
+               if (rhook) {
+                       if ((*rhook)(skb))
                                return skb;
                        dest = eth_hdr(skb)->h_dest;
                }
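
br_should_route_hook is now declared through a function-type typedef (presumably typedef int br_should_route_hook_t(struct sk_buff *skb); in a header outside this diff), so the global carries the __rcu annotation on an ordinary pointer object and the call site spells the indirection out as (*rhook)(skb). A minimal userspace sketch of the same declaration pattern, hypothetical names throughout:

#include <stdio.h>

typedef int demo_hook_t(int value);          /* a function type, not a pointer type */

static demo_hook_t *demo_hook;               /* pointer to that function type */

static int log_hook(int value)
{
        printf("hook saw %d\n", value);
        return value > 0;
}

int main(void)
{
        demo_hook = log_hook;                /* registration */
        demo_hook_t *rhook = demo_hook;      /* cf. rcu_dereference() in the hunk above */

        if (rhook && (*rhook)(42))
                printf("hook accepted the value\n");
        return 0;
}
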
index eb5b256ffc8801ff7e187c64ee3dde5da268f570..326e599f83fb2a8fa80b55a46819e708f7040a14 100644 (file)
@@ -33,6 +33,9 @@
 
 #include "br_private.h"
 
+#define mlock_dereference(X, br) \
+       rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
+
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 static inline int ipv6_is_local_multicast(const struct in6_addr *addr)
 {
@@ -135,7 +138,7 @@ static struct net_bridge_mdb_entry *br_mdb_ip6_get(
 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
                                        struct sk_buff *skb)
 {
-       struct net_bridge_mdb_htable *mdb = br->mdb;
+       struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
        struct br_ip ip;
 
        if (br->multicast_disabled)
@@ -235,7 +238,8 @@ static void br_multicast_group_expired(unsigned long data)
        if (mp->ports)
                goto out;
 
-       mdb = br->mdb;
+       mdb = mlock_dereference(br->mdb, br);
+
        hlist_del_rcu(&mp->hlist[mdb->ver]);
        mdb->size--;
 
@@ -249,16 +253,20 @@ out:
 static void br_multicast_del_pg(struct net_bridge *br,
                                struct net_bridge_port_group *pg)
 {
-       struct net_bridge_mdb_htable *mdb = br->mdb;
+       struct net_bridge_mdb_htable *mdb;
        struct net_bridge_mdb_entry *mp;
        struct net_bridge_port_group *p;
-       struct net_bridge_port_group **pp;
+       struct net_bridge_port_group __rcu **pp;
+
+       mdb = mlock_dereference(br->mdb, br);
 
        mp = br_mdb_ip_get(mdb, &pg->addr);
        if (WARN_ON(!mp))
                return;
 
-       for (pp = &mp->ports; (p = *pp); pp = &p->next) {
+       for (pp = &mp->ports;
+            (p = mlock_dereference(*pp, br)) != NULL;
+            pp = &p->next) {
                if (p != pg)
                        continue;
 
@@ -294,10 +302,10 @@ out:
        spin_unlock(&br->multicast_lock);
 }
 
-static int br_mdb_rehash(struct net_bridge_mdb_htable **mdbp, int max,
+static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max,
                         int elasticity)
 {
-       struct net_bridge_mdb_htable *old = *mdbp;
+       struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1);
        struct net_bridge_mdb_htable *mdb;
        int err;
 
@@ -569,7 +577,7 @@ static struct net_bridge_mdb_entry *br_multicast_get_group(
        struct net_bridge *br, struct net_bridge_port *port,
        struct br_ip *group, int hash)
 {
-       struct net_bridge_mdb_htable *mdb = br->mdb;
+       struct net_bridge_mdb_htable *mdb;
        struct net_bridge_mdb_entry *mp;
        struct hlist_node *p;
        unsigned count = 0;
@@ -577,6 +585,7 @@ static struct net_bridge_mdb_entry *br_multicast_get_group(
        int elasticity;
        int err;
 
+       mdb = rcu_dereference_protected(br->mdb, 1);
        hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
                count++;
                if (unlikely(br_ip_equal(group, &mp->addr)))
@@ -642,10 +651,11 @@ static struct net_bridge_mdb_entry *br_multicast_new_group(
        struct net_bridge *br, struct net_bridge_port *port,
        struct br_ip *group)
 {
-       struct net_bridge_mdb_htable *mdb = br->mdb;
+       struct net_bridge_mdb_htable *mdb;
        struct net_bridge_mdb_entry *mp;
        int hash;
 
+       mdb = rcu_dereference_protected(br->mdb, 1);
        if (!mdb) {
                if (br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0))
                        return NULL;
@@ -660,7 +670,7 @@ static struct net_bridge_mdb_entry *br_multicast_new_group(
 
        case -EAGAIN:
 rehash:
-               mdb = br->mdb;
+               mdb = rcu_dereference_protected(br->mdb, 1);
                hash = br_ip_hash(mdb, group);
                break;
 
@@ -692,7 +702,7 @@ static int br_multicast_add_group(struct net_bridge *br,
 {
        struct net_bridge_mdb_entry *mp;
        struct net_bridge_port_group *p;
-       struct net_bridge_port_group **pp;
+       struct net_bridge_port_group __rcu **pp;
        unsigned long now = jiffies;
        int err;
 
@@ -712,7 +722,9 @@ static int br_multicast_add_group(struct net_bridge *br,
                goto out;
        }
 
-       for (pp = &mp->ports; (p = *pp); pp = &p->next) {
+       for (pp = &mp->ports;
+            (p = mlock_dereference(*pp, br)) != NULL;
+            pp = &p->next) {
                if (p->port == port)
                        goto found;
                if ((unsigned long)p->port < (unsigned long)port)
@@ -1106,7 +1118,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
        struct net_bridge_mdb_entry *mp;
        struct igmpv3_query *ih3;
        struct net_bridge_port_group *p;
-       struct net_bridge_port_group **pp;
+       struct net_bridge_port_group __rcu **pp;
        unsigned long max_delay;
        unsigned long now = jiffies;
        __be32 group;
@@ -1145,7 +1157,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
        if (!group)
                goto out;
 
-       mp = br_mdb_ip4_get(br->mdb, group);
+       mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group);
        if (!mp)
                goto out;
 
@@ -1157,7 +1169,9 @@ static int br_ip4_multicast_query(struct net_bridge *br,
             try_to_del_timer_sync(&mp->timer) >= 0))
                mod_timer(&mp->timer, now + max_delay);
 
-       for (pp = &mp->ports; (p = *pp); pp = &p->next) {
+       for (pp = &mp->ports;
+            (p = mlock_dereference(*pp, br)) != NULL;
+            pp = &p->next) {
                if (timer_pending(&p->timer) ?
                    time_after(p->timer.expires, now + max_delay) :
                    try_to_del_timer_sync(&p->timer) >= 0)
@@ -1178,7 +1192,8 @@ static int br_ip6_multicast_query(struct net_bridge *br,
        struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb);
        struct net_bridge_mdb_entry *mp;
        struct mld2_query *mld2q;
-       struct net_bridge_port_group *p, **pp;
+       struct net_bridge_port_group *p;
+       struct net_bridge_port_group __rcu **pp;
        unsigned long max_delay;
        unsigned long now = jiffies;
        struct in6_addr *group = NULL;
@@ -1214,7 +1229,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
        if (!group)
                goto out;
 
-       mp = br_mdb_ip6_get(br->mdb, group);
+       mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group);
        if (!mp)
                goto out;
 
@@ -1225,7 +1240,9 @@ static int br_ip6_multicast_query(struct net_bridge *br,
             try_to_del_timer_sync(&mp->timer) >= 0))
                mod_timer(&mp->timer, now + max_delay);
 
-       for (pp = &mp->ports; (p = *pp); pp = &p->next) {
+       for (pp = &mp->ports;
+            (p = mlock_dereference(*pp, br)) != NULL;
+            pp = &p->next) {
                if (timer_pending(&p->timer) ?
                    time_after(p->timer.expires, now + max_delay) :
                    try_to_del_timer_sync(&p->timer) >= 0)
@@ -1254,7 +1271,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
            timer_pending(&br->multicast_querier_timer))
                goto out;
 
-       mdb = br->mdb;
+       mdb = mlock_dereference(br->mdb, br);
        mp = br_mdb_ip_get(mdb, group);
        if (!mp)
                goto out;
@@ -1277,7 +1294,9 @@ static void br_multicast_leave_group(struct net_bridge *br,
                goto out;
        }
 
-       for (p = mp->ports; p; p = p->next) {
+       for (p = mlock_dereference(mp->ports, br);
+            p != NULL;
+            p = mlock_dereference(p->next, br)) {
                if (p->port != port)
                        continue;
 
@@ -1625,7 +1644,7 @@ void br_multicast_stop(struct net_bridge *br)
        del_timer_sync(&br->multicast_query_timer);
 
        spin_lock_bh(&br->multicast_lock);
-       mdb = br->mdb;
+       mdb = mlock_dereference(br->mdb, br);
        if (!mdb)
                goto out;
 
@@ -1729,6 +1748,7 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
 {
        struct net_bridge_port *port;
        int err = 0;
+       struct net_bridge_mdb_htable *mdb;
 
        spin_lock(&br->multicast_lock);
        if (br->multicast_disabled == !val)
@@ -1741,15 +1761,16 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
        if (!netif_running(br->dev))
                goto unlock;
 
-       if (br->mdb) {
-               if (br->mdb->old) {
+       mdb = mlock_dereference(br->mdb, br);
+       if (mdb) {
+               if (mdb->old) {
                        err = -EEXIST;
 rollback:
                        br->multicast_disabled = !!val;
                        goto unlock;
                }
 
-               err = br_mdb_rehash(&br->mdb, br->mdb->max,
+               err = br_mdb_rehash(&br->mdb, mdb->max,
                                    br->hash_elasticity);
                if (err)
                        goto rollback;
@@ -1774,6 +1795,7 @@ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
 {
        int err = -ENOENT;
        u32 old;
+       struct net_bridge_mdb_htable *mdb;
 
        spin_lock(&br->multicast_lock);
        if (!netif_running(br->dev))
@@ -1782,7 +1804,9 @@ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
        err = -EINVAL;
        if (!is_power_of_2(val))
                goto unlock;
-       if (br->mdb && val < br->mdb->size)
+
+       mdb = mlock_dereference(br->mdb, br);
+       if (mdb && val < mdb->size)
                goto unlock;
 
        err = 0;
@@ -1790,8 +1814,8 @@ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
        old = br->hash_max;
        br->hash_max = val;
 
-       if (br->mdb) {
-               if (br->mdb->old) {
+       if (mdb) {
+               if (mdb->old) {
                        err = -EEXIST;
 rollback:
                        br->hash_max = old;
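
mlock_dereference(), added at the top of this file, wraps rcu_dereference_protected() with lockdep_is_held(&br->multicast_lock): it documents, and lets lockdep verify, that the caller holds the multicast lock and may therefore load the __rcu pointer without RCU read-side protection. A rough userspace analogue of "assert the lock is held, then dereference", with hypothetical names and a plain flag standing in for lockdep:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_htable { int size; };

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static int demo_lock_held;                   /* stand-in for lockdep_is_held() */
static struct demo_htable *demo_table;       /* stand-in for the __rcu pointer */

/* cf. mlock_dereference(): only legal while the protecting lock is held */
#define demo_lock_dereference() \
        (assert(demo_lock_held), demo_table)

int main(void)
{
        pthread_mutex_lock(&demo_lock);
        demo_lock_held = 1;

        demo_table = calloc(1, sizeof(*demo_table));
        if (!demo_table)
                return 1;
        demo_lock_dereference()->size = 16;  /* safe: this thread holds demo_lock */
        printf("size=%d\n", demo_table->size);

        demo_lock_held = 0;
        pthread_mutex_unlock(&demo_lock);
        free(demo_table);
        return 0;
}
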
index 865fd7634b673d4233c8a6758e2426db55696514..6e13920939113b07e7ae1bbed452323e261865d2 100644 (file)
@@ -131,17 +131,18 @@ void br_netfilter_rtable_init(struct net_bridge *br)
 
 static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
 {
-       if (!br_port_exists(dev))
-               return NULL;
-       return &br_port_get_rcu(dev)->br->fake_rtable;
+       struct net_bridge_port *port;
+
+       port = br_port_get_rcu(dev);
+       return port ? &port->br->fake_rtable : NULL;
 }
 
 static inline struct net_device *bridge_parent(const struct net_device *dev)
 {
-       if (!br_port_exists(dev))
-               return NULL;
+       struct net_bridge_port *port;
 
-       return br_port_get_rcu(dev)->br->dev;
+       port = br_port_get_rcu(dev);
+       return port ? port->br->dev : NULL;
 }
 
 static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
@@ -412,13 +413,8 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
        if (dnat_took_place(skb)) {
                if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
                        struct flowi fl = {
-                               .nl_u = {
-                                       .ip4_u = {
-                                                .daddr = iph->daddr,
-                                                .saddr = 0,
-                                                .tos = RT_TOS(iph->tos) },
-                               },
-                               .proto = 0,
+                               .fl4_dst = iph->daddr,
+                               .fl4_tos = RT_TOS(iph->tos),
                        };
                        struct in_device *in_dev = __in_dev_get_rcu(dev);
 
index 4a6a378c84e357d06f45ae70fad7cd808c4e8bc8..f8bf4c7f842c1714850ef21926035d44ee740bd4 100644 (file)
@@ -119,11 +119,13 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 
        idx = 0;
        for_each_netdev(net, dev) {
+               struct net_bridge_port *port = br_port_get_rtnl(dev);
+
                /* not a bridge port */
-               if (!br_port_exists(dev) || idx < cb->args[0])
+               if (!port || idx < cb->args[0])
                        goto skip;
 
-               if (br_fill_ifinfo(skb, br_port_get(dev),
+               if (br_fill_ifinfo(skb, port,
                                   NETLINK_CB(cb->skb).pid,
                                   cb->nlh->nlmsg_seq, RTM_NEWLINK,
                                   NLM_F_MULTI) < 0)
@@ -169,9 +171,9 @@ static int br_rtm_setlink(struct sk_buff *skb,  struct nlmsghdr *nlh, void *arg)
        if (!dev)
                return -ENODEV;
 
-       if (!br_port_exists(dev))
+       p = br_port_get_rtnl(dev);
+       if (!p)
                return -EINVAL;
-       p = br_port_get(dev);
 
        /* if kernel STP is running, don't allow changes */
        if (p->br->stp_enabled == BR_KERNEL_STP)
index 404d4e14c6a7702293521a668914765c3ec59daa..7d337c9b6082a5f3d23efa47823db5b2d99f7e5a 100644 (file)
@@ -32,15 +32,15 @@ struct notifier_block br_device_notifier = {
 static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
 {
        struct net_device *dev = ptr;
-       struct net_bridge_port *p = br_port_get(dev);
+       struct net_bridge_port *p;
        struct net_bridge *br;
        int err;
 
        /* not a port of a bridge */
-       if (!br_port_exists(dev))
+       p = br_port_get_rtnl(dev);
+       if (!p)
                return NOTIFY_DONE;
 
-       p = br_port_get(dev);
        br = p->br;
 
        switch (event) {
index 75c90edaf7db097432dd959069ed043a89d03262..84aac7734bfc7e2c219680dc48d8c3ba75ca6924 100644 (file)
@@ -72,7 +72,7 @@ struct net_bridge_fdb_entry
 
 struct net_bridge_port_group {
        struct net_bridge_port          *port;
-       struct net_bridge_port_group    *next;
+       struct net_bridge_port_group __rcu *next;
        struct hlist_node               mglist;
        struct rcu_head                 rcu;
        struct timer_list               timer;
@@ -86,7 +86,7 @@ struct net_bridge_mdb_entry
        struct hlist_node               hlist[2];
        struct hlist_node               mglist;
        struct net_bridge               *br;
-       struct net_bridge_port_group    *ports;
+       struct net_bridge_port_group __rcu *ports;
        struct rcu_head                 rcu;
        struct timer_list               timer;
        struct timer_list               query_timer;
@@ -151,11 +151,20 @@ struct net_bridge_port
 #endif
 };
 
-#define br_port_get_rcu(dev) \
-       ((struct net_bridge_port *) rcu_dereference(dev->rx_handler_data))
-#define br_port_get(dev) ((struct net_bridge_port *) dev->rx_handler_data)
 #define br_port_exists(dev) (dev->priv_flags & IFF_BRIDGE_PORT)
 
+static inline struct net_bridge_port *br_port_get_rcu(const struct net_device *dev)
+{
+       struct net_bridge_port *port = rcu_dereference(dev->rx_handler_data);
+       return br_port_exists(dev) ? port : NULL;
+}
+
+static inline struct net_bridge_port *br_port_get_rtnl(struct net_device *dev)
+{
+       return br_port_exists(dev) ?
+               rtnl_dereference(dev->rx_handler_data) : NULL;
+}
+
 struct br_cpu_netstats {
        u64                     rx_packets;
        u64                     rx_bytes;
@@ -227,7 +236,7 @@ struct net_bridge
        unsigned long                   multicast_startup_query_interval;
 
        spinlock_t                      multicast_lock;
-       struct net_bridge_mdb_htable    *mdb;
+       struct net_bridge_mdb_htable __rcu *mdb;
        struct hlist_head               router_list;
        struct hlist_head               mglist;
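
The old br_port_exists()/br_port_get() pair made callers test and fetch in two separate steps; the inline accessors added above fold the test into the fetch and return NULL when the device is not a bridge port, which is why every caller touched in this diff collapses to "get, then check for NULL". A compile-able userspace sketch of that accessor shape (hypothetical names, a plain flag standing in for IFF_BRIDGE_PORT):

#include <stdio.h>
#include <stddef.h>

struct demo_port { int id; };

struct demo_dev {
        unsigned int     priv_flags;
        void            *rx_handler_data;
};
#define DEMO_IFF_BRIDGE_PORT 0x1

/* cf. br_port_get_rcu()/br_port_get_rtnl(): fetch and validate in one step */
static struct demo_port *demo_port_get(const struct demo_dev *dev)
{
        return (dev->priv_flags & DEMO_IFF_BRIDGE_PORT) ?
                dev->rx_handler_data : NULL;
}

int main(void)
{
        struct demo_port port = { .id = 7 };
        struct demo_dev dev = {
                .priv_flags      = DEMO_IFF_BRIDGE_PORT,
                .rx_handler_data = &port,
        };
        struct demo_port *p = demo_port_get(&dev);

        if (p)
                printf("port id %d\n", p->id);
        return 0;
}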
 
index 35cf27087b561d6e9955fd75b4b03213a6e9e8d8..3d9a55d3822f0605eb333b22101833723cdd61d7 100644 (file)
@@ -141,10 +141,6 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
        struct net_bridge *br;
        const unsigned char *buf;
 
-       if (!br_port_exists(dev))
-               goto err;
-       p = br_port_get_rcu(dev);
-
        if (!pskb_may_pull(skb, 4))
                goto err;
 
@@ -153,6 +149,10 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
        if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0)
                goto err;
 
+       p = br_port_get_rcu(dev);
+       if (!p)
+               goto err;
+
        br = p->br;
        spin_lock(&br->lock);
 
index ae3f106c39081c445a65dcd2fd07eff90c8403c8..1bcaf36ad612739f54e411984218f79ec645e808 100644 (file)
@@ -87,7 +87,8 @@ static int __init ebtable_broute_init(void)
        if (ret < 0)
                return ret;
        /* see br_input.c */
-       rcu_assign_pointer(br_should_route_hook, ebt_broute);
+       rcu_assign_pointer(br_should_route_hook,
+                          (br_should_route_hook_t *)ebt_broute);
        return 0;
 }
 
index a1dcf83f0d5860743906f75bcae5a23477c9b098..cbc9f395ab1ed1119bf51f460c992b1b2e315e30 100644 (file)
@@ -128,6 +128,7 @@ ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
                 const struct net_device *in, const struct net_device *out)
 {
        const struct ethhdr *h = eth_hdr(skb);
+       const struct net_bridge_port *p;
        __be16 ethproto;
        int verdict, i;
 
@@ -148,13 +149,11 @@ ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
        if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
                return 1;
        /* rcu_read_lock()ed by nf_hook_slow */
-       if (in && br_port_exists(in) &&
-           FWINV2(ebt_dev_check(e->logical_in, br_port_get_rcu(in)->br->dev),
-                  EBT_ILOGICALIN))
+       if (in && (p = br_port_get_rcu(in)) != NULL &&
+           FWINV2(ebt_dev_check(e->logical_in, p->br->dev), EBT_ILOGICALIN))
                return 1;
-       if (out && br_port_exists(out) &&
-           FWINV2(ebt_dev_check(e->logical_out, br_port_get_rcu(out)->br->dev),
-                  EBT_ILOGICALOUT))
+       if (out && (p = br_port_get_rcu(out)) != NULL &&
+           FWINV2(ebt_dev_check(e->logical_out, p->br->dev), EBT_ILOGICALOUT))
                return 1;
 
        if (e->bitmask & EBT_SOURCEMAC) {
index 0dd54a69dace255fcdf54732d982e8c521c574a5..381b8e280162f55b654fae40576fcaf86b8f3b56 100644 (file)
@@ -1794,16 +1794,18 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
        struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
        struct packet_type *ptype;
        __be16 type = skb->protocol;
+       int vlan_depth = ETH_HLEN;
        int err;
 
-       if (type == htons(ETH_P_8021Q)) {
-               struct vlan_ethhdr *veh;
+       while (type == htons(ETH_P_8021Q)) {
+               struct vlan_hdr *vh;
 
-               if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
+               if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
                        return ERR_PTR(-EINVAL);
 
-               veh = (struct vlan_ethhdr *)skb->data;
-               type = veh->h_vlan_encapsulated_proto;
+               vh = (struct vlan_hdr *)(skb->data + vlan_depth);
+               type = vh->h_vlan_encapsulated_proto;
+               vlan_depth += VLAN_HLEN;
        }
 
        skb_reset_mac_header(skb);
@@ -1817,8 +1819,7 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
                if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
                        dev->ethtool_ops->get_drvinfo(dev, &info);
 
-               WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
-                       "ip_summed=%d",
+               WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d ip_summed=%d\n",
                     info.driver, dev ? dev->features : 0L,
                     skb->sk ? skb->sk->sk_route_caps : 0L,
                     skb->len, skb->data_len, skb->ip_summed);
@@ -1967,6 +1968,23 @@ static inline void skb_orphan_try(struct sk_buff *skb)
        }
 }
 
+int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev)
+{
+       __be16 protocol = skb->protocol;
+
+       if (protocol == htons(ETH_P_8021Q)) {
+               struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
+               protocol = veh->h_vlan_encapsulated_proto;
+       } else if (!skb->vlan_tci)
+               return dev->features;
+
+       if (protocol != htons(ETH_P_8021Q))
+               return dev->features & dev->vlan_features;
+       else
+               return 0;
+}
+EXPORT_SYMBOL(netif_get_vlan_features);
+
 /*
  * Returns true if either:
  *     1. skb has frag_list and the device doesn't support FRAGLIST, or
@@ -1977,15 +1995,20 @@ static inline void skb_orphan_try(struct sk_buff *skb)
 static inline int skb_needs_linearize(struct sk_buff *skb,
                                      struct net_device *dev)
 {
-       int features = dev->features;
+       if (skb_is_nonlinear(skb)) {
+               int features = dev->features;
 
-       if (skb->protocol == htons(ETH_P_8021Q) || vlan_tx_tag_present(skb))
-               features &= dev->vlan_features;
+               if (vlan_tx_tag_present(skb))
+                       features &= dev->vlan_features;
 
-       return skb_is_nonlinear(skb) &&
-              ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
-               (skb_shinfo(skb)->nr_frags && (!(features & NETIF_F_SG) ||
-                                             illegal_highdma(dev, skb))));
+               return (skb_has_frag_list(skb) &&
+                       !(features & NETIF_F_FRAGLIST)) ||
+                       (skb_shinfo(skb)->nr_frags &&
+                       (!(features & NETIF_F_SG) ||
+                       illegal_highdma(dev, skb)));
+       }
+
+       return 0;
 }
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
@@ -5029,12 +5052,8 @@ static int netif_alloc_rx_queues(struct net_device *dev)
        }
        dev->_rx = rx;
 
-       /*
-        * Set a pointer to first element in the array which holds the
-        * reference count.
-        */
        for (i = 0; i < count; i++)
-               rx[i].first = rx;
+               rx[i].dev = dev;
 #endif
        return 0;
 }
@@ -5110,14 +5129,6 @@ int register_netdevice(struct net_device *dev)
 
        dev->iflink = -1;
 
-       ret = netif_alloc_rx_queues(dev);
-       if (ret)
-               goto out;
-
-       ret = netif_alloc_netdev_queues(dev);
-       if (ret)
-               goto out;
-
        netdev_init_queues(dev);
 
        /* Init, if this function is available */
@@ -5577,10 +5588,14 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 
        dev->num_tx_queues = queue_count;
        dev->real_num_tx_queues = queue_count;
+       if (netif_alloc_netdev_queues(dev))
+               goto free_pcpu;
 
 #ifdef CONFIG_RPS
        dev->num_rx_queues = queue_count;
        dev->real_num_rx_queues = queue_count;
+       if (netif_alloc_rx_queues(dev))
+               goto free_pcpu;
 #endif
 
        dev->gso_max_size = GSO_MAX_SIZE;
@@ -5597,6 +5612,11 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 
 free_pcpu:
        free_percpu(dev->pcpu_refcnt);
+       kfree(dev->_tx);
+#ifdef CONFIG_RPS
+       kfree(dev->_rx);
+#endif
+
 free_p:
        kfree(p);
        return NULL;
@@ -5618,6 +5638,9 @@ void free_netdev(struct net_device *dev)
        release_net(dev_net(dev));
 
        kfree(dev->_tx);
+#ifdef CONFIG_RPS
+       kfree(dev->_rx);
+#endif
 
        kfree(rcu_dereference_raw(dev->ingress_queue));
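
The skb_gso_segment() hunk in this file replaces the single 802.1Q test with a loop that walks an arbitrary stack of VLAN headers, starting past the 14-byte Ethernet header and advancing by VLAN_HLEN (4 bytes) per tag until it reaches the encapsulated protocol. A compile-able userspace sketch of that walk over a raw frame buffer (constants and the sample frame are illustrative):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define DEMO_ETH_HLEN    14
#define DEMO_VLAN_HLEN   4
#define DEMO_ETH_P_8021Q 0x8100

static uint16_t demo_be16(const uint8_t *p)
{
        return (uint16_t)((p[0] << 8) | p[1]);
}

/* Return the protocol carried after any stack of 802.1Q tags, 0 if truncated. */
static uint16_t demo_skip_vlan(const uint8_t *frame, size_t len)
{
        size_t depth = DEMO_ETH_HLEN;
        uint16_t proto = demo_be16(frame + 12);          /* Ethernet h_proto */

        while (proto == DEMO_ETH_P_8021Q) {
                if (depth + DEMO_VLAN_HLEN > len)
                        return 0;                        /* cf. pskb_may_pull() failing */
                /* the encapsulated proto is the last two bytes of the VLAN header */
                proto = demo_be16(frame + depth + 2);
                depth += DEMO_VLAN_HLEN;
        }
        return proto;
}

int main(void)
{
        uint8_t frame[64] = { 0 };

        frame[12] = 0x81; frame[13] = 0x00;              /* outer 802.1Q tag */
        frame[16] = 0x81; frame[17] = 0x00;              /* inner 802.1Q tag */
        frame[20] = 0x08; frame[21] = 0x00;              /* IPv4 underneath  */

        printf("inner protocol: 0x%04x\n", demo_skip_vlan(frame, sizeof(frame)));
        return 0;
}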
 
index c1ee800bc080c3faf33033e1659021a0a356f7ce..a44d27f9f0f0aa4975d95b8e8c56bebb3f0fda6b 100644 (file)
 #include <asm/uaccess.h>
 #include <asm/unaligned.h>
 #include <linux/filter.h>
+#include <linux/reciprocal_div.h>
+
+enum {
+       BPF_S_RET_K = 1,
+       BPF_S_RET_A,
+       BPF_S_ALU_ADD_K,
+       BPF_S_ALU_ADD_X,
+       BPF_S_ALU_SUB_K,
+       BPF_S_ALU_SUB_X,
+       BPF_S_ALU_MUL_K,
+       BPF_S_ALU_MUL_X,
+       BPF_S_ALU_DIV_X,
+       BPF_S_ALU_AND_K,
+       BPF_S_ALU_AND_X,
+       BPF_S_ALU_OR_K,
+       BPF_S_ALU_OR_X,
+       BPF_S_ALU_LSH_K,
+       BPF_S_ALU_LSH_X,
+       BPF_S_ALU_RSH_K,
+       BPF_S_ALU_RSH_X,
+       BPF_S_ALU_NEG,
+       BPF_S_LD_W_ABS,
+       BPF_S_LD_H_ABS,
+       BPF_S_LD_B_ABS,
+       BPF_S_LD_W_LEN,
+       BPF_S_LD_W_IND,
+       BPF_S_LD_H_IND,
+       BPF_S_LD_B_IND,
+       BPF_S_LD_IMM,
+       BPF_S_LDX_W_LEN,
+       BPF_S_LDX_B_MSH,
+       BPF_S_LDX_IMM,
+       BPF_S_MISC_TAX,
+       BPF_S_MISC_TXA,
+       BPF_S_ALU_DIV_K,
+       BPF_S_LD_MEM,
+       BPF_S_LDX_MEM,
+       BPF_S_ST,
+       BPF_S_STX,
+       BPF_S_JMP_JA,
+       BPF_S_JMP_JEQ_K,
+       BPF_S_JMP_JEQ_X,
+       BPF_S_JMP_JGE_K,
+       BPF_S_JMP_JGE_X,
+       BPF_S_JMP_JGT_K,
+       BPF_S_JMP_JGT_X,
+       BPF_S_JMP_JSET_K,
+       BPF_S_JMP_JSET_X,
+};
 
 /* No hurry in this branch */
 static void *__load_pointer(struct sk_buff *skb, int k)
@@ -89,7 +138,7 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
        rcu_read_lock_bh();
        filter = rcu_dereference_bh(sk->sk_filter);
        if (filter) {
-               unsigned int pkt_len = sk_run_filter(skb, filter->insns, filter->len);
+               unsigned int pkt_len = sk_run_filter(skb, filter->insns);
 
                err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
        }
@@ -103,14 +152,15 @@ EXPORT_SYMBOL(sk_filter);
  *     sk_run_filter - run a filter on a socket
  *     @skb: buffer to run the filter on
  *     @filter: filter to apply
- *     @flen: length of filter
  *
  * Decode and apply filter instructions to the skb->data.
- * Return length to keep, 0 for none. skb is the data we are
- * filtering, filter is the array of filter instructions, and
- * len is the number of filter blocks in the array.
+ * Return length to keep, 0 for none. @skb is the data we are
+ * filtering, @filter is the array of filter instructions.
+ * Because all jumps are guaranteed to be before last instruction,
+ * and last instruction guaranteed to be a RET, we dont need to check
+ * flen. (We used to pass to this function the length of filter)
  */
-unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
+unsigned int sk_run_filter(struct sk_buff *skb, const struct sock_filter *fentry)
 {
        void *ptr;
        u32 A = 0;                      /* Accumulator */
@@ -119,34 +169,36 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
        unsigned long memvalid = 0;
        u32 tmp;
        int k;
-       int pc;
 
        BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG);
        /*
         * Process array of filter instructions.
         */
-       for (pc = 0; pc < flen; pc++) {
-               const struct sock_filter *fentry = &filter[pc];
-               u32 f_k = fentry->k;
+       for (;; fentry++) {
+#if defined(CONFIG_X86_32)
+#define        K (fentry->k)
+#else
+               const u32 K = fentry->k;
+#endif
 
                switch (fentry->code) {
                case BPF_S_ALU_ADD_X:
                        A += X;
                        continue;
                case BPF_S_ALU_ADD_K:
-                       A += f_k;
+                       A += K;
                        continue;
                case BPF_S_ALU_SUB_X:
                        A -= X;
                        continue;
                case BPF_S_ALU_SUB_K:
-                       A -= f_k;
+                       A -= K;
                        continue;
                case BPF_S_ALU_MUL_X:
                        A *= X;
                        continue;
                case BPF_S_ALU_MUL_K:
-                       A *= f_k;
+                       A *= K;
                        continue;
                case BPF_S_ALU_DIV_X:
                        if (X == 0)
@@ -154,64 +206,64 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
                        A /= X;
                        continue;
                case BPF_S_ALU_DIV_K:
-                       A /= f_k;
+                       A = reciprocal_divide(A, K);
                        continue;
                case BPF_S_ALU_AND_X:
                        A &= X;
                        continue;
                case BPF_S_ALU_AND_K:
-                       A &= f_k;
+                       A &= K;
                        continue;
                case BPF_S_ALU_OR_X:
                        A |= X;
                        continue;
                case BPF_S_ALU_OR_K:
-                       A |= f_k;
+                       A |= K;
                        continue;
                case BPF_S_ALU_LSH_X:
                        A <<= X;
                        continue;
                case BPF_S_ALU_LSH_K:
-                       A <<= f_k;
+                       A <<= K;
                        continue;
                case BPF_S_ALU_RSH_X:
                        A >>= X;
                        continue;
                case BPF_S_ALU_RSH_K:
-                       A >>= f_k;
+                       A >>= K;
                        continue;
                case BPF_S_ALU_NEG:
                        A = -A;
                        continue;
                case BPF_S_JMP_JA:
-                       pc += f_k;
+                       fentry += K;
                        continue;
                case BPF_S_JMP_JGT_K:
-                       pc += (A > f_k) ? fentry->jt : fentry->jf;
+                       fentry += (A > K) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JGE_K:
-                       pc += (A >= f_k) ? fentry->jt : fentry->jf;
+                       fentry += (A >= K) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JEQ_K:
-                       pc += (A == f_k) ? fentry->jt : fentry->jf;
+                       fentry += (A == K) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JSET_K:
-                       pc += (A & f_k) ? fentry->jt : fentry->jf;
+                       fentry += (A & K) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JGT_X:
-                       pc += (A > X) ? fentry->jt : fentry->jf;
+                       fentry += (A > X) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JGE_X:
-                       pc += (A >= X) ? fentry->jt : fentry->jf;
+                       fentry += (A >= X) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JEQ_X:
-                       pc += (A == X) ? fentry->jt : fentry->jf;
+                       fentry += (A == X) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JSET_X:
-                       pc += (A & X) ? fentry->jt : fentry->jf;
+                       fentry += (A & X) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_LD_W_ABS:
-                       k = f_k;
+                       k = K;
 load_w:
                        ptr = load_pointer(skb, k, 4, &tmp);
                        if (ptr != NULL) {
@@ -220,7 +272,7 @@ load_w:
                        }
                        break;
                case BPF_S_LD_H_ABS:
-                       k = f_k;
+                       k = K;
 load_h:
                        ptr = load_pointer(skb, k, 2, &tmp);
                        if (ptr != NULL) {
@@ -229,7 +281,7 @@ load_h:
                        }
                        break;
                case BPF_S_LD_B_ABS:
-                       k = f_k;
+                       k = K;
 load_b:
                        ptr = load_pointer(skb, k, 1, &tmp);
                        if (ptr != NULL) {
@@ -244,34 +296,34 @@ load_b:
                        X = skb->len;
                        continue;
                case BPF_S_LD_W_IND:
-                       k = X + f_k;
+                       k = X + K;
                        goto load_w;
                case BPF_S_LD_H_IND:
-                       k = X + f_k;
+                       k = X + K;
                        goto load_h;
                case BPF_S_LD_B_IND:
-                       k = X + f_k;
+                       k = X + K;
                        goto load_b;
                case BPF_S_LDX_B_MSH:
-                       ptr = load_pointer(skb, f_k, 1, &tmp);
+                       ptr = load_pointer(skb, K, 1, &tmp);
                        if (ptr != NULL) {
                                X = (*(u8 *)ptr & 0xf) << 2;
                                continue;
                        }
                        return 0;
                case BPF_S_LD_IMM:
-                       A = f_k;
+                       A = K;
                        continue;
                case BPF_S_LDX_IMM:
-                       X = f_k;
+                       X = K;
                        continue;
                case BPF_S_LD_MEM:
-                       A = (memvalid & (1UL << f_k)) ?
-                               mem[f_k] : 0;
+                       A = (memvalid & (1UL << K)) ?
+                               mem[K] : 0;
                        continue;
                case BPF_S_LDX_MEM:
-                       X = (memvalid & (1UL << f_k)) ?
-                               mem[f_k] : 0;
+                       X = (memvalid & (1UL << K)) ?
+                               mem[K] : 0;
                        continue;
                case BPF_S_MISC_TAX:
                        X = A;
@@ -280,16 +332,16 @@ load_b:
                        A = X;
                        continue;
                case BPF_S_RET_K:
-                       return f_k;
+                       return K;
                case BPF_S_RET_A:
                        return A;
                case BPF_S_ST:
-                       memvalid |= 1UL << f_k;
-                       mem[f_k] = A;
+                       memvalid |= 1UL << K;
+                       mem[K] = A;
                        continue;
                case BPF_S_STX:
-                       memvalid |= 1UL << f_k;
-                       mem[f_k] = X;
+                       memvalid |= 1UL << K;
+                       mem[K] = X;
                        continue;
                default:
                        WARN_ON(1);
@@ -383,7 +435,57 @@ EXPORT_SYMBOL(sk_run_filter);
  */
 int sk_chk_filter(struct sock_filter *filter, int flen)
 {
-       struct sock_filter *ftest;
+       /*
+        * Valid instructions are initialized to non-0.
+        * Invalid instructions are initialized to 0.
+        */
+       static const u8 codes[] = {
+               [BPF_ALU|BPF_ADD|BPF_K]  = BPF_S_ALU_ADD_K,
+               [BPF_ALU|BPF_ADD|BPF_X]  = BPF_S_ALU_ADD_X,
+               [BPF_ALU|BPF_SUB|BPF_K]  = BPF_S_ALU_SUB_K,
+               [BPF_ALU|BPF_SUB|BPF_X]  = BPF_S_ALU_SUB_X,
+               [BPF_ALU|BPF_MUL|BPF_K]  = BPF_S_ALU_MUL_K,
+               [BPF_ALU|BPF_MUL|BPF_X]  = BPF_S_ALU_MUL_X,
+               [BPF_ALU|BPF_DIV|BPF_X]  = BPF_S_ALU_DIV_X,
+               [BPF_ALU|BPF_AND|BPF_K]  = BPF_S_ALU_AND_K,
+               [BPF_ALU|BPF_AND|BPF_X]  = BPF_S_ALU_AND_X,
+               [BPF_ALU|BPF_OR|BPF_K]   = BPF_S_ALU_OR_K,
+               [BPF_ALU|BPF_OR|BPF_X]   = BPF_S_ALU_OR_X,
+               [BPF_ALU|BPF_LSH|BPF_K]  = BPF_S_ALU_LSH_K,
+               [BPF_ALU|BPF_LSH|BPF_X]  = BPF_S_ALU_LSH_X,
+               [BPF_ALU|BPF_RSH|BPF_K]  = BPF_S_ALU_RSH_K,
+               [BPF_ALU|BPF_RSH|BPF_X]  = BPF_S_ALU_RSH_X,
+               [BPF_ALU|BPF_NEG]        = BPF_S_ALU_NEG,
+               [BPF_LD|BPF_W|BPF_ABS]   = BPF_S_LD_W_ABS,
+               [BPF_LD|BPF_H|BPF_ABS]   = BPF_S_LD_H_ABS,
+               [BPF_LD|BPF_B|BPF_ABS]   = BPF_S_LD_B_ABS,
+               [BPF_LD|BPF_W|BPF_LEN]   = BPF_S_LD_W_LEN,
+               [BPF_LD|BPF_W|BPF_IND]   = BPF_S_LD_W_IND,
+               [BPF_LD|BPF_H|BPF_IND]   = BPF_S_LD_H_IND,
+               [BPF_LD|BPF_B|BPF_IND]   = BPF_S_LD_B_IND,
+               [BPF_LD|BPF_IMM]         = BPF_S_LD_IMM,
+               [BPF_LDX|BPF_W|BPF_LEN]  = BPF_S_LDX_W_LEN,
+               [BPF_LDX|BPF_B|BPF_MSH]  = BPF_S_LDX_B_MSH,
+               [BPF_LDX|BPF_IMM]        = BPF_S_LDX_IMM,
+               [BPF_MISC|BPF_TAX]       = BPF_S_MISC_TAX,
+               [BPF_MISC|BPF_TXA]       = BPF_S_MISC_TXA,
+               [BPF_RET|BPF_K]          = BPF_S_RET_K,
+               [BPF_RET|BPF_A]          = BPF_S_RET_A,
+               [BPF_ALU|BPF_DIV|BPF_K]  = BPF_S_ALU_DIV_K,
+               [BPF_LD|BPF_MEM]         = BPF_S_LD_MEM,
+               [BPF_LDX|BPF_MEM]        = BPF_S_LDX_MEM,
+               [BPF_ST]                 = BPF_S_ST,
+               [BPF_STX]                = BPF_S_STX,
+               [BPF_JMP|BPF_JA]         = BPF_S_JMP_JA,
+               [BPF_JMP|BPF_JEQ|BPF_K]  = BPF_S_JMP_JEQ_K,
+               [BPF_JMP|BPF_JEQ|BPF_X]  = BPF_S_JMP_JEQ_X,
+               [BPF_JMP|BPF_JGE|BPF_K]  = BPF_S_JMP_JGE_K,
+               [BPF_JMP|BPF_JGE|BPF_X]  = BPF_S_JMP_JGE_X,
+               [BPF_JMP|BPF_JGT|BPF_K]  = BPF_S_JMP_JGT_K,
+               [BPF_JMP|BPF_JGT|BPF_X]  = BPF_S_JMP_JGT_X,
+               [BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
+               [BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
+       };
        int pc;
 
        if (flen == 0 || flen > BPF_MAXINSNS)
@@ -391,136 +493,31 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
 
        /* check the filter code now */
        for (pc = 0; pc < flen; pc++) {
-               ftest = &filter[pc];
-
-               /* Only allow valid instructions */
-               switch (ftest->code) {
-               case BPF_ALU|BPF_ADD|BPF_K:
-                       ftest->code = BPF_S_ALU_ADD_K;
-                       break;
-               case BPF_ALU|BPF_ADD|BPF_X:
-                       ftest->code = BPF_S_ALU_ADD_X;
-                       break;
-               case BPF_ALU|BPF_SUB|BPF_K:
-                       ftest->code = BPF_S_ALU_SUB_K;
-                       break;
-               case BPF_ALU|BPF_SUB|BPF_X:
-                       ftest->code = BPF_S_ALU_SUB_X;
-                       break;
-               case BPF_ALU|BPF_MUL|BPF_K:
-                       ftest->code = BPF_S_ALU_MUL_K;
-                       break;
-               case BPF_ALU|BPF_MUL|BPF_X:
-                       ftest->code = BPF_S_ALU_MUL_X;
-                       break;
-               case BPF_ALU|BPF_DIV|BPF_X:
-                       ftest->code = BPF_S_ALU_DIV_X;
-                       break;
-               case BPF_ALU|BPF_AND|BPF_K:
-                       ftest->code = BPF_S_ALU_AND_K;
-                       break;
-               case BPF_ALU|BPF_AND|BPF_X:
-                       ftest->code = BPF_S_ALU_AND_X;
-                       break;
-               case BPF_ALU|BPF_OR|BPF_K:
-                       ftest->code = BPF_S_ALU_OR_K;
-                       break;
-               case BPF_ALU|BPF_OR|BPF_X:
-                       ftest->code = BPF_S_ALU_OR_X;
-                       break;
-               case BPF_ALU|BPF_LSH|BPF_K:
-                       ftest->code = BPF_S_ALU_LSH_K;
-                       break;
-               case BPF_ALU|BPF_LSH|BPF_X:
-                       ftest->code = BPF_S_ALU_LSH_X;
-                       break;
-               case BPF_ALU|BPF_RSH|BPF_K:
-                       ftest->code = BPF_S_ALU_RSH_K;
-                       break;
-               case BPF_ALU|BPF_RSH|BPF_X:
-                       ftest->code = BPF_S_ALU_RSH_X;
-                       break;
-               case BPF_ALU|BPF_NEG:
-                       ftest->code = BPF_S_ALU_NEG;
-                       break;
-               case BPF_LD|BPF_W|BPF_ABS:
-                       ftest->code = BPF_S_LD_W_ABS;
-                       break;
-               case BPF_LD|BPF_H|BPF_ABS:
-                       ftest->code = BPF_S_LD_H_ABS;
-                       break;
-               case BPF_LD|BPF_B|BPF_ABS:
-                       ftest->code = BPF_S_LD_B_ABS;
-                       break;
-               case BPF_LD|BPF_W|BPF_LEN:
-                       ftest->code = BPF_S_LD_W_LEN;
-                       break;
-               case BPF_LD|BPF_W|BPF_IND:
-                       ftest->code = BPF_S_LD_W_IND;
-                       break;
-               case BPF_LD|BPF_H|BPF_IND:
-                       ftest->code = BPF_S_LD_H_IND;
-                       break;
-               case BPF_LD|BPF_B|BPF_IND:
-                       ftest->code = BPF_S_LD_B_IND;
-                       break;
-               case BPF_LD|BPF_IMM:
-                       ftest->code = BPF_S_LD_IMM;
-                       break;
-               case BPF_LDX|BPF_W|BPF_LEN:
-                       ftest->code = BPF_S_LDX_W_LEN;
-                       break;
-               case BPF_LDX|BPF_B|BPF_MSH:
-                       ftest->code = BPF_S_LDX_B_MSH;
-                       break;
-               case BPF_LDX|BPF_IMM:
-                       ftest->code = BPF_S_LDX_IMM;
-                       break;
-               case BPF_MISC|BPF_TAX:
-                       ftest->code = BPF_S_MISC_TAX;
-                       break;
-               case BPF_MISC|BPF_TXA:
-                       ftest->code = BPF_S_MISC_TXA;
-                       break;
-               case BPF_RET|BPF_K:
-                       ftest->code = BPF_S_RET_K;
-                       break;
-               case BPF_RET|BPF_A:
-                       ftest->code = BPF_S_RET_A;
-                       break;
+               struct sock_filter *ftest = &filter[pc];
+               u16 code = ftest->code;
 
+               if (code >= ARRAY_SIZE(codes))
+                       return -EINVAL;
+               code = codes[code];
+               if (!code)
+                       return -EINVAL;
                /* Some instructions need special checks */
-
+               switch (code) {
+               case BPF_S_ALU_DIV_K:
                        /* check for division by zero */
-               case BPF_ALU|BPF_DIV|BPF_K:
                        if (ftest->k == 0)
                                return -EINVAL;
-                       ftest->code = BPF_S_ALU_DIV_K;
-                       break;
-
-               /* check for invalid memory addresses */
-               case BPF_LD|BPF_MEM:
-                       if (ftest->k >= BPF_MEMWORDS)
-                               return -EINVAL;
-                       ftest->code = BPF_S_LD_MEM;
-                       break;
-               case BPF_LDX|BPF_MEM:
-                       if (ftest->k >= BPF_MEMWORDS)
-                               return -EINVAL;
-                       ftest->code = BPF_S_LDX_MEM;
-                       break;
-               case BPF_ST:
-                       if (ftest->k >= BPF_MEMWORDS)
-                               return -EINVAL;
-                       ftest->code = BPF_S_ST;
+                       ftest->k = reciprocal_value(ftest->k);
                        break;
-               case BPF_STX:
+               case BPF_S_LD_MEM:
+               case BPF_S_LDX_MEM:
+               case BPF_S_ST:
+               case BPF_S_STX:
+                       /* check for invalid memory addresses */
                        if (ftest->k >= BPF_MEMWORDS)
                                return -EINVAL;
-                       ftest->code = BPF_S_STX;
                        break;
-
-               case BPF_JMP|BPF_JA:
+               case BPF_S_JMP_JA:
                        /*
                         * Note, the large ftest->k might cause loops.
                         * Compare this with conditional jumps below,
@@ -528,40 +525,7 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
                         */
                        if (ftest->k >= (unsigned)(flen-pc-1))
                                return -EINVAL;
-                       ftest->code = BPF_S_JMP_JA;
-                       break;
-
-               case BPF_JMP|BPF_JEQ|BPF_K:
-                       ftest->code = BPF_S_JMP_JEQ_K;
                        break;
-               case BPF_JMP|BPF_JEQ|BPF_X:
-                       ftest->code = BPF_S_JMP_JEQ_X;
-                       break;
-               case BPF_JMP|BPF_JGE|BPF_K:
-                       ftest->code = BPF_S_JMP_JGE_K;
-                       break;
-               case BPF_JMP|BPF_JGE|BPF_X:
-                       ftest->code = BPF_S_JMP_JGE_X;
-                       break;
-               case BPF_JMP|BPF_JGT|BPF_K:
-                       ftest->code = BPF_S_JMP_JGT_K;
-                       break;
-               case BPF_JMP|BPF_JGT|BPF_X:
-                       ftest->code = BPF_S_JMP_JGT_X;
-                       break;
-               case BPF_JMP|BPF_JSET|BPF_K:
-                       ftest->code = BPF_S_JMP_JSET_K;
-                       break;
-               case BPF_JMP|BPF_JSET|BPF_X:
-                       ftest->code = BPF_S_JMP_JSET_X;
-                       break;
-
-               default:
-                       return -EINVAL;
-               }
-
-                       /* for conditionals both must be safe */
-               switch (ftest->code) {
                case BPF_S_JMP_JEQ_K:
                case BPF_S_JMP_JEQ_X:
                case BPF_S_JMP_JGE_K:
@@ -570,10 +534,13 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
                case BPF_S_JMP_JGT_X:
                case BPF_S_JMP_JSET_X:
                case BPF_S_JMP_JSET_K:
+                       /* for conditionals both must be safe */
                        if (pc + ftest->jt + 1 >= flen ||
                            pc + ftest->jf + 1 >= flen)
                                return -EINVAL;
+                       break;
                }
+               ftest->code = code;
        }
 
        /* last instruction must be a RET code */
@@ -581,10 +548,8 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
        case BPF_S_RET_K:
        case BPF_S_RET_A:
                return 0;
-               break;
-               default:
-                       return -EINVAL;
-               }
+       }
+       return -EINVAL;
 }
 EXPORT_SYMBOL(sk_chk_filter);
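
Two changes stand out in this file: sk_chk_filter() now validates and translates opcodes through the codes[] lookup table instead of a long switch, and constant division no longer divides at run time: sk_chk_filter() rewrites ftest->k to reciprocal_value(k) once at load, and the interpreter's BPF_S_ALU_DIV_K case becomes reciprocal_divide(A, K), a multiply plus a shift. As far as I recall, the helpers in linux/reciprocal_div.h at this point compute R = ceil(2^32 / k) and then (A * R) >> 32, which matches A / k for typical operands but is an approximation rather than an exact division for every possible pair. A userspace sketch of that arithmetic (the exact kernel formulas should be read from linux/reciprocal_div.h):

#include <stdio.h>
#include <stdint.h>

/* Assumed shape of the 2010-era helpers in linux/reciprocal_div.h. */
static uint32_t demo_reciprocal_value(uint32_t k)
{
        uint64_t val = (1ULL << 32) + (k - 1);
        return (uint32_t)(val / k);              /* ceil(2^32 / k) */
}

static uint32_t demo_reciprocal_divide(uint32_t a, uint32_t r)
{
        return (uint32_t)(((uint64_t)a * r) >> 32);
}

int main(void)
{
        uint32_t k = 1500;                       /* hypothetical divisor */
        uint32_t r = demo_reciprocal_value(k);   /* done once, at filter load */
        uint32_t a;

        for (a = 1; a < 100000; a += 7777)       /* done per packet, no divide */
                printf("%u / %u: true=%u reciprocal=%u\n",
                       a, k, a / k, demo_reciprocal_divide(a, r));
        return 0;
}
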
 
index 7f902cad10f8b13d7096f53905e3a47bb3c12d25..7abeb7ceaa4c8d73bfa326090323cee654491fab 100644 (file)
@@ -706,7 +706,6 @@ static struct attribute *rx_queue_default_attrs[] = {
 static void rx_queue_release(struct kobject *kobj)
 {
        struct netdev_rx_queue *queue = to_rx_queue(kobj);
-       struct netdev_rx_queue *first = queue->first;
        struct rps_map *map;
        struct rps_dev_flow_table *flow_table;
 
@@ -723,10 +722,8 @@ static void rx_queue_release(struct kobject *kobj)
                call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
        }
 
-       if (atomic_dec_and_test(&first->count))
-               kfree(first);
-       else
-               memset(kobj, 0, sizeof(*kobj));
+       memset(kobj, 0, sizeof(*kobj));
+       dev_put(queue->dev);
 }
 
 static struct kobj_type rx_queue_ktype = {
@@ -738,7 +735,6 @@ static struct kobj_type rx_queue_ktype = {
 static int rx_queue_add_kobject(struct net_device *net, int index)
 {
        struct netdev_rx_queue *queue = net->_rx + index;
-       struct netdev_rx_queue *first = queue->first;
        struct kobject *kobj = &queue->kobj;
        int error = 0;
 
@@ -751,7 +747,7 @@ static int rx_queue_add_kobject(struct net_device *net, int index)
        }
 
        kobject_uevent(kobj, KOBJ_ADD);
-       atomic_inc(&first->count);
+       dev_hold(queue->dev);
 
        return error;
 }
index 33bc3823ac6fce64b227fafe670f2b12e3816a86..52fc1e08a7c47d2a2d09e546d817e312c7d337cb 100644 (file)
@@ -378,6 +378,7 @@ struct pktgen_dev {
 
        u16 queue_map_min;
        u16 queue_map_max;
+       __u32 skb_priority;     /* skb priority field */
        int node;               /* Memory node */
 
 #ifdef CONFIG_XFRM
@@ -547,6 +548,10 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
                   pkt_dev->queue_map_min,
                   pkt_dev->queue_map_max);
 
+       if (pkt_dev->skb_priority)
+               seq_printf(seq, "     skb_priority: %u\n",
+                          pkt_dev->skb_priority);
+
        if (pkt_dev->flags & F_IPV6) {
                char b1[128], b2[128], b3[128];
                fmt_ip6(b1, pkt_dev->in6_saddr.s6_addr);
@@ -1711,6 +1716,18 @@ static ssize_t pktgen_if_write(struct file *file,
                return count;
        }
 
+       if (!strcmp(name, "skb_priority")) {
+               len = num_arg(&user_buffer[i], 9, &value);
+               if (len < 0)
+                       return len;
+
+               i += len;
+               pkt_dev->skb_priority = value;
+               sprintf(pg_result, "OK: skb_priority=%i",
+                       pkt_dev->skb_priority);
+               return count;
+       }
+
        sprintf(pkt_dev->result, "No such parameter \"%s\"", name);
        return -EINVAL;
 }
@@ -2671,6 +2688,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
        skb->transport_header = skb->network_header + sizeof(struct iphdr);
        skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr));
        skb_set_queue_mapping(skb, queue_map);
+       skb->priority = pkt_dev->skb_priority;
+
        iph = ip_hdr(skb);
        udph = udp_hdr(skb);
 
@@ -3016,6 +3035,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
        skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
        skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr));
        skb_set_queue_mapping(skb, queue_map);
+       skb->priority = pkt_dev->skb_priority;
        iph = ipv6_hdr(skb);
        udph = udp_hdr(skb);
 
index 841c287ef40a4706c3465425880f95abd608e954..bf69e5871b1a3a891a200bebe25b85d10b6a02f2 100644 (file)
@@ -362,6 +362,95 @@ static size_t rtnl_link_get_size(const struct net_device *dev)
        return size;
 }
 
+static LIST_HEAD(rtnl_af_ops);
+
+static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
+{
+       const struct rtnl_af_ops *ops;
+
+       list_for_each_entry(ops, &rtnl_af_ops, list) {
+               if (ops->family == family)
+                       return ops;
+       }
+
+       return NULL;
+}
+
+/**
+ * __rtnl_af_register - Register rtnl_af_ops with rtnetlink.
+ * @ops: struct rtnl_af_ops * to register
+ *
+ * The caller must hold the rtnl_mutex.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int __rtnl_af_register(struct rtnl_af_ops *ops)
+{
+       list_add_tail(&ops->list, &rtnl_af_ops);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(__rtnl_af_register);
+
+/**
+ * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
+ * @ops: struct rtnl_af_ops * to register
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int rtnl_af_register(struct rtnl_af_ops *ops)
+{
+       int err;
+
+       rtnl_lock();
+       err = __rtnl_af_register(ops);
+       rtnl_unlock();
+       return err;
+}
+EXPORT_SYMBOL_GPL(rtnl_af_register);
+
+/**
+ * __rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
+ * @ops: struct rtnl_af_ops * to unregister
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+void __rtnl_af_unregister(struct rtnl_af_ops *ops)
+{
+       list_del(&ops->list);
+}
+EXPORT_SYMBOL_GPL(__rtnl_af_unregister);
+
+/**
+ * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
+ * @ops: struct rtnl_af_ops * to unregister
+ */
+void rtnl_af_unregister(struct rtnl_af_ops *ops)
+{
+       rtnl_lock();
+       __rtnl_af_unregister(ops);
+       rtnl_unlock();
+}
+EXPORT_SYMBOL_GPL(rtnl_af_unregister);
+
+static size_t rtnl_link_get_af_size(const struct net_device *dev)
+{
+       struct rtnl_af_ops *af_ops;
+       size_t size;
+
+       /* IFLA_AF_SPEC */
+       size = nla_total_size(sizeof(struct nlattr));
+
+       list_for_each_entry(af_ops, &rtnl_af_ops, list) {
+               if (af_ops->get_link_af_size) {
+                       /* AF_* + nested data */
+                       size += nla_total_size(sizeof(struct nlattr)) +
+                               af_ops->get_link_af_size(dev);
+               }
+       }
+
+       return size;
+}
+
 static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
 {
        const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
@@ -671,7 +760,8 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev)
               + nla_total_size(4) /* IFLA_NUM_VF */
               + rtnl_vfinfo_size(dev) /* IFLA_VFINFO_LIST */
               + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
-              + rtnl_link_get_size(dev); /* IFLA_LINKINFO */
+              + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
+              + rtnl_link_get_af_size(dev); /* IFLA_AF_SPEC */
 }
 
 static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
@@ -757,7 +847,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
        struct nlmsghdr *nlh;
        struct rtnl_link_stats64 temp;
        const struct rtnl_link_stats64 *stats;
-       struct nlattr *attr;
+       struct nlattr *attr, *af_spec;
+       struct rtnl_af_ops *af_ops;
 
        nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
        if (nlh == NULL)
@@ -866,6 +957,36 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                        goto nla_put_failure;
        }
 
+       if (!(af_spec = nla_nest_start(skb, IFLA_AF_SPEC)))
+               goto nla_put_failure;
+
+       list_for_each_entry(af_ops, &rtnl_af_ops, list) {
+               if (af_ops->fill_link_af) {
+                       struct nlattr *af;
+                       int err;
+
+                       if (!(af = nla_nest_start(skb, af_ops->family)))
+                               goto nla_put_failure;
+
+                       err = af_ops->fill_link_af(skb, dev);
+
+                       /*
+                        * Caller may return ENODATA to indicate that there
+                        * was no data to be dumped. This is not an error; it
+                        * means we should trim the attribute header and
+                        * continue.
+                        */
+                       if (err == -ENODATA)
+                               nla_nest_cancel(skb, af);
+                       else if (err < 0)
+                               goto nla_put_failure;
+
+                       nla_nest_end(skb, af);
+               }
+       }
+
+       nla_nest_end(skb, af_spec);
+
        return nlmsg_end(skb, nlh);
 
 nla_put_failure:
@@ -924,6 +1045,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
        [IFLA_VFINFO_LIST]      = {. type = NLA_NESTED },
        [IFLA_VF_PORTS]         = { .type = NLA_NESTED },
        [IFLA_PORT_SELF]        = { .type = NLA_NESTED },
+       [IFLA_AF_SPEC]          = { .type = NLA_NESTED },
 };
 EXPORT_SYMBOL(ifla_policy);
 
@@ -1225,6 +1347,27 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
                        goto errout;
                modified = 1;
        }
+
+       if (tb[IFLA_AF_SPEC]) {
+               struct nlattr *af;
+               int rem;
+
+               nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
+                       const struct rtnl_af_ops *af_ops;
+
+                       if (!(af_ops = rtnl_af_lookup(nla_type(af))))
+                               continue;
+
+                       if (!af_ops->parse_link_af)
+                               continue;
+
+                       err = af_ops->parse_link_af(dev, af);
+                       if (err < 0)
+                               goto errout;
+
+                       modified = 1;
+               }
+       }
        err = 0;
 
 errout:
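
The hunks above introduce per-address-family hooks keyed by the new IFLA_AF_SPEC nested attribute: rtnl_af_register()/rtnl_af_unregister() maintain the rtnl_af_ops list, rtnl_fill_ifinfo() nests one attribute per registered family, and do_setlink() hands the nested data back to parse_link_af(). As a rough, hypothetical sketch (none of the "demo" names below are part of this patch), a family module would plug in roughly like this:

/* Hypothetical example: export one u32 of per-device state via IFLA_AF_SPEC.
 * DEMO_ATTR_FLAGS and all demo_* names are invented for illustration. */
#define DEMO_ATTR_FLAGS	1

static size_t demo_get_link_af_size(const struct net_device *dev)
{
	return nla_total_size(4);			/* one u32 */
}

static int demo_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
{
	return nla_put_u32(skb, DEMO_ATTR_FLAGS, 0);	/* -EMSGSIZE if full */
}

static int demo_parse_link_af(struct net_device *dev, const struct nlattr *attr)
{
	/* validate and apply the nested per-family settings here */
	return 0;
}

static struct rtnl_af_ops demo_af_ops = {
	.family		  = AF_INET6,	/* whichever family the module implements */
	.fill_link_af	  = demo_fill_link_af,
	.get_link_af_size = demo_get_link_af_size,
	.parse_link_af	  = demo_parse_link_af,
};

static int __init demo_init(void)
{
	/* rtnl_af_register() takes the RTNL lock itself; the __ variant
	 * is for callers that already hold it. */
	return rtnl_af_register(&demo_af_ops);
}

static void __exit demo_exit(void)
{
	rtnl_af_unregister(&demo_af_ops);
}
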
index 0ae6c22da85b2a3516980691c8fbcf2f7228e3d6..dac7ed687f609c83e7be6681d871d9444658006e 100644 (file)
@@ -31,7 +31,7 @@ static unsigned int classify(struct sk_buff *skb)
        if (likely(skb->dev &&
                   skb->dev->phydev &&
                   skb->dev->phydev->drv))
-               return sk_run_filter(skb, ptp_filter, ARRAY_SIZE(ptp_filter));
+               return sk_run_filter(skb, ptp_filter);
        else
                return PTP_CLASS_NONE;
 }
index 92a6fcb40d7daa4c3e1bc3b8e2b5a49c4948cf3d..25b7a8d1ad58eac67d2306e0c86ff8da7cea0b3b 100644 (file)
 /*
  *  net/dccp/ackvec.c
  *
- *  An implementation of the DCCP protocol
+ *  An implementation of Ack Vectors for the DCCP protocol
+ *  Copyright (c) 2007 University of Aberdeen, Scotland, UK
  *  Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
  *
  *      This program is free software; you can redistribute it and/or modify it
  *      under the terms of the GNU General Public License as published by the
  *      Free Software Foundation; version 2 of the License;
  */
-
-#include "ackvec.h"
 #include "dccp.h"
-
-#include <linux/init.h>
-#include <linux/errno.h>
 #include <linux/kernel.h>
-#include <linux/skbuff.h>
 #include <linux/slab.h>
 
-#include <net/sock.h>
-
 static struct kmem_cache *dccp_ackvec_slab;
 static struct kmem_cache *dccp_ackvec_record_slab;
 
-static struct dccp_ackvec_record *dccp_ackvec_record_new(void)
+struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority)
 {
-       struct dccp_ackvec_record *avr =
-                       kmem_cache_alloc(dccp_ackvec_record_slab, GFP_ATOMIC);
+       struct dccp_ackvec *av = kmem_cache_zalloc(dccp_ackvec_slab, priority);
 
-       if (avr != NULL)
-               INIT_LIST_HEAD(&avr->avr_node);
-
-       return avr;
+       if (av != NULL) {
+               av->av_buf_head = av->av_buf_tail = DCCPAV_MAX_ACKVEC_LEN - 1;
+               INIT_LIST_HEAD(&av->av_records);
+       }
+       return av;
 }
 
-static void dccp_ackvec_record_delete(struct dccp_ackvec_record *avr)
+static void dccp_ackvec_purge_records(struct dccp_ackvec *av)
 {
-       if (unlikely(avr == NULL))
-               return;
-       /* Check if deleting a linked record */
-       WARN_ON(!list_empty(&avr->avr_node));
-       kmem_cache_free(dccp_ackvec_record_slab, avr);
+       struct dccp_ackvec_record *cur, *next;
+
+       list_for_each_entry_safe(cur, next, &av->av_records, avr_node)
+               kmem_cache_free(dccp_ackvec_record_slab, cur);
+       INIT_LIST_HEAD(&av->av_records);
 }
 
-static void dccp_ackvec_insert_avr(struct dccp_ackvec *av,
-                                  struct dccp_ackvec_record *avr)
+void dccp_ackvec_free(struct dccp_ackvec *av)
 {
-       /*
-        * AVRs are sorted by seqno. Since we are sending them in order, we
-        * just add the AVR at the head of the list.
-        * -sorbo.
-        */
-       if (!list_empty(&av->av_records)) {
-               const struct dccp_ackvec_record *head =
-                                       list_entry(av->av_records.next,
-                                                  struct dccp_ackvec_record,
-                                                  avr_node);
-               BUG_ON(before48(avr->avr_ack_seqno, head->avr_ack_seqno));
+       if (likely(av != NULL)) {
+               dccp_ackvec_purge_records(av);
+               kmem_cache_free(dccp_ackvec_slab, av);
        }
-
-       list_add(&avr->avr_node, &av->av_records);
 }
 
-int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb)
+/**
+ * dccp_ackvec_update_records  -  Record information about sent Ack Vectors
+ * @av:                Ack Vector records to update
+ * @seqno:     Sequence number of the packet carrying the Ack Vector just sent
+ * @nonce_sum: The sum of all buffer nonces contained in the Ack Vector
+ */
+int dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seqno, u8 nonce_sum)
 {
-       struct dccp_sock *dp = dccp_sk(sk);
-       struct dccp_ackvec *av = dp->dccps_hc_rx_ackvec;
-       /* Figure out how many options do we need to represent the ackvec */
-       const u8 nr_opts = DIV_ROUND_UP(av->av_vec_len, DCCP_SINGLE_OPT_MAXLEN);
-       u16 len = av->av_vec_len + 2 * nr_opts, i;
-       u32 elapsed_time;
-       const unsigned char *tail, *from;
-       unsigned char *to;
        struct dccp_ackvec_record *avr;
-       suseconds_t delta;
-
-       if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN)
-               return -1;
-
-       delta = ktime_us_delta(ktime_get_real(), av->av_time);
-       elapsed_time = delta / 10;
 
-       if (elapsed_time != 0 &&
-           dccp_insert_option_elapsed_time(skb, elapsed_time))
-               return -1;
-
-       avr = dccp_ackvec_record_new();
+       avr = kmem_cache_alloc(dccp_ackvec_record_slab, GFP_ATOMIC);
        if (avr == NULL)
-               return -1;
-
-       DCCP_SKB_CB(skb)->dccpd_opt_len += len;
-
-       to   = skb_push(skb, len);
-       len  = av->av_vec_len;
-       from = av->av_buf + av->av_buf_head;
-       tail = av->av_buf + DCCP_MAX_ACKVEC_LEN;
-
-       for (i = 0; i < nr_opts; ++i) {
-               int copylen = len;
-
-               if (len > DCCP_SINGLE_OPT_MAXLEN)
-                       copylen = DCCP_SINGLE_OPT_MAXLEN;
-
-               *to++ = DCCPO_ACK_VECTOR_0;
-               *to++ = copylen + 2;
-
-               /* Check if buf_head wraps */
-               if (from + copylen > tail) {
-                       const u16 tailsize = tail - from;
-
-                       memcpy(to, from, tailsize);
-                       to      += tailsize;
-                       len     -= tailsize;
-                       copylen -= tailsize;
-                       from    = av->av_buf;
-               }
-
-               memcpy(to, from, copylen);
-               from += copylen;
-               to   += copylen;
-               len  -= copylen;
-       }
+               return -ENOBUFS;
 
+       avr->avr_ack_seqno  = seqno;
+       avr->avr_ack_ptr    = av->av_buf_head;
+       avr->avr_ack_ackno  = av->av_buf_ackno;
+       avr->avr_ack_nonce  = nonce_sum;
+       avr->avr_ack_runlen = dccp_ackvec_runlen(av->av_buf + av->av_buf_head);
        /*
-        *      From RFC 4340, A.2:
-        *
-        *      For each acknowledgement it sends, the HC-Receiver will add an
-        *      acknowledgement record.  ack_seqno will equal the HC-Receiver
-        *      sequence number it used for the ack packet; ack_ptr will equal
-        *      buf_head; ack_ackno will equal buf_ackno; and ack_nonce will
-        *      equal buf_nonce.
+        * When the buffer overflows, we keep no more than one record. This is
+        * the simplest way of disambiguating sender-Acks dating from before the
+        * overflow from sender-Acks which refer to after the overflow; a simple
+        * solution is preferable here since we are handling an exception.
         */
-       avr->avr_ack_seqno = DCCP_SKB_CB(skb)->dccpd_seq;
-       avr->avr_ack_ptr   = av->av_buf_head;
-       avr->avr_ack_ackno = av->av_buf_ackno;
-       avr->avr_ack_nonce = av->av_buf_nonce;
-       avr->avr_sent_len  = av->av_vec_len;
-
-       dccp_ackvec_insert_avr(av, avr);
+       if (av->av_overflow)
+               dccp_ackvec_purge_records(av);
+       /*
+        * Since GSS is incremented for each packet, the list is automatically
+        * arranged in descending order of @ack_seqno.
+        */
+       list_add(&avr->avr_node, &av->av_records);
 
-       dccp_pr_debug("%s ACK Vector 0, len=%d, ack_seqno=%llu, "
-                     "ack_ackno=%llu\n",
-                     dccp_role(sk), avr->avr_sent_len,
+       dccp_pr_debug("Added Vector, ack_seqno=%llu, ack_ackno=%llu (rl=%u)\n",
                      (unsigned long long)avr->avr_ack_seqno,
-                     (unsigned long long)avr->avr_ack_ackno);
+                     (unsigned long long)avr->avr_ack_ackno,
+                     avr->avr_ack_runlen);
        return 0;
 }
 
-struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority)
+static struct dccp_ackvec_record *dccp_ackvec_lookup(struct list_head *av_list,
+                                                    const u64 ackno)
 {
-       struct dccp_ackvec *av = kmem_cache_alloc(dccp_ackvec_slab, priority);
-
-       if (av != NULL) {
-               av->av_buf_head  = DCCP_MAX_ACKVEC_LEN - 1;
-               av->av_buf_ackno = UINT48_MAX + 1;
-               av->av_buf_nonce = 0;
-               av->av_time      = ktime_set(0, 0);
-               av->av_vec_len   = 0;
-               INIT_LIST_HEAD(&av->av_records);
+       struct dccp_ackvec_record *avr;
+       /*
+        * Exploit that records are inserted in descending order of sequence
+        * number, start with the oldest record first. If @ackno is `before'
+        * the earliest ack_ackno, the packet is too old to be considered.
+        */
+       list_for_each_entry_reverse(avr, av_list, avr_node) {
+               if (avr->avr_ack_seqno == ackno)
+                       return avr;
+               if (before48(ackno, avr->avr_ack_seqno))
+                       break;
        }
-
-       return av;
+       return NULL;
 }
 
-void dccp_ackvec_free(struct dccp_ackvec *av)
+/*
+ * Buffer index and length computation using modulo-buffersize arithmetic.
+ * Note that, as pointers move from right to left, head is `before' tail.
+ */
+static inline u16 __ackvec_idx_add(const u16 a, const u16 b)
 {
-       if (unlikely(av == NULL))
-               return;
-
-       if (!list_empty(&av->av_records)) {
-               struct dccp_ackvec_record *avr, *next;
-
-               list_for_each_entry_safe(avr, next, &av->av_records, avr_node) {
-                       list_del_init(&avr->avr_node);
-                       dccp_ackvec_record_delete(avr);
-               }
-       }
-
-       kmem_cache_free(dccp_ackvec_slab, av);
+       return (a + b) % DCCPAV_MAX_ACKVEC_LEN;
 }
 
-static inline u8 dccp_ackvec_state(const struct dccp_ackvec *av,
-                                  const u32 index)
+static inline u16 __ackvec_idx_sub(const u16 a, const u16 b)
 {
-       return av->av_buf[index] & DCCP_ACKVEC_STATE_MASK;
+       return __ackvec_idx_add(a, DCCPAV_MAX_ACKVEC_LEN - b);
 }
 
-static inline u8 dccp_ackvec_len(const struct dccp_ackvec *av,
-                                const u32 index)
+u16 dccp_ackvec_buflen(const struct dccp_ackvec *av)
 {
-       return av->av_buf[index] & DCCP_ACKVEC_LEN_MASK;
+       if (unlikely(av->av_overflow))
+               return DCCPAV_MAX_ACKVEC_LEN;
+       return __ackvec_idx_sub(av->av_buf_tail, av->av_buf_head);
 }
 
-/*
- * If several packets are missing, the HC-Receiver may prefer to enter multiple
- * bytes with run length 0, rather than a single byte with a larger run length;
- * this simplifies table updates if one of the missing packets arrives.
+/**
+ * dccp_ackvec_update_old  -  Update previous state as per RFC 4340, 11.4.1
+ * @av:                non-empty buffer to update
+ * @distance:   negative or zero distance of @seqno from buf_ackno downward
+ * @seqno:     the (old) sequence number whose record is to be updated
+ * @state:     state in which packet carrying @seqno was received
  */
-static inline int dccp_ackvec_set_buf_head_state(struct dccp_ackvec *av,
-                                                const unsigned int packets,
-                                                const unsigned char state)
+static void dccp_ackvec_update_old(struct dccp_ackvec *av, s64 distance,
+                                  u64 seqno, enum dccp_ackvec_states state)
 {
-       long gap;
-       long new_head;
+       u16 ptr = av->av_buf_head;
 
-       if (av->av_vec_len + packets > DCCP_MAX_ACKVEC_LEN)
-               return -ENOBUFS;
+       BUG_ON(distance > 0);
+       if (unlikely(dccp_ackvec_is_empty(av)))
+               return;
 
-       gap      = packets - 1;
-       new_head = av->av_buf_head - packets;
+       do {
+               u8 runlen = dccp_ackvec_runlen(av->av_buf + ptr);
 
-       if (new_head < 0) {
-               if (gap > 0) {
-                       memset(av->av_buf, DCCP_ACKVEC_STATE_NOT_RECEIVED,
-                              gap + new_head + 1);
-                       gap = -new_head;
+               if (distance + runlen >= 0) {
+                       /*
+                        * Only update the state if packet has not been received
+                        * yet. This is OK as per the second table in RFC 4340,
+                        * 11.4.1; i.e. here we are using the following table:
+                        *                     RECEIVED
+                        *                      0   1   3
+                        *              S     +---+---+---+
+                        *              T   0 | 0 | 0 | 0 |
+                        *              O     +---+---+---+
+                        *              R   1 | 1 | 1 | 1 |
+                        *              E     +---+---+---+
+                        *              D   3 | 0 | 1 | 3 |
+                        *                    +---+---+---+
+                        * The "Not Received" state was set by reserve_seats().
+                        */
+                       if (av->av_buf[ptr] == DCCPAV_NOT_RECEIVED)
+                               av->av_buf[ptr] = state;
+                       else
+                               dccp_pr_debug("Not changing %llu state to %u\n",
+                                             (unsigned long long)seqno, state);
+                       break;
                }
-               new_head += DCCP_MAX_ACKVEC_LEN;
-       }
 
-       av->av_buf_head = new_head;
+               distance += runlen + 1;
+               ptr       = __ackvec_idx_add(ptr, 1);
 
-       if (gap > 0)
-               memset(av->av_buf + av->av_buf_head + 1,
-                      DCCP_ACKVEC_STATE_NOT_RECEIVED, gap);
+       } while (ptr != av->av_buf_tail);
+}
 
-       av->av_buf[av->av_buf_head] = state;
-       av->av_vec_len += packets;
-       return 0;
+/* Mark @num entries after buf_head as "Not yet received". */
+static void dccp_ackvec_reserve_seats(struct dccp_ackvec *av, u16 num)
+{
+       u16 start = __ackvec_idx_add(av->av_buf_head, 1),
+           len   = DCCPAV_MAX_ACKVEC_LEN - start;
+
+       /* check for buffer wrap-around */
+       if (num > len) {
+               memset(av->av_buf + start, DCCPAV_NOT_RECEIVED, len);
+               start = 0;
+               num  -= len;
+       }
+       if (num)
+               memset(av->av_buf + start, DCCPAV_NOT_RECEIVED, num);
 }
 
-/*
- * Implements the RFC 4340, Appendix A
+/**
+ * dccp_ackvec_add_new  -  Record one or more new entries in Ack Vector buffer
+ * @av:                 container of buffer to update (can be empty or non-empty)
+ * @num_packets: number of packets to register (must be >= 1)
+ * @seqno:      sequence number of the first packet in @num_packets
+ * @state:      state in which packet carrying @seqno was received
  */
-int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk,
-                   const u64 ackno, const u8 state)
+static void dccp_ackvec_add_new(struct dccp_ackvec *av, u32 num_packets,
+                               u64 seqno, enum dccp_ackvec_states state)
 {
-       /*
-        * Check at the right places if the buffer is full, if it is, tell the
-        * caller to start dropping packets till the HC-Sender acks our ACK
-        * vectors, when we will free up space in av_buf.
-        *
-        * We may well decide to do buffer compression, etc, but for now lets
-        * just drop.
-        *
-        * From Appendix A.1.1 (`New Packets'):
-        *
-        *      Of course, the circular buffer may overflow, either when the
-        *      HC-Sender is sending data at a very high rate, when the
-        *      HC-Receiver's acknowledgements are not reaching the HC-Sender,
-        *      or when the HC-Sender is forgetting to acknowledge those acks
-        *      (so the HC-Receiver is unable to clean up old state). In this
-        *      case, the HC-Receiver should either compress the buffer (by
-        *      increasing run lengths when possible), transfer its state to
-        *      a larger buffer, or, as a last resort, drop all received
-        *      packets, without processing them whatsoever, until its buffer
-        *      shrinks again.
-        */
+       u32 num_cells = num_packets;
 
-       /* See if this is the first ackno being inserted */
-       if (av->av_vec_len == 0) {
-               av->av_buf[av->av_buf_head] = state;
-               av->av_vec_len = 1;
-       } else if (after48(ackno, av->av_buf_ackno)) {
-               const u64 delta = dccp_delta_seqno(av->av_buf_ackno, ackno);
+       if (num_packets > DCCPAV_BURST_THRESH) {
+               u32 lost_packets = num_packets - 1;
 
+               DCCP_WARN("Warning: large burst loss (%u)\n", lost_packets);
                /*
-                * Look if the state of this packet is the same as the
-                * previous ackno and if so if we can bump the head len.
+                * We received 1 packet and have a loss of size "num_packets-1"
+                * which we squeeze into num_cells-1 rather than reserving an
+                * entire byte for each lost packet.
+                * The reason is that the vector grows in O(burst_length); when
+                * it grows too large there will be no room left for the payload.
+                * This is a trade-off: if a few packets out of the burst show
+                * up later, their state will not be changed; it is simply too
+                * costly to reshuffle/reallocate/copy the buffer each time.
+                * Should such problems persist, we will need to switch to a
+                * different underlying data structure.
                 */
-               if (delta == 1 &&
-                   dccp_ackvec_state(av, av->av_buf_head) == state &&
-                   dccp_ackvec_len(av, av->av_buf_head) < DCCP_ACKVEC_LEN_MASK)
-                       av->av_buf[av->av_buf_head]++;
-               else if (dccp_ackvec_set_buf_head_state(av, delta, state))
-                       return -ENOBUFS;
-       } else {
-               /*
-                * A.1.2.  Old Packets
-                *
-                *      When a packet with Sequence Number S <= buf_ackno
-                *      arrives, the HC-Receiver will scan the table for
-                *      the byte corresponding to S. (Indexing structures
-                *      could reduce the complexity of this scan.)
-                */
-               u64 delta = dccp_delta_seqno(ackno, av->av_buf_ackno);
-               u32 index = av->av_buf_head;
+               for (num_packets = num_cells = 1; lost_packets; ++num_cells) {
+                       u8 len = min(lost_packets, (u32)DCCPAV_MAX_RUNLEN);
 
-               while (1) {
-                       const u8 len = dccp_ackvec_len(av, index);
-                       const u8 av_state = dccp_ackvec_state(av, index);
-                       /*
-                        * valid packets not yet in av_buf have a reserved
-                        * entry, with a len equal to 0.
-                        */
-                       if (av_state == DCCP_ACKVEC_STATE_NOT_RECEIVED &&
-                           len == 0 && delta == 0) { /* Found our
-                                                        reserved seat! */
-                               dccp_pr_debug("Found %llu reserved seat!\n",
-                                             (unsigned long long)ackno);
-                               av->av_buf[index] = state;
-                               goto out;
-                       }
-                       /* len == 0 means one packet */
-                       if (delta < len + 1)
-                               goto out_duplicate;
-
-                       delta -= len + 1;
-                       if (++index == DCCP_MAX_ACKVEC_LEN)
-                               index = 0;
+                       av->av_buf_head = __ackvec_idx_sub(av->av_buf_head, 1);
+                       av->av_buf[av->av_buf_head] = DCCPAV_NOT_RECEIVED | len;
+
+                       lost_packets -= len;
                }
        }
 
-       av->av_buf_ackno = ackno;
-       av->av_time = ktime_get_real();
-out:
-       return 0;
+       if (num_cells + dccp_ackvec_buflen(av) >= DCCPAV_MAX_ACKVEC_LEN) {
+               DCCP_CRIT("Ack Vector buffer overflow: dropping old entries\n");
+               av->av_overflow = true;
+       }
+
+       av->av_buf_head = __ackvec_idx_sub(av->av_buf_head, num_packets);
+       if (av->av_overflow)
+               av->av_buf_tail = av->av_buf_head;
 
-out_duplicate:
-       /* Duplicate packet */
-       dccp_pr_debug("Received a dup or already considered lost "
-                     "packet: %llu\n", (unsigned long long)ackno);
-       return -EILSEQ;
+       av->av_buf[av->av_buf_head] = state;
+       av->av_buf_ackno            = seqno;
+
+       if (num_packets > 1)
+               dccp_ackvec_reserve_seats(av, num_packets - 1);
 }
 
-static void dccp_ackvec_throw_record(struct dccp_ackvec *av,
-                                    struct dccp_ackvec_record *avr)
+/**
+ * dccp_ackvec_input  -  Register incoming packet in the buffer
+ */
+void dccp_ackvec_input(struct dccp_ackvec *av, struct sk_buff *skb)
 {
-       struct dccp_ackvec_record *next;
+       u64 seqno = DCCP_SKB_CB(skb)->dccpd_seq;
+       enum dccp_ackvec_states state = DCCPAV_RECEIVED;
 
-       /* sort out vector length */
-       if (av->av_buf_head <= avr->avr_ack_ptr)
-               av->av_vec_len = avr->avr_ack_ptr - av->av_buf_head;
-       else
-               av->av_vec_len = DCCP_MAX_ACKVEC_LEN - 1 -
-                                av->av_buf_head + avr->avr_ack_ptr;
+       if (dccp_ackvec_is_empty(av)) {
+               dccp_ackvec_add_new(av, 1, seqno, state);
+               av->av_tail_ackno = seqno;
 
-       /* free records */
-       list_for_each_entry_safe_from(avr, next, &av->av_records, avr_node) {
-               list_del_init(&avr->avr_node);
-               dccp_ackvec_record_delete(avr);
-       }
-}
+       } else {
+               s64 num_packets = dccp_delta_seqno(av->av_buf_ackno, seqno);
+               u8 *current_head = av->av_buf + av->av_buf_head;
 
-void dccp_ackvec_check_rcv_ackno(struct dccp_ackvec *av, struct sock *sk,
-                                const u64 ackno)
-{
-       struct dccp_ackvec_record *avr;
+               if (num_packets == 1 &&
+                   dccp_ackvec_state(current_head) == state &&
+                   dccp_ackvec_runlen(current_head) < DCCPAV_MAX_RUNLEN) {
 
-       /*
-        * If we traverse backwards, it should be faster when we have large
-        * windows. We will be receiving ACKs for stuff we sent a while back
-        * -sorbo.
-        */
-       list_for_each_entry_reverse(avr, &av->av_records, avr_node) {
-               if (ackno == avr->avr_ack_seqno) {
-                       dccp_pr_debug("%s ACK packet 0, len=%d, ack_seqno=%llu, "
-                                     "ack_ackno=%llu, ACKED!\n",
-                                     dccp_role(sk), 1,
-                                     (unsigned long long)avr->avr_ack_seqno,
-                                     (unsigned long long)avr->avr_ack_ackno);
-                       dccp_ackvec_throw_record(av, avr);
-                       break;
-               } else if (avr->avr_ack_seqno > ackno)
-                       break; /* old news */
+                       *current_head   += 1;
+                       av->av_buf_ackno = seqno;
+
+               } else if (num_packets > 0) {
+                       dccp_ackvec_add_new(av, num_packets, seqno, state);
+               } else {
+                       dccp_ackvec_update_old(av, num_packets, seqno, state);
+               }
        }
 }
 
-static void dccp_ackvec_check_rcv_ackvector(struct dccp_ackvec *av,
-                                           struct sock *sk, u64 *ackno,
-                                           const unsigned char len,
-                                           const unsigned char *vector)
+/**
+ * dccp_ackvec_clear_state  -  Perform house-keeping / garbage-collection
+ * This routine is called when the peer acknowledges the receipt of Ack Vectors
+ * up to and including @ackno. While based on section A.3 of RFC 4340, here
+ * are additional precautions to prevent corrupted buffer state. In particular,
+ * we use tail_ackno to identify outdated records; it always marks the earliest
+ * packet of group (2) in 11.4.2.
+ */
+void dccp_ackvec_clear_state(struct dccp_ackvec *av, const u64 ackno)
 {
-       unsigned char i;
-       struct dccp_ackvec_record *avr;
+       struct dccp_ackvec_record *avr, *next;
+       u8 runlen_now, eff_runlen;
+       s64 delta;
 
-       /* Check if we actually sent an ACK vector */
-       if (list_empty(&av->av_records))
+       avr = dccp_ackvec_lookup(&av->av_records, ackno);
+       if (avr == NULL)
                return;
+       /*
+        * Deal with outdated acknowledgments: this arises when e.g. there are
+        * several old records and the acks from the peer come in slowly. In
+        * that case we may still have records that pre-date tail_ackno.
+        */
+       delta = dccp_delta_seqno(av->av_tail_ackno, avr->avr_ack_ackno);
+       if (delta < 0)
+               goto free_records;
+       /*
+        * Deal with overlapping Ack Vectors: don't subtract more than the
+        * number of packets between tail_ackno and ack_ackno.
+        */
+       eff_runlen = delta < avr->avr_ack_runlen ? delta : avr->avr_ack_runlen;
 
-       i = len;
+       runlen_now = dccp_ackvec_runlen(av->av_buf + avr->avr_ack_ptr);
        /*
-        * XXX
-        * I think it might be more efficient to work backwards. See comment on
-        * rcv_ackno. -sorbo.
+        * The run length of Ack Vector cells does not decrease over time. If
+        * the run length is the same as at the time the Ack Vector was sent, we
+        * free the ack_ptr cell. That cell can however not be freed if the run
+        * length has increased: in this case we need to move the tail pointer
+        * backwards (towards higher indices), to its next-oldest neighbour.
         */
-       avr = list_entry(av->av_records.next, struct dccp_ackvec_record, avr_node);
-       while (i--) {
-               const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK;
-               u64 ackno_end_rl;
+       if (runlen_now > eff_runlen) {
 
-               dccp_set_seqno(&ackno_end_rl, *ackno - rl);
+               av->av_buf[avr->avr_ack_ptr] -= eff_runlen + 1;
+               av->av_buf_tail = __ackvec_idx_add(avr->avr_ack_ptr, 1);
 
+               /* This move may not have cleared the overflow flag. */
+               if (av->av_overflow)
+                       av->av_overflow = (av->av_buf_head == av->av_buf_tail);
+       } else {
+               av->av_buf_tail = avr->avr_ack_ptr;
                /*
-                * If our AVR sequence number is greater than the ack, go
-                * forward in the AVR list until it is not so.
+                * We have made sure that avr points to a valid cell within the
+                * buffer. This cell is either older than head, or equals head
+                * (empty buffer): in both cases we no longer have any overflow.
                 */
-               list_for_each_entry_from(avr, &av->av_records, avr_node) {
-                       if (!after48(avr->avr_ack_seqno, *ackno))
-                               goto found;
-               }
-               /* End of the av_records list, not found, exit */
-               break;
-found:
-               if (between48(avr->avr_ack_seqno, ackno_end_rl, *ackno)) {
-                       const u8 state = *vector & DCCP_ACKVEC_STATE_MASK;
-                       if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED) {
-                               dccp_pr_debug("%s ACK vector 0, len=%d, "
-                                             "ack_seqno=%llu, ack_ackno=%llu, "
-                                             "ACKED!\n",
-                                             dccp_role(sk), len,
-                                             (unsigned long long)
-                                             avr->avr_ack_seqno,
-                                             (unsigned long long)
-                                             avr->avr_ack_ackno);
-                               dccp_ackvec_throw_record(av, avr);
-                               break;
-                       }
-                       /*
-                        * If it wasn't received, continue scanning... we might
-                        * find another one.
-                        */
-               }
+               av->av_overflow = 0;
+       }
 
-               dccp_set_seqno(ackno, ackno_end_rl - 1);
-               ++vector;
+       /*
+        * The peer has acknowledged up to and including ack_ackno. Hence the
+        * first packet in group (2) of 11.4.2 is the successor of ack_ackno.
+        */
+       av->av_tail_ackno = ADD48(avr->avr_ack_ackno, 1);
+
+free_records:
+       list_for_each_entry_safe_from(avr, next, &av->av_records, avr_node) {
+               list_del(&avr->avr_node);
+               kmem_cache_free(dccp_ackvec_record_slab, avr);
        }
 }
 
-int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb,
-                     u64 *ackno, const u8 opt, const u8 *value, const u8 len)
+/*
+ *     Routines to keep track of Ack Vectors received in an skb
+ */
+int dccp_ackvec_parsed_add(struct list_head *head, u8 *vec, u8 len, u8 nonce)
 {
-       if (len > DCCP_SINGLE_OPT_MAXLEN)
-               return -1;
+       struct dccp_ackvec_parsed *new = kmalloc(sizeof(*new), GFP_ATOMIC);
+
+       if (new == NULL)
+               return -ENOBUFS;
+       new->vec   = vec;
+       new->len   = len;
+       new->nonce = nonce;
 
-       /* dccp_ackvector_print(DCCP_SKB_CB(skb)->dccpd_ack_seq, value, len); */
-       dccp_ackvec_check_rcv_ackvector(dccp_sk(sk)->dccps_hc_rx_ackvec, sk,
-                                       ackno, len, value);
+       list_add_tail(&new->node, head);
        return 0;
 }
+EXPORT_SYMBOL_GPL(dccp_ackvec_parsed_add);
+
+void dccp_ackvec_parsed_cleanup(struct list_head *parsed_chunks)
+{
+       struct dccp_ackvec_parsed *cur, *next;
+
+       list_for_each_entry_safe(cur, next, parsed_chunks, node)
+               kfree(cur);
+       INIT_LIST_HEAD(parsed_chunks);
+}
+EXPORT_SYMBOL_GPL(dccp_ackvec_parsed_cleanup);
 
 int __init dccp_ackvec_init(void)
 {
@@ -448,10 +379,9 @@ int __init dccp_ackvec_init(void)
        if (dccp_ackvec_slab == NULL)
                goto out_err;
 
-       dccp_ackvec_record_slab =
-                       kmem_cache_create("dccp_ackvec_record",
-                                         sizeof(struct dccp_ackvec_record),
-                                         0, SLAB_HWCACHE_ALIGN, NULL);
+       dccp_ackvec_record_slab = kmem_cache_create("dccp_ackvec_record",
+                                            sizeof(struct dccp_ackvec_record),
+                                            0, SLAB_HWCACHE_ALIGN, NULL);
        if (dccp_ackvec_record_slab == NULL)
                goto out_destroy_slab;
 
index 7ea557b7c6b15aa15bf326622ce3ba400f389fa5..e2ab0627a5ff6c2a63d35cf7641e6ac5dbbd5f4e 100644 (file)
@@ -3,9 +3,9 @@
 /*
  *  net/dccp/ackvec.h
  *
- *  An implementation of the DCCP protocol
+ *  An implementation of Ack Vectors for the DCCP protocol
+ *  Copyright (c) 2007 University of Aberdeen, Scotland, UK
  *  Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@mandriva.com>
- *
  *     This program is free software; you can redistribute it and/or modify it
  *     under the terms of the GNU General Public License version 2 as
  *     published by the Free Software Foundation.
 
 #include <linux/dccp.h>
 #include <linux/compiler.h>
-#include <linux/ktime.h>
 #include <linux/list.h>
 #include <linux/types.h>
 
-/* We can spread an ack vector across multiple options */
-#define DCCP_MAX_ACKVEC_LEN (DCCP_SINGLE_OPT_MAXLEN * 2)
+/*
+ * Ack Vector buffer space is static, in multiples of %DCCP_SINGLE_OPT_MAXLEN,
+ * the maximum size of a single Ack Vector. Setting %DCCPAV_NUM_ACKVECS to 1
+ * will be sufficient for most cases of low Ack Ratios; using a value of 2 gives
+ * more headroom if Ack Ratio is higher or when the sender acknowledges slowly.
+ * The maximum value is bounded by the u16 types for indices and functions.
+ */
+#define DCCPAV_NUM_ACKVECS     2
+#define DCCPAV_MAX_ACKVEC_LEN  (DCCP_SINGLE_OPT_MAXLEN * DCCPAV_NUM_ACKVECS)
 
 /* Estimated minimum average Ack Vector length - used for updating MPS */
 #define DCCPAV_MIN_OPTLEN      16
 
-#define DCCP_ACKVEC_STATE_RECEIVED     0
-#define DCCP_ACKVEC_STATE_ECN_MARKED   (1 << 6)
-#define DCCP_ACKVEC_STATE_NOT_RECEIVED (3 << 6)
+/* Threshold for coping with large bursts of losses */
+#define DCCPAV_BURST_THRESH    (DCCPAV_MAX_ACKVEC_LEN / 8)
 
-#define DCCP_ACKVEC_STATE_MASK         0xC0 /* 11000000 */
-#define DCCP_ACKVEC_LEN_MASK           0x3F /* 00111111 */
+enum dccp_ackvec_states {
+       DCCPAV_RECEIVED =       0x00,
+       DCCPAV_ECN_MARKED =     0x40,
+       DCCPAV_RESERVED =       0x80,
+       DCCPAV_NOT_RECEIVED =   0xC0
+};
+#define DCCPAV_MAX_RUNLEN      0x3F
 
-/** struct dccp_ackvec - ack vector
- *
- * This data structure is the one defined in RFC 4340, Appendix A.
- *
- * @av_buf_head - circular buffer head
- * @av_buf_tail - circular buffer tail
- * @av_buf_ackno - ack # of the most recent packet acknowledgeable in the
- *                    buffer (i.e. %av_buf_head)
- * @av_buf_nonce - the one-bit sum of the ECN Nonces on all packets acked
- *                    by the buffer with State 0
- *
- * Additionally, the HC-Receiver must keep some information about the
- * Ack Vectors it has recently sent. For each packet sent carrying an
- * Ack Vector, it remembers four variables:
+static inline u8 dccp_ackvec_runlen(const u8 *cell)
+{
+       return *cell & DCCPAV_MAX_RUNLEN;
+}
+
+static inline u8 dccp_ackvec_state(const u8 *cell)
+{
+       return *cell & ~DCCPAV_MAX_RUNLEN;
+}
+
+/** struct dccp_ackvec - Ack Vector main data structure
  *
- * @av_records - list of dccp_ackvec_record
- * @av_ack_nonce - the one-bit sum of the ECN Nonces for all State 0.
+ * This implements a fixed-size circular buffer within an array and is largely
+ * based on Appendix A of RFC 4340.
  *
- * @av_time - the time in usecs
- * @av_buf - circular buffer of acknowledgeable packets
+ * @av_buf:       circular buffer storage area
+ * @av_buf_head:   head index; begin of live portion in @av_buf
+ * @av_buf_tail:   tail index; first index _after_ the live portion in @av_buf
+ * @av_buf_ackno:  highest seqno of acknowledgeable packet recorded in @av_buf
+ * @av_tail_ackno: lowest  seqno of acknowledgeable packet recorded in @av_buf
+ * @av_buf_nonce:  ECN nonce sums, each covering subsequent segments of up to
+ *                %DCCP_SINGLE_OPT_MAXLEN cells in the live portion of @av_buf
+ * @av_overflow:   if 1 then buf_head == buf_tail indicates buffer wraparound
+ * @av_records:           list of %dccp_ackvec_record (Ack Vectors sent previously)
  */
 struct dccp_ackvec {
-       u64                     av_buf_ackno;
-       struct list_head        av_records;
-       ktime_t                 av_time;
+       u8                      av_buf[DCCPAV_MAX_ACKVEC_LEN];
        u16                     av_buf_head;
-       u16                     av_vec_len;
-       u8                      av_buf_nonce;
-       u8                      av_ack_nonce;
-       u8                      av_buf[DCCP_MAX_ACKVEC_LEN];
+       u16                     av_buf_tail;
+       u64                     av_buf_ackno:48;
+       u64                     av_tail_ackno:48;
+       bool                    av_buf_nonce[DCCPAV_NUM_ACKVECS];
+       u8                      av_overflow:1;
+       struct list_head        av_records;
 };
 
-/** struct dccp_ackvec_record - ack vector record
+/** struct dccp_ackvec_record - Records information about sent Ack Vectors
  *
- * ACK vector record as defined in Appendix A of spec.
+ * These list entries define the additional information which the HC-Receiver
+ * keeps about recently-sent Ack Vectors; again refer to RFC 4340, Appendix A.
  *
- * The list is sorted by avr_ack_seqno
+ * @avr_node:      the list node in @av_records
+ * @avr_ack_seqno:  sequence number of the packet the Ack Vector was sent on
+ * @avr_ack_ackno:  the Ack number that this record/Ack Vector refers to
+ * @avr_ack_ptr:    pointer into @av_buf where this record starts
+ * @avr_ack_runlen: run length of @avr_ack_ptr at the time of sending
+ * @avr_ack_nonce:  the sum of @av_buf_nonce's at the time this record was sent
  *
- * @avr_node - node in av_records
- * @avr_ack_seqno - sequence number of the packet this record was sent on
- * @avr_ack_ackno - sequence number being acknowledged
- * @avr_ack_ptr - pointer into av_buf where this record starts
- * @avr_ack_nonce - av_ack_nonce at the time this record was sent
- * @avr_sent_len - lenght of the record in av_buf
+ * The list as a whole is sorted in descending order by @avr_ack_seqno.
  */
 struct dccp_ackvec_record {
        struct list_head avr_node;
-       u64              avr_ack_seqno;
-       u64              avr_ack_ackno;
+       u64              avr_ack_seqno:48;
+       u64              avr_ack_ackno:48;
        u16              avr_ack_ptr;
-       u16              avr_sent_len;
-       u8               avr_ack_nonce;
+       u8               avr_ack_runlen;
+       u8               avr_ack_nonce:1;
 };
 
-struct sock;
-struct sk_buff;
-
 extern int dccp_ackvec_init(void);
 extern void dccp_ackvec_exit(void);
 
 extern struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority);
 extern void dccp_ackvec_free(struct dccp_ackvec *av);
 
-extern int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk,
-                          const u64 ackno, const u8 state);
-
-extern void dccp_ackvec_check_rcv_ackno(struct dccp_ackvec *av,
-                                       struct sock *sk, const u64 ackno);
-extern int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb,
-                            u64 *ackno, const u8 opt,
-                            const u8 *value, const u8 len);
+extern void dccp_ackvec_input(struct dccp_ackvec *av, struct sk_buff *skb);
+extern int  dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seq, u8 sum);
+extern void dccp_ackvec_clear_state(struct dccp_ackvec *av, const u64 ackno);
+extern u16  dccp_ackvec_buflen(const struct dccp_ackvec *av);
 
-extern int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb);
-
-static inline int dccp_ackvec_pending(const struct dccp_ackvec *av)
+static inline bool dccp_ackvec_is_empty(const struct dccp_ackvec *av)
 {
-       return av->av_vec_len;
+       return av->av_overflow == 0 && av->av_buf_head == av->av_buf_tail;
 }
+
+/**
+ * struct dccp_ackvec_parsed  -  Record offsets of Ack Vectors in skb
+ * @vec:       start of vector (offset into skb)
+ * @len:       length of @vec
+ * @nonce:     whether @vec had an ECN nonce of 0 or 1
+ * @node:      FIFO - arranged in descending order of ack_ackno
+ * This structure is used by CCIDs to access Ack Vectors in a received skb.
+ */
+struct dccp_ackvec_parsed {
+       u8               *vec,
+                        len,
+                        nonce:1;
+       struct list_head node;
+};
+
+extern int dccp_ackvec_parsed_add(struct list_head *head,
+                                 u8 *vec, u8 len, u8 nonce);
+extern void dccp_ackvec_parsed_cleanup(struct list_head *parsed_chunks);
 #endif /* _ACKVEC_H */
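
Each av_buf cell packs the receive state into its two most significant bits and a run length into the low six, as dccp_ackvec_state() and dccp_ackvec_runlen() above make explicit. A quick stand-alone decode for intuition (toy code, not part of the patch):

#include <stdio.h>

#define DCCPAV_MAX_RUNLEN	0x3F
#define DCCPAV_NOT_RECEIVED	0xC0

int main(void)
{
	unsigned char cell = DCCPAV_NOT_RECEIVED | 5;

	/* state 0xc0 ("Not Received") with run length 5, i.e. the cell
	 * stands for 5 + 1 = 6 consecutive packets in that state */
	printf("state=0x%02x runlen=%u\n",
	       cell & ~DCCPAV_MAX_RUNLEN, cell & DCCPAV_MAX_RUNLEN);
	return 0;
}
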
index 6576eae9e7792499f962592ce66cebf8c8d34fe7..e96d5e810039a5bcdf3a0503534e7d5ebf596047 100644 (file)
@@ -246,68 +246,6 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len)
 #endif
 }
 
-/* XXX Lame code duplication!
- * returns -1 if none was found.
- * else returns the next offset to use in the function call.
- */
-static int ccid2_ackvector(struct sock *sk, struct sk_buff *skb, int offset,
-                          unsigned char **vec, unsigned char *veclen)
-{
-       const struct dccp_hdr *dh = dccp_hdr(skb);
-       unsigned char *options = (unsigned char *)dh + dccp_hdr_len(skb);
-       unsigned char *opt_ptr;
-       const unsigned char *opt_end = (unsigned char *)dh +
-                                       (dh->dccph_doff * 4);
-       unsigned char opt, len;
-       unsigned char *value;
-
-       BUG_ON(offset < 0);
-       options += offset;
-       opt_ptr = options;
-       if (opt_ptr >= opt_end)
-               return -1;
-
-       while (opt_ptr != opt_end) {
-               opt   = *opt_ptr++;
-               len   = 0;
-               value = NULL;
-
-               /* Check if this isn't a single byte option */
-               if (opt > DCCPO_MAX_RESERVED) {
-                       if (opt_ptr == opt_end)
-                               goto out_invalid_option;
-
-                       len = *opt_ptr++;
-                       if (len < 3)
-                               goto out_invalid_option;
-                       /*
-                        * Remove the type and len fields, leaving
-                        * just the value size
-                        */
-                       len     -= 2;
-                       value   = opt_ptr;
-                       opt_ptr += len;
-
-                       if (opt_ptr > opt_end)
-                               goto out_invalid_option;
-               }
-
-               switch (opt) {
-               case DCCPO_ACK_VECTOR_0:
-               case DCCPO_ACK_VECTOR_1:
-                       *vec    = value;
-                       *veclen = len;
-                       return offset + (opt_ptr - options);
-               }
-       }
-
-       return -1;
-
-out_invalid_option:
-       DCCP_BUG("Invalid option - this should not happen (previous parsing)!");
-       return -1;
-}
-
 /**
  * ccid2_rtt_estimator - Sample RTT and compute RTO using RFC2988 algorithm
  * This code is almost identical with TCP's tcp_rtt_estimator(), since
@@ -432,16 +370,28 @@ static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
                ccid2_change_l_ack_ratio(sk, hc->tx_cwnd);
 }
 
+static int ccid2_hc_tx_parse_options(struct sock *sk, u8 packet_type,
+                                    u8 option, u8 *optval, u8 optlen)
+{
+       struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
+
+       switch (option) {
+       case DCCPO_ACK_VECTOR_0:
+       case DCCPO_ACK_VECTOR_1:
+               return dccp_ackvec_parsed_add(&hc->tx_av_chunks, optval, optlen,
+                                             option - DCCPO_ACK_VECTOR_0);
+       }
+       return 0;
+}
+
 static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 {
        struct dccp_sock *dp = dccp_sk(sk);
        struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
        const bool sender_was_blocked = ccid2_cwnd_network_limited(hc);
+       struct dccp_ackvec_parsed *avp;
        u64 ackno, seqno;
        struct ccid2_seq *seqp;
-       unsigned char *vector;
-       unsigned char veclen;
-       int offset = 0;
        int done = 0;
        unsigned int maxincr = 0;
 
@@ -475,17 +425,12 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
        }
 
        /* check forward path congestion */
-       /* still didn't send out new data packets */
-       if (hc->tx_seqh == hc->tx_seqt)
+       if (dccp_packet_without_ack(skb))
                return;
 
-       switch (DCCP_SKB_CB(skb)->dccpd_type) {
-       case DCCP_PKT_ACK:
-       case DCCP_PKT_DATAACK:
-               break;
-       default:
-               return;
-       }
+       /* still didn't send out new data packets */
+       if (hc->tx_seqh == hc->tx_seqt)
+               goto done;
 
        ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;
        if (after48(ackno, hc->tx_high_ack))
@@ -509,16 +454,16 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
                maxincr = DIV_ROUND_UP(dp->dccps_l_ack_ratio, 2);
 
        /* go through all ack vectors */
-       while ((offset = ccid2_ackvector(sk, skb, offset,
-                                        &vector, &veclen)) != -1) {
+       list_for_each_entry(avp, &hc->tx_av_chunks, node) {
                /* go through this ack vector */
-               while (veclen--) {
-                       const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK;
-                       u64 ackno_end_rl = SUB48(ackno, rl);
+               for (; avp->len--; avp->vec++) {
+                       u64 ackno_end_rl = SUB48(ackno,
+                                                dccp_ackvec_runlen(avp->vec));
 
-                       ccid2_pr_debug("ackvec start:%llu end:%llu\n",
+                       ccid2_pr_debug("ackvec %llu |%u,%u|\n",
                                       (unsigned long long)ackno,
-                                      (unsigned long long)ackno_end_rl);
+                                      dccp_ackvec_state(avp->vec) >> 6,
+                                      dccp_ackvec_runlen(avp->vec));
                        /* if the seqno we are analyzing is larger than the
                         * current ackno, then move towards the tail of our
                         * seqnos.
@@ -537,17 +482,15 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
                         * run length
                         */
                        while (between48(seqp->ccid2s_seq,ackno_end_rl,ackno)) {
-                               const u8 state = *vector &
-                                                DCCP_ACKVEC_STATE_MASK;
+                               const u8 state = dccp_ackvec_state(avp->vec);
 
                                /* new packet received or marked */
-                               if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED &&
+                               if (state != DCCPAV_NOT_RECEIVED &&
                                    !seqp->ccid2s_acked) {
-                                       if (state ==
-                                           DCCP_ACKVEC_STATE_ECN_MARKED) {
+                                       if (state == DCCPAV_ECN_MARKED)
                                                ccid2_congestion_event(sk,
                                                                       seqp);
-                                       } else
+                                       else
                                                ccid2_new_ack(sk, seqp,
                                                              &maxincr);
 
@@ -566,7 +509,6 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
                                break;
 
                        ackno = SUB48(ackno_end_rl, 1);
-                       vector++;
                }
                if (done)
                        break;
@@ -634,10 +576,11 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
                sk_stop_timer(sk, &hc->tx_rtotimer);
        else
                sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
-
+done:
        /* check if incoming Acks allow pending packets to be sent */
        if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
                tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
+       dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
 }
 
 static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
@@ -666,6 +609,7 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
        hc->tx_last_cong = ccid2_time_stamp;
        setup_timer(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire,
                        (unsigned long)sk);
+       INIT_LIST_HEAD(&hc->tx_av_chunks);
        return 0;
 }
 
@@ -699,16 +643,17 @@ static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
 }
 
 struct ccid_operations ccid2_ops = {
-       .ccid_id                = DCCPC_CCID2,
-       .ccid_name              = "TCP-like",
-       .ccid_hc_tx_obj_size    = sizeof(struct ccid2_hc_tx_sock),
-       .ccid_hc_tx_init        = ccid2_hc_tx_init,
-       .ccid_hc_tx_exit        = ccid2_hc_tx_exit,
-       .ccid_hc_tx_send_packet = ccid2_hc_tx_send_packet,
-       .ccid_hc_tx_packet_sent = ccid2_hc_tx_packet_sent,
-       .ccid_hc_tx_packet_recv = ccid2_hc_tx_packet_recv,
-       .ccid_hc_rx_obj_size    = sizeof(struct ccid2_hc_rx_sock),
-       .ccid_hc_rx_packet_recv = ccid2_hc_rx_packet_recv,
+       .ccid_id                  = DCCPC_CCID2,
+       .ccid_name                = "TCP-like",
+       .ccid_hc_tx_obj_size      = sizeof(struct ccid2_hc_tx_sock),
+       .ccid_hc_tx_init          = ccid2_hc_tx_init,
+       .ccid_hc_tx_exit          = ccid2_hc_tx_exit,
+       .ccid_hc_tx_send_packet   = ccid2_hc_tx_send_packet,
+       .ccid_hc_tx_packet_sent   = ccid2_hc_tx_packet_sent,
+       .ccid_hc_tx_parse_options = ccid2_hc_tx_parse_options,
+       .ccid_hc_tx_packet_recv   = ccid2_hc_tx_packet_recv,
+       .ccid_hc_rx_obj_size      = sizeof(struct ccid2_hc_rx_sock),
+       .ccid_hc_rx_packet_recv   = ccid2_hc_rx_packet_recv,
 };
 
 #ifdef CONFIG_IP_DCCP_CCID2_DEBUG
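
With the ccid2 changes above, dccp_parse_options() now hands each Ack Vector option to ccid2_hc_tx_parse_options(), which merely queues it on hc->tx_av_chunks; ccid2_hc_tx_packet_recv() then walks the queued chunks and finally releases them via dccp_ackvec_parsed_cleanup(). A condensed sketch of that walk (illustration only, not a verbatim copy of the patch):

/* Sketch: visit every run-length cell of every parsed Ack Vector chunk,
 * working downwards from the packet's Acknowledgement Number. */
static void walk_av_chunks(struct ccid2_hc_tx_sock *hc, u64 ackno)
{
	struct dccp_ackvec_parsed *avp;

	list_for_each_entry(avp, &hc->tx_av_chunks, node) {
		for (; avp->len--; avp->vec++) {
			u8  state = dccp_ackvec_state(avp->vec);
			u64 end   = SUB48(ackno, dccp_ackvec_runlen(avp->vec));

			if (state != DCCPAV_NOT_RECEIVED) {
				/* sequence numbers in [end, ackno] were
				 * received (or ECN-marked) by the peer */
			}
			ackno = SUB48(end, 1);	/* continue below this run */
		}
	}
	dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
}
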
index 25cb6b216eda52e3e3d51163405471418438f40d..e9985dafc2c7003a036adafd5b4ad7e0eb599187 100644 (file)
@@ -55,6 +55,7 @@ struct ccid2_seq {
  * @tx_rtt_seq:                     to decay RTTVAR at most once per flight
  * @tx_rpseq:               last consecutive seqno
  * @tx_rpdupack:            dupacks since rpseq
+ * @tx_av_chunks:           list of Ack Vectors received on current skb
  */
 struct ccid2_hc_tx_sock {
        u32                     tx_cwnd;
@@ -79,6 +80,7 @@ struct ccid2_hc_tx_sock {
        int                     tx_rpdupack;
        u32                     tx_last_cong;
        u64                     tx_high_ack;
+       struct list_head        tx_av_chunks;
 };
 
 static inline bool ccid2_cwnd_network_limited(struct ccid2_hc_tx_sock *hc)
index a8ed459508b294feb774c1597e788fa38313e995..19fafd597465fac2ac7e40ff77b862a91884ed6b 100644 (file)
@@ -457,12 +457,15 @@ static inline void dccp_update_gss(struct sock *sk, u64 seq)
        dp->dccps_awh = dp->dccps_gss;
 }
 
+static inline int dccp_ackvec_pending(const struct sock *sk)
+{
+       return dccp_sk(sk)->dccps_hc_rx_ackvec != NULL &&
+              !dccp_ackvec_is_empty(dccp_sk(sk)->dccps_hc_rx_ackvec);
+}
+
 static inline int dccp_ack_pending(const struct sock *sk)
 {
-       const struct dccp_sock *dp = dccp_sk(sk);
-       return (dp->dccps_hc_rx_ackvec != NULL &&
-               dccp_ackvec_pending(dp->dccps_hc_rx_ackvec)) ||
-              inet_csk_ack_scheduled(sk);
+       return dccp_ackvec_pending(sk) || inet_csk_ack_scheduled(sk);
 }
 
 extern int  dccp_feat_finalise_settings(struct dccp_sock *dp);
index 265985370fa1de664b1b7f3de427cb043fc74665..7d230d14ce22307b80cd4cf53e41e6563fab7063 100644 (file)
@@ -160,13 +160,15 @@ static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb)
        dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
 }
 
-static void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb)
+static void dccp_handle_ackvec_processing(struct sock *sk, struct sk_buff *skb)
 {
-       struct dccp_sock *dp = dccp_sk(sk);
+       struct dccp_ackvec *av = dccp_sk(sk)->dccps_hc_rx_ackvec;
 
-       if (dp->dccps_hc_rx_ackvec != NULL)
-               dccp_ackvec_check_rcv_ackno(dp->dccps_hc_rx_ackvec, sk,
-                                           DCCP_SKB_CB(skb)->dccpd_ack_seq);
+       if (av == NULL)
+               return;
+       if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
+               dccp_ackvec_clear_state(av, DCCP_SKB_CB(skb)->dccpd_ack_seq);
+       dccp_ackvec_input(av, skb);
 }
 
 static void dccp_deliver_input_to_ccids(struct sock *sk, struct sk_buff *skb)
@@ -365,22 +367,13 @@ discard:
 int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
                         const struct dccp_hdr *dh, const unsigned len)
 {
-       struct dccp_sock *dp = dccp_sk(sk);
-
        if (dccp_check_seqno(sk, skb))
                goto discard;
 
        if (dccp_parse_options(sk, NULL, skb))
                return 1;
 
-       if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
-               dccp_event_ack_recv(sk, skb);
-
-       if (dp->dccps_hc_rx_ackvec != NULL &&
-           dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
-                           DCCP_SKB_CB(skb)->dccpd_seq,
-                           DCCP_ACKVEC_STATE_RECEIVED))
-               goto discard;
+       dccp_handle_ackvec_processing(sk, skb);
        dccp_deliver_input_to_ccids(sk, skb);
 
        return __dccp_rcv_established(sk, skb, dh, len);
@@ -632,15 +625,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                if (dccp_parse_options(sk, NULL, skb))
                        return 1;
 
-               if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
-                       dccp_event_ack_recv(sk, skb);
-
-               if (dp->dccps_hc_rx_ackvec != NULL &&
-                   dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
-                                   DCCP_SKB_CB(skb)->dccpd_seq,
-                                   DCCP_ACKVEC_STATE_RECEIVED))
-                       goto discard;
-
+               dccp_handle_ackvec_processing(sk, skb);
                dccp_deliver_input_to_ccids(sk, skb);
        }
 
index 3f69ea1148291ce2e5ad4956f4c27931d8db7e8f..45a434f94169f13daf1addc73cdb809d77fae213 100644 (file)
@@ -462,15 +462,12 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
 {
        struct rtable *rt;
        struct flowi fl = { .oif = skb_rtable(skb)->rt_iif,
-                           .nl_u = { .ip4_u =
-                                     { .daddr = ip_hdr(skb)->saddr,
-                                       .saddr = ip_hdr(skb)->daddr,
-                                       .tos = RT_CONN_FLAGS(sk) } },
+                           .fl4_dst = ip_hdr(skb)->saddr,
+                           .fl4_src = ip_hdr(skb)->daddr,
+                           .fl4_tos = RT_CONN_FLAGS(sk),
                            .proto = sk->sk_protocol,
-                           .uli_u = { .ports =
-                                      { .sport = dccp_hdr(skb)->dccph_dport,
-                                        .dport = dccp_hdr(skb)->dccph_sport }
-                                    }
+                           .fl_ip_sport = dccp_hdr(skb)->dccph_dport,
+                           .fl_ip_dport = dccp_hdr(skb)->dccph_sport
                          };
 
        security_skb_classify_flow(skb, &fl);
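
The conversion from the nested .nl_u / .uli_u unions to flat flow keys recurs throughout this merge (DCCP, ICMP, ARP, FIB and IGMP below all get the same treatment). As a minimal sketch only, assuming the flat member names this series introduces, a reply-path route lookup now reads; example_route_reply() is a hypothetical helper, not taken from any file in this merge:

#include <net/route.h>
#include <net/ip.h>

/*
 * Minimal sketch: build a reversed IPv4 flow key using the flat flowi
 * members introduced by this series and resolve a route for the reply.
 * example_route_reply() is illustrative only.
 */
static struct rtable *example_route_reply(struct net *net, struct sk_buff *skb)
{
        struct rtable *rt;
        struct flowi fl = {
                .oif     = skb_rtable(skb)->rt_iif,
                .fl4_dst = ip_hdr(skb)->saddr,  /* reply goes back to the sender  */
                .fl4_src = ip_hdr(skb)->daddr,
                .fl4_tos = RT_TOS(ip_hdr(skb)->tos),
                .proto   = IPPROTO_UDP,         /* placeholder transport protocol */
        };

        if (ip_route_output_key(net, &rt, &fl))
                return NULL;                    /* no route */
        return rt;
}
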
index cd306181300940f924b682c9f77e5881323ce6a0..f06ffcfc8d712421040c71a56513b90dcfca96c7 100644 (file)
@@ -54,7 +54,6 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
        struct dccp_sock *dp = dccp_sk(sk);
        const struct dccp_hdr *dh = dccp_hdr(skb);
        const u8 pkt_type = DCCP_SKB_CB(skb)->dccpd_type;
-       u64 ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;
        unsigned char *options = (unsigned char *)dh + dccp_hdr_len(skb);
        unsigned char *opt_ptr = options;
        const unsigned char *opt_end = (unsigned char *)dh +
@@ -129,14 +128,6 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
                        if (rc)
                                goto out_featneg_failed;
                        break;
-               case DCCPO_ACK_VECTOR_0:
-               case DCCPO_ACK_VECTOR_1:
-                       if (dccp_packet_without_ack(skb))   /* RFC 4340, 11.4 */
-                               break;
-                       if (dp->dccps_hc_rx_ackvec != NULL &&
-                           dccp_ackvec_parse(sk, skb, &ackno, opt, value, len))
-                               goto out_invalid_option;
-                       break;
                case DCCPO_TIMESTAMP:
                        if (len != 4)
                                goto out_invalid_option;
@@ -226,6 +217,16 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
                                                     pkt_type, opt, value, len))
                                goto out_invalid_option;
                        break;
+               case DCCPO_ACK_VECTOR_0:
+               case DCCPO_ACK_VECTOR_1:
+                       if (dccp_packet_without_ack(skb))   /* RFC 4340, 11.4 */
+                               break;
+                       /*
+                        * Ack vectors are processed by the TX CCID if it is
+                        * interested. The RX CCID need not parse Ack Vectors,
+                        * since it is only interested in clearing old state.
+                        * Fall through.
+                        */
                case DCCPO_MIN_TX_CCID_SPECIFIC ... DCCPO_MAX_TX_CCID_SPECIFIC:
                        if (ccid_hc_tx_parse_options(dp->dccps_hc_tx_ccid, sk,
                                                     pkt_type, opt, value, len))
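
The comment above documents a deliberate fall-through: Ack Vector options are funnelled into the same handler as the TX-CCID-specific range, since only the sender-side CCID cares about their contents. A minimal sketch of that dispatch shape, with illustrative option numbers and a stub handler rather than the real DCCP symbols:

/*
 * Minimal sketch of the fall-through dispatch used above.  The option
 * numbers and tx_opt_handler() are illustrative only.
 */
static int tx_opt_handler(unsigned char opt, const unsigned char *val, int len)
{
        return 0;       /* stub: the sender-side CCID would consume the option */
}

static int dispatch_option(unsigned char opt, const unsigned char *val,
                           int len, int packet_has_ack)
{
        switch (opt) {
        case 38:                        /* Ack Vector, nonce 0 (illustrative) */
        case 39:                        /* Ack Vector, nonce 1 (illustrative) */
                if (!packet_has_ack)
                        break;          /* nothing to acknowledge, ignore     */
                /* fall through */
        case 192 ... 255:               /* TX-CCID-specific range (illustrative) */
                return tx_opt_handler(opt, val, len);
        }
        return 0;
}
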
@@ -340,6 +341,7 @@ static inline int dccp_elapsed_time_len(const u32 elapsed_time)
        return elapsed_time == 0 ? 0 : elapsed_time <= 0xFFFF ? 2 : 4;
 }
 
+/* FIXME: This function is currently not used anywhere */
 int dccp_insert_option_elapsed_time(struct sk_buff *skb, u32 elapsed_time)
 {
        const int elapsed_time_len = dccp_elapsed_time_len(elapsed_time);
@@ -424,6 +426,83 @@ static int dccp_insert_option_timestamp_echo(struct dccp_sock *dp,
        return 0;
 }
 
+static int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb)
+{
+       struct dccp_sock *dp = dccp_sk(sk);
+       struct dccp_ackvec *av = dp->dccps_hc_rx_ackvec;
+       struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
+       const u16 buflen = dccp_ackvec_buflen(av);
+       /* Figure out how many options we need to represent the ackvec */
+       const u8 nr_opts = DIV_ROUND_UP(buflen, DCCP_SINGLE_OPT_MAXLEN);
+       u16 len = buflen + 2 * nr_opts;
+       u8 i, nonce = 0;
+       const unsigned char *tail, *from;
+       unsigned char *to;
+
+       if (dcb->dccpd_opt_len + len > DCCP_MAX_OPT_LEN) {
+               DCCP_WARN("Lacking space for %u bytes on %s packet\n", len,
+                         dccp_packet_name(dcb->dccpd_type));
+               return -1;
+       }
+       /*
+        * Since Ack Vectors are variable-length, we cannot always predict
+        * their size. To catch exception cases where the space is running out
+        * on the skb, a separate Sync is scheduled to carry the Ack Vector.
+        */
+       if (len > DCCPAV_MIN_OPTLEN &&
+           len + dcb->dccpd_opt_len + skb->len > dp->dccps_mss_cache) {
+               DCCP_WARN("No space left for Ack Vector (%u) on skb (%u+%u), "
+                         "MPS=%u ==> reduce payload size?\n", len, skb->len,
+                         dcb->dccpd_opt_len, dp->dccps_mss_cache);
+               dp->dccps_sync_scheduled = 1;
+               return 0;
+       }
+       dcb->dccpd_opt_len += len;
+
+       to   = skb_push(skb, len);
+       len  = buflen;
+       from = av->av_buf + av->av_buf_head;
+       tail = av->av_buf + DCCPAV_MAX_ACKVEC_LEN;
+
+       for (i = 0; i < nr_opts; ++i) {
+               int copylen = len;
+
+               if (len > DCCP_SINGLE_OPT_MAXLEN)
+                       copylen = DCCP_SINGLE_OPT_MAXLEN;
+
+               /*
+                * RFC 4340, 12.2: Encode the Nonce Echo for this Ack Vector via
+                * its type; ack_nonce is the sum of all individual buf_nonce's.
+                */
+               nonce ^= av->av_buf_nonce[i];
+
+               *to++ = DCCPO_ACK_VECTOR_0 + av->av_buf_nonce[i];
+               *to++ = copylen + 2;
+
+               /* Check if buf_head wraps */
+               if (from + copylen > tail) {
+                       const u16 tailsize = tail - from;
+
+                       memcpy(to, from, tailsize);
+                       to      += tailsize;
+                       len     -= tailsize;
+                       copylen -= tailsize;
+                       from    = av->av_buf;
+               }
+
+               memcpy(to, from, copylen);
+               from += copylen;
+               to   += copylen;
+               len  -= copylen;
+       }
+       /*
+        * Each sent Ack Vector is recorded in the list, as per A.2 of RFC 4340.
+        */
+       if (dccp_ackvec_update_records(av, dcb->dccpd_seq, nonce))
+               return -ENOBUFS;
+       return 0;
+}
+
 /**
  * dccp_insert_option_mandatory  -  Mandatory option (5.8.2)
  * Note that since we are using skb_push, this function needs to be called
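
The copy loop above has to cope with av_buf being a circular buffer: when the live data wraps past the end of the array it is emitted as two memcpy()s. The same split-at-the-tail idea in isolation, as a minimal stand-alone sketch with illustrative names:

#include <string.h>

/*
 * Minimal sketch: copy 'len' bytes that start at offset 'head' of a
 * circular buffer of 'bufsz' bytes into the linear destination 'to'.
 * Mirrors the wrap-around handling above; names are illustrative only.
 */
static void ring_copy_out(unsigned char *to, const unsigned char *buf,
                          unsigned int bufsz, unsigned int head,
                          unsigned int len)
{
        unsigned int tail_space = bufsz - head; /* bytes before the wrap point */

        if (len > tail_space) {
                memcpy(to, buf + head, tail_space);
                to  += tail_space;
                len -= tail_space;
                head = 0;                       /* continue from the start */
        }
        memcpy(to, buf + head, len);
}
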
@@ -519,8 +598,7 @@ int dccp_insert_options(struct sock *sk, struct sk_buff *skb)
                        if (dccp_insert_option_timestamp(skb))
                                return -1;
 
-               } else if (dp->dccps_hc_rx_ackvec != NULL &&
-                          dccp_ackvec_pending(dp->dccps_hc_rx_ackvec) &&
+               } else if (dccp_ackvec_pending(sk) &&
                           dccp_insert_option_ackvec(sk, skb)) {
                                return -1;
                }
index 45b91853f5aee3d452d5795da832b9e1f04651ed..d96dd9d362ae1162e7e82e1f22fc7338904b9a1c 100644 (file)
@@ -283,6 +283,15 @@ static void dccp_xmit_packet(struct sock *sk)
         * any local drop will eventually be reported via receiver feedback.
         */
        ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);
+
+       /*
+        * If the CCID needs to transfer additional header options out-of-band
+        * (e.g. Ack Vectors or feature-negotiation options), it activates this
+        * flag to schedule a Sync. The Sync will automatically incorporate all
+        * currently pending header options, thus clearing the backlog.
+        */
+       if (dp->dccps_sync_scheduled)
+               dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
 }
 
 /**
@@ -636,6 +645,12 @@ void dccp_send_sync(struct sock *sk, const u64 ackno,
        DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
        DCCP_SKB_CB(skb)->dccpd_ack_seq = ackno;
 
+       /*
+        * Clear the flag in case the Sync was scheduled for out-of-band data,
+        * such as carrying a long Ack Vector.
+        */
+       dccp_sk(sk)->dccps_sync_scheduled = 0;
+
        dccp_transmit_skb(sk, skb);
 }
 
index a76b78de679fa7e928cfae7b62c0a7e73c2256dd..9ecef9968c3940026deefba772fa568af7237bcc 100644 (file)
@@ -1848,7 +1848,7 @@ unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu)
 {
        unsigned mss = 230 - DN_MAX_NSP_DATA_HEADER;
        if (dev) {
-               struct dn_dev *dn_db = dev->dn_ptr;
+               struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
                mtu -= LL_RESERVED_SPACE(dev);
                if (dn_db->use_long)
                        mtu -= 21;
index 4c409b46aa35c0bb13bc0b99cec4f43dd2da0690..0ba15633c4184484fb46e99c50fdb0608a325046 100644 (file)
@@ -267,7 +267,7 @@ static int dn_forwarding_proc(ctl_table *table, int write,
        if (table->extra1 == NULL)
                return -EINVAL;
 
-       dn_db = dev->dn_ptr;
+       dn_db = rcu_dereference_raw(dev->dn_ptr);
        old = dn_db->parms.forwarding;
 
        err = proc_dointvec(table, write, buffer, lenp, ppos);
@@ -332,14 +332,19 @@ static struct dn_ifaddr *dn_dev_alloc_ifa(void)
        return ifa;
 }
 
-static __inline__ void dn_dev_free_ifa(struct dn_ifaddr *ifa)
+static void dn_dev_free_ifa_rcu(struct rcu_head *head)
 {
-       kfree(ifa);
+       kfree(container_of(head, struct dn_ifaddr, rcu));
 }
 
-static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr **ifap, int destroy)
+static void dn_dev_free_ifa(struct dn_ifaddr *ifa)
 {
-       struct dn_ifaddr *ifa1 = *ifap;
+       call_rcu(&ifa->rcu, dn_dev_free_ifa_rcu);
+}
+
+static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr __rcu **ifap, int destroy)
+{
+       struct dn_ifaddr *ifa1 = rtnl_dereference(*ifap);
        unsigned char mac_addr[6];
        struct net_device *dev = dn_db->dev;
 
@@ -373,7 +378,9 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
        ASSERT_RTNL();
 
        /* Check for duplicates */
-       for(ifa1 = dn_db->ifa_list; ifa1; ifa1 = ifa1->ifa_next) {
+       for (ifa1 = rtnl_dereference(dn_db->ifa_list);
+            ifa1 != NULL;
+            ifa1 = rtnl_dereference(ifa1->ifa_next)) {
                if (ifa1->ifa_local == ifa->ifa_local)
                        return -EEXIST;
        }
@@ -386,7 +393,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
        }
 
        ifa->ifa_next = dn_db->ifa_list;
-       dn_db->ifa_list = ifa;
+       rcu_assign_pointer(dn_db->ifa_list, ifa);
 
        dn_ifaddr_notify(RTM_NEWADDR, ifa);
        blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa);
@@ -396,7 +403,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
 
 static int dn_dev_set_ifa(struct net_device *dev, struct dn_ifaddr *ifa)
 {
-       struct dn_dev *dn_db = dev->dn_ptr;
+       struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
        int rv;
 
        if (dn_db == NULL) {
@@ -425,7 +432,8 @@ int dn_dev_ioctl(unsigned int cmd, void __user *arg)
        struct sockaddr_dn *sdn = (struct sockaddr_dn *)&ifr->ifr_addr;
        struct dn_dev *dn_db;
        struct net_device *dev;
-       struct dn_ifaddr *ifa = NULL, **ifap = NULL;
+       struct dn_ifaddr *ifa = NULL;
+       struct dn_ifaddr __rcu **ifap = NULL;
        int ret = 0;
 
        if (copy_from_user(ifr, arg, DN_IFREQ_SIZE))
@@ -454,8 +462,10 @@ int dn_dev_ioctl(unsigned int cmd, void __user *arg)
                goto done;
        }
 
-       if ((dn_db = dev->dn_ptr) != NULL) {
-               for (ifap = &dn_db->ifa_list; (ifa=*ifap) != NULL; ifap = &ifa->ifa_next)
+       if ((dn_db = rtnl_dereference(dev->dn_ptr)) != NULL) {
+               for (ifap = &dn_db->ifa_list;
+                    (ifa = rtnl_dereference(*ifap)) != NULL;
+                    ifap = &ifa->ifa_next)
                        if (strcmp(ifr->ifr_name, ifa->ifa_label) == 0)
                                break;
        }
@@ -558,7 +568,7 @@ static struct dn_dev *dn_dev_by_index(int ifindex)
 
        dev = __dev_get_by_index(&init_net, ifindex);
        if (dev)
-               dn_dev = dev->dn_ptr;
+               dn_dev = rtnl_dereference(dev->dn_ptr);
 
        return dn_dev;
 }
@@ -576,7 +586,8 @@ static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
        struct nlattr *tb[IFA_MAX+1];
        struct dn_dev *dn_db;
        struct ifaddrmsg *ifm;
-       struct dn_ifaddr *ifa, **ifap;
+       struct dn_ifaddr *ifa;
+       struct dn_ifaddr __rcu **ifap;
        int err = -EINVAL;
 
        if (!net_eq(net, &init_net))
@@ -592,7 +603,9 @@ static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
                goto errout;
 
        err = -EADDRNOTAVAIL;
-       for (ifap = &dn_db->ifa_list; (ifa = *ifap); ifap = &ifa->ifa_next) {
+       for (ifap = &dn_db->ifa_list;
+            (ifa = rtnl_dereference(*ifap)) != NULL;
+            ifap = &ifa->ifa_next) {
                if (tb[IFA_LOCAL] &&
                    nla_memcmp(tb[IFA_LOCAL], &ifa->ifa_local, 2))
                        continue;
@@ -632,7 +645,7 @@ static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
        if ((dev = __dev_get_by_index(&init_net, ifm->ifa_index)) == NULL)
                return -ENODEV;
 
-       if ((dn_db = dev->dn_ptr) == NULL) {
+       if ((dn_db = rtnl_dereference(dev->dn_ptr)) == NULL) {
                dn_db = dn_dev_create(dev, &err);
                if (!dn_db)
                        return err;
@@ -748,11 +761,11 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
                        skip_naddr = 0;
                }
 
-               if ((dn_db = dev->dn_ptr) == NULL)
+               if ((dn_db = rtnl_dereference(dev->dn_ptr)) == NULL)
                        goto cont;
 
-               for (ifa = dn_db->ifa_list, dn_idx = 0; ifa;
-                    ifa = ifa->ifa_next, dn_idx++) {
+               for (ifa = rtnl_dereference(dn_db->ifa_list), dn_idx = 0; ifa;
+                    ifa = rtnl_dereference(ifa->ifa_next), dn_idx++) {
                        if (dn_idx < skip_naddr)
                                continue;
 
@@ -773,21 +786,22 @@ done:
 
 static int dn_dev_get_first(struct net_device *dev, __le16 *addr)
 {
-       struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;
+       struct dn_dev *dn_db;
        struct dn_ifaddr *ifa;
        int rv = -ENODEV;
 
+       rcu_read_lock();
+       dn_db = rcu_dereference(dev->dn_ptr);
        if (dn_db == NULL)
                goto out;
 
-       rtnl_lock();
-       ifa = dn_db->ifa_list;
+       ifa = rcu_dereference(dn_db->ifa_list);
        if (ifa != NULL) {
                *addr = ifa->ifa_local;
                rv = 0;
        }
-       rtnl_unlock();
 out:
+       rcu_read_unlock();
        return rv;
 }
 
@@ -823,7 +837,7 @@ static void dn_send_endnode_hello(struct net_device *dev, struct dn_ifaddr *ifa)
        struct endnode_hello_message *msg;
        struct sk_buff *skb = NULL;
        __le16 *pktlen;
-       struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;
+       struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
 
        if ((skb = dn_alloc_skb(NULL, sizeof(*msg), GFP_ATOMIC)) == NULL)
                return;
@@ -889,7 +903,7 @@ static int dn_am_i_a_router(struct dn_neigh *dn, struct dn_dev *dn_db, struct dn
 static void dn_send_router_hello(struct net_device *dev, struct dn_ifaddr *ifa)
 {
        int n;
-       struct dn_dev *dn_db = dev->dn_ptr;
+       struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
        struct dn_neigh *dn = (struct dn_neigh *)dn_db->router;
        struct sk_buff *skb;
        size_t size;
@@ -960,7 +974,7 @@ static void dn_send_router_hello(struct net_device *dev, struct dn_ifaddr *ifa)
 
 static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa)
 {
-       struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;
+       struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
 
        if (dn_db->parms.forwarding == 0)
                dn_send_endnode_hello(dev, ifa);
@@ -998,7 +1012,7 @@ static void dn_send_ptp_hello(struct net_device *dev, struct dn_ifaddr *ifa)
 
 static int dn_eth_up(struct net_device *dev)
 {
-       struct dn_dev *dn_db = dev->dn_ptr;
+       struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
 
        if (dn_db->parms.forwarding == 0)
                dev_mc_add(dev, dn_rt_all_end_mcast);
@@ -1012,7 +1026,7 @@ static int dn_eth_up(struct net_device *dev)
 
 static void dn_eth_down(struct net_device *dev)
 {
-       struct dn_dev *dn_db = dev->dn_ptr;
+       struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
 
        if (dn_db->parms.forwarding == 0)
                dev_mc_del(dev, dn_rt_all_end_mcast);
@@ -1025,12 +1039,16 @@ static void dn_dev_set_timer(struct net_device *dev);
 static void dn_dev_timer_func(unsigned long arg)
 {
        struct net_device *dev = (struct net_device *)arg;
-       struct dn_dev *dn_db = dev->dn_ptr;
+       struct dn_dev *dn_db;
        struct dn_ifaddr *ifa;
 
+       rcu_read_lock();
+       dn_db = rcu_dereference(dev->dn_ptr);
        if (dn_db->t3 <= dn_db->parms.t2) {
                if (dn_db->parms.timer3) {
-                       for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) {
+                       for (ifa = rcu_dereference(dn_db->ifa_list);
+                            ifa;
+                            ifa = rcu_dereference(ifa->ifa_next)) {
                                if (!(ifa->ifa_flags & IFA_F_SECONDARY))
                                        dn_db->parms.timer3(dev, ifa);
                        }
@@ -1039,13 +1057,13 @@ static void dn_dev_timer_func(unsigned long arg)
        } else {
                dn_db->t3 -= dn_db->parms.t2;
        }
-
+       rcu_read_unlock();
        dn_dev_set_timer(dev);
 }
 
 static void dn_dev_set_timer(struct net_device *dev)
 {
-       struct dn_dev *dn_db = dev->dn_ptr;
+       struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
 
        if (dn_db->parms.t2 > dn_db->parms.t3)
                dn_db->parms.t2 = dn_db->parms.t3;
@@ -1077,8 +1095,8 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
                return NULL;
 
        memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms));
-       smp_wmb();
-       dev->dn_ptr = dn_db;
+
+       rcu_assign_pointer(dev->dn_ptr, dn_db);
        dn_db->dev = dev;
        init_timer(&dn_db->timer);
 
@@ -1086,7 +1104,7 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
 
        dn_db->neigh_parms = neigh_parms_alloc(dev, &dn_neigh_table);
        if (!dn_db->neigh_parms) {
-               dev->dn_ptr = NULL;
+               rcu_assign_pointer(dev->dn_ptr, NULL);
                kfree(dn_db);
                return NULL;
        }
@@ -1125,7 +1143,7 @@ void dn_dev_up(struct net_device *dev)
        struct dn_ifaddr *ifa;
        __le16 addr = decnet_address;
        int maybe_default = 0;
-       struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;
+       struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
 
        if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK))
                return;
@@ -1176,7 +1194,7 @@ void dn_dev_up(struct net_device *dev)
 
 static void dn_dev_delete(struct net_device *dev)
 {
-       struct dn_dev *dn_db = dev->dn_ptr;
+       struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
 
        if (dn_db == NULL)
                return;
@@ -1204,13 +1222,13 @@ static void dn_dev_delete(struct net_device *dev)
 
 void dn_dev_down(struct net_device *dev)
 {
-       struct dn_dev *dn_db = dev->dn_ptr;
+       struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
        struct dn_ifaddr *ifa;
 
        if (dn_db == NULL)
                return;
 
-       while((ifa = dn_db->ifa_list) != NULL) {
+       while ((ifa = rtnl_dereference(dn_db->ifa_list)) != NULL) {
                dn_dev_del_ifa(dn_db, &dn_db->ifa_list, 0);
                dn_dev_free_ifa(ifa);
        }
@@ -1270,7 +1288,7 @@ static inline int is_dn_dev(struct net_device *dev)
 }
 
 static void *dn_dev_seq_start(struct seq_file *seq, loff_t *pos)
-       __acquires(rcu)
+       __acquires(RCU)
 {
        int i;
        struct net_device *dev;
@@ -1313,7 +1331,7 @@ static void *dn_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 }
 
 static void dn_dev_seq_stop(struct seq_file *seq, void *v)
-       __releases(rcu)
+       __releases(RCU)
 {
        rcu_read_unlock();
 }
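
The dn_dev changes in this file follow the standard recipe for an RCU-protected pointer: readers bracket the access with rcu_read_lock()/rcu_read_unlock() and use rcu_dereference(), RTNL-holding paths use rtnl_dereference(), and freeing is deferred with call_rcu(). A minimal stand-alone sketch of the reader/updater pair, using generic names rather than the DECnet ones:

#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

/*
 * Minimal sketch of the RCU reader/updater pattern applied to dev->dn_ptr
 * above.  'example_obj', 'example_slot' and 'example_lock' are illustrative
 * names only.
 */
struct example_obj {
        int value;
        struct rcu_head rcu;
};

static struct example_obj __rcu *example_slot;
static DEFINE_SPINLOCK(example_lock);

static int example_read(void)
{
        struct example_obj *p;
        int v = -1;

        rcu_read_lock();
        p = rcu_dereference(example_slot);      /* valid only inside the read section */
        if (p)
                v = p->value;
        rcu_read_unlock();
        return v;
}

static void example_free_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct example_obj, rcu));
}

static void example_replace(struct example_obj *new)
{
        struct example_obj *old;

        spin_lock(&example_lock);
        old = rcu_dereference_protected(example_slot,
                                        lockdep_is_held(&example_lock));
        rcu_assign_pointer(example_slot, new);  /* publish before retiring old copy */
        spin_unlock(&example_lock);

        if (old)
                call_rcu(&old->rcu, example_free_rcu);
}
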
@@ -1340,7 +1358,7 @@ static int dn_dev_seq_show(struct seq_file *seq, void *v)
                struct net_device *dev = v;
                char peer_buf[DN_ASCBUF_LEN];
                char router_buf[DN_ASCBUF_LEN];
-               struct dn_dev *dn_db = dev->dn_ptr;
+               struct dn_dev *dn_db = rcu_dereference(dev->dn_ptr);
 
                seq_printf(seq, "%-8s %1s     %04u %04u   %04lu %04lu"
                                "   %04hu    %03d %02x    %-10s %-7s %-7s\n",
index 4ab96c15166d4930dd49772e41734f3eca3968f5..0ef0a81bcd72b24afaedebd506c255de3452dad0 100644 (file)
@@ -610,10 +610,12 @@ static void dn_fib_del_ifaddr(struct dn_ifaddr *ifa)
        /* Scan device list */
        rcu_read_lock();
        for_each_netdev_rcu(&init_net, dev) {
-               dn_db = dev->dn_ptr;
+               dn_db = rcu_dereference(dev->dn_ptr);
                if (dn_db == NULL)
                        continue;
-               for(ifa2 = dn_db->ifa_list; ifa2; ifa2 = ifa2->ifa_next) {
+               for (ifa2 = rcu_dereference(dn_db->ifa_list);
+                    ifa2 != NULL;
+                    ifa2 = rcu_dereference(ifa2->ifa_next)) {
                        if (ifa2->ifa_local == ifa->ifa_local) {
                                found_it = 1;
                                break;
index a085dbcf5c7fa4fde69419dd135c8c8570bb4fb2..602dade7e9a3576905ae6f1d1dc927df7c8f8b63 100644 (file)
@@ -391,7 +391,7 @@ int dn_neigh_router_hello(struct sk_buff *skb)
                write_lock(&neigh->lock);
 
                neigh->used = jiffies;
-               dn_db = (struct dn_dev *)neigh->dev->dn_ptr;
+               dn_db = rcu_dereference(neigh->dev->dn_ptr);
 
                if (!(neigh->nud_state & NUD_PERMANENT)) {
                        neigh->updated = jiffies;
index df0f3e54ff8aba58dac157ab2b2c866453437310..8280e43c88610460664199061488f5186de692cd 100644 (file)
@@ -93,7 +93,7 @@
 
 struct dn_rt_hash_bucket
 {
-       struct dn_route *chain;
+       struct dn_route __rcu *chain;
        spinlock_t lock;
 };
 
@@ -157,15 +157,17 @@ static inline void dnrt_drop(struct dn_route *rt)
 static void dn_dst_check_expire(unsigned long dummy)
 {
        int i;
-       struct dn_route *rt, **rtp;
+       struct dn_route *rt;
+       struct dn_route __rcu **rtp;
        unsigned long now = jiffies;
        unsigned long expire = 120 * HZ;
 
-       for(i = 0; i <= dn_rt_hash_mask; i++) {
+       for (i = 0; i <= dn_rt_hash_mask; i++) {
                rtp = &dn_rt_hash_table[i].chain;
 
                spin_lock(&dn_rt_hash_table[i].lock);
-               while((rt=*rtp) != NULL) {
+               while ((rt = rcu_dereference_protected(*rtp,
+                                               lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
                        if (atomic_read(&rt->dst.__refcnt) ||
                                        (now - rt->dst.lastuse) < expire) {
                                rtp = &rt->dst.dn_next;
@@ -186,17 +188,19 @@ static void dn_dst_check_expire(unsigned long dummy)
 
 static int dn_dst_gc(struct dst_ops *ops)
 {
-       struct dn_route *rt, **rtp;
+       struct dn_route *rt;
+       struct dn_route __rcu **rtp;
        int i;
        unsigned long now = jiffies;
        unsigned long expire = 10 * HZ;
 
-       for(i = 0; i <= dn_rt_hash_mask; i++) {
+       for (i = 0; i <= dn_rt_hash_mask; i++) {
 
                spin_lock_bh(&dn_rt_hash_table[i].lock);
                rtp = &dn_rt_hash_table[i].chain;
 
-               while((rt=*rtp) != NULL) {
+               while ((rt = rcu_dereference_protected(*rtp,
+                                               lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
                        if (atomic_read(&rt->dst.__refcnt) ||
                                        (now - rt->dst.lastuse) < expire) {
                                rtp = &rt->dst.dn_next;
@@ -227,7 +231,7 @@ static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
        u32 min_mtu = 230;
        struct dn_dev *dn = dst->neighbour ?
-                           (struct dn_dev *)dst->neighbour->dev->dn_ptr : NULL;
+                           rcu_dereference_raw(dst->neighbour->dev->dn_ptr) : NULL;
 
        if (dn && dn->use_long == 0)
                min_mtu -= 6;
@@ -267,23 +271,25 @@ static void dn_dst_link_failure(struct sk_buff *skb)
 
 static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
 {
-       return ((fl1->nl_u.dn_u.daddr ^ fl2->nl_u.dn_u.daddr) |
-               (fl1->nl_u.dn_u.saddr ^ fl2->nl_u.dn_u.saddr) |
+       return ((fl1->fld_dst ^ fl2->fld_dst) |
+               (fl1->fld_src ^ fl2->fld_src) |
                (fl1->mark ^ fl2->mark) |
-               (fl1->nl_u.dn_u.scope ^ fl2->nl_u.dn_u.scope) |
+               (fl1->fld_scope ^ fl2->fld_scope) |
                (fl1->oif ^ fl2->oif) |
                (fl1->iif ^ fl2->iif)) == 0;
 }
 
 static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp)
 {
-       struct dn_route *rth, **rthp;
+       struct dn_route *rth;
+       struct dn_route __rcu **rthp;
        unsigned long now = jiffies;
 
        rthp = &dn_rt_hash_table[hash].chain;
 
        spin_lock_bh(&dn_rt_hash_table[hash].lock);
-       while((rth = *rthp) != NULL) {
+       while ((rth = rcu_dereference_protected(*rthp,
+                                               lockdep_is_held(&dn_rt_hash_table[hash].lock))) != NULL) {
                if (compare_keys(&rth->fl, &rt->fl)) {
                        /* Put it first */
                        *rthp = rth->dst.dn_next;
@@ -315,15 +321,15 @@ static void dn_run_flush(unsigned long dummy)
        int i;
        struct dn_route *rt, *next;
 
-       for(i = 0; i < dn_rt_hash_mask; i++) {
+       for (i = 0; i < dn_rt_hash_mask; i++) {
                spin_lock_bh(&dn_rt_hash_table[i].lock);
 
-               if ((rt = xchg(&dn_rt_hash_table[i].chain, NULL)) == NULL)
+               if ((rt = xchg((struct dn_route **)&dn_rt_hash_table[i].chain, NULL)) == NULL)
                        goto nothing_to_declare;
 
-               for(; rt; rt=next) {
-                       next = rt->dst.dn_next;
-                       rt->dst.dn_next = NULL;
+               for(; rt; rt = next) {
+                       next = rcu_dereference_raw(rt->dst.dn_next);
+                       RCU_INIT_POINTER(rt->dst.dn_next, NULL);
                        dst_free((struct dst_entry *)rt);
                }
 
@@ -458,15 +464,16 @@ static int dn_return_long(struct sk_buff *skb)
  */
 static int dn_route_rx_packet(struct sk_buff *skb)
 {
-       struct dn_skb_cb *cb = DN_SKB_CB(skb);
+       struct dn_skb_cb *cb;
        int err;
 
        if ((err = dn_route_input(skb)) == 0)
                return dst_input(skb);
 
+       cb = DN_SKB_CB(skb);
        if (decnet_debug_level & 4) {
                char *devname = skb->dev ? skb->dev->name : "???";
-               struct dn_skb_cb *cb = DN_SKB_CB(skb);
+
                printk(KERN_DEBUG
                        "DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n",
                        (int)cb->rt_flags, devname, skb->len,
@@ -573,7 +580,7 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
        struct dn_skb_cb *cb;
        unsigned char flags = 0;
        __u16 len = le16_to_cpu(*(__le16 *)skb->data);
-       struct dn_dev *dn = (struct dn_dev *)dev->dn_ptr;
+       struct dn_dev *dn = rcu_dereference(dev->dn_ptr);
        unsigned char padlen = 0;
 
        if (!net_eq(dev_net(dev), &init_net))
@@ -728,7 +735,7 @@ static int dn_forward(struct sk_buff *skb)
 {
        struct dn_skb_cb *cb = DN_SKB_CB(skb);
        struct dst_entry *dst = skb_dst(skb);
-       struct dn_dev *dn_db = dst->dev->dn_ptr;
+       struct dn_dev *dn_db = rcu_dereference(dst->dev->dn_ptr);
        struct dn_route *rt;
        struct neighbour *neigh = dst->neighbour;
        int header_len;
@@ -835,13 +842,16 @@ static inline int dn_match_addr(__le16 addr1, __le16 addr2)
 static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int scope)
 {
        __le16 saddr = 0;
-       struct dn_dev *dn_db = dev->dn_ptr;
+       struct dn_dev *dn_db;
        struct dn_ifaddr *ifa;
        int best_match = 0;
        int ret;
 
-       read_lock(&dev_base_lock);
-       for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) {
+       rcu_read_lock();
+       dn_db = rcu_dereference(dev->dn_ptr);
+       for (ifa = rcu_dereference(dn_db->ifa_list);
+            ifa != NULL;
+            ifa = rcu_dereference(ifa->ifa_next)) {
                if (ifa->ifa_scope > scope)
                        continue;
                if (!daddr) {
@@ -854,7 +864,7 @@ static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int
                if (best_match == 0)
                        saddr = ifa->ifa_local;
        }
-       read_unlock(&dev_base_lock);
+       rcu_read_unlock();
 
        return saddr;
 }
@@ -872,11 +882,9 @@ static inline __le16 dn_fib_rules_map_destination(__le16 daddr, struct dn_fib_re
 
 static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *oldflp, int try_hard)
 {
-       struct flowi fl = { .nl_u = { .dn_u =
-                                     { .daddr = oldflp->fld_dst,
-                                       .saddr = oldflp->fld_src,
-                                       .scope = RT_SCOPE_UNIVERSE,
-                                    } },
+       struct flowi fl = { .fld_dst = oldflp->fld_dst,
+                           .fld_src = oldflp->fld_src,
+                           .fld_scope = RT_SCOPE_UNIVERSE,
                            .mark = oldflp->mark,
                            .iif = init_net.loopback_dev->ifindex,
                            .oif = oldflp->oif };
@@ -1020,7 +1028,7 @@ source_ok:
                err = -ENODEV;
                if (dev_out == NULL)
                        goto out;
-               dn_db = dev_out->dn_ptr;
+               dn_db = rcu_dereference_raw(dev_out->dn_ptr);
                /* Possible improvement - check all devices for local addr */
                if (dn_dev_islocal(dev_out, fl.fld_dst)) {
                        dev_put(dev_out);
@@ -1171,7 +1179,7 @@ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *fl
                        if ((flp->fld_dst == rt->fl.fld_dst) &&
                            (flp->fld_src == rt->fl.fld_src) &&
                            (flp->mark == rt->fl.mark) &&
-                           (rt->fl.iif == 0) &&
+                           dn_is_output_route(rt) &&
                            (rt->fl.oif == flp->oif)) {
                                dst_use(&rt->dst, jiffies);
                                rcu_read_unlock_bh();
@@ -1220,11 +1228,9 @@ static int dn_route_input_slow(struct sk_buff *skb)
        int flags = 0;
        __le16 gateway = 0;
        __le16 local_src = 0;
-       struct flowi fl = { .nl_u = { .dn_u =
-                                    { .daddr = cb->dst,
-                                      .saddr = cb->src,
-                                      .scope = RT_SCOPE_UNIVERSE,
-                                   } },
+       struct flowi fl = { .fld_dst = cb->dst,
+                           .fld_src = cb->src,
+                           .fld_scope = RT_SCOPE_UNIVERSE,
                            .mark = skb->mark,
                            .iif = skb->dev->ifindex };
        struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE };
@@ -1233,7 +1239,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
 
        dev_hold(in_dev);
 
-       if ((dn_db = in_dev->dn_ptr) == NULL)
+       if ((dn_db = rcu_dereference(in_dev->dn_ptr)) == NULL)
                goto out;
 
        /* Zero source addresses are not allowed */
@@ -1502,7 +1508,7 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
        if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0, expires,
                               rt->dst.error) < 0)
                goto rtattr_failure;
-       if (rt->fl.iif)
+       if (dn_is_input_route(rt))
                RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);
 
        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
@@ -1677,15 +1683,15 @@ static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_rou
 {
        struct dn_rt_cache_iter_state *s = seq->private;
 
-       rt = rt->dst.dn_next;
-       while(!rt) {
+       rt = rcu_dereference_bh(rt->dst.dn_next);
+       while (!rt) {
                rcu_read_unlock_bh();
                if (--s->bucket < 0)
                        break;
                rcu_read_lock_bh();
-               rt = dn_rt_hash_table[s->bucket].chain;
+               rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
        }
-       return rcu_dereference_bh(rt);
+       return rt;
 }
 
 static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
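
In the hash-chain hunks above, the per-bucket spinlock is the write-side protection, and rcu_dereference_protected(..., lockdep_is_held(&...lock)) both documents and (with lockdep enabled) verifies that assumption while the chain is walked and modified. The unlink loop in isolation, as a minimal sketch with illustrative types:

#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

/*
 * Minimal sketch of pruning one hash bucket under its spinlock; 'node',
 * 'bucket' and expired() are illustrative, not kernel symbols.
 */
struct node {
        struct node __rcu *next;
        unsigned long lastuse;
        struct rcu_head rcu;
};

struct bucket {
        struct node __rcu *chain;
        spinlock_t lock;
};

static bool expired(const struct node *n)
{
        return time_after(jiffies, n->lastuse + 120 * HZ);
}

static void node_free_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct node, rcu));
}

static void bucket_gc(struct bucket *b)
{
        struct node __rcu **np = &b->chain;
        struct node *n;

        spin_lock_bh(&b->lock);
        while ((n = rcu_dereference_protected(*np,
                                        lockdep_is_held(&b->lock))) != NULL) {
                if (!expired(n)) {
                        np = &n->next;                  /* keep it, advance       */
                        continue;
                }
                *np = n->next;                          /* unlink under the lock  */
                call_rcu(&n->rcu, node_free_rcu);       /* free after grace period */
        }
        spin_unlock_bh(&b->lock);
}
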
index 48fdf10be7a1634a78114cd3a4e7d05b0902f585..6eb91df3c5504f218cfb84a525845724ddd06e3f 100644 (file)
@@ -175,7 +175,7 @@ static int dn_fib_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
 
 unsigned dnet_addr_type(__le16 addr)
 {
-       struct flowi fl = { .nl_u = { .dn_u = { .daddr = addr } } };
+       struct flowi fl = { .fld_dst = addr };
        struct dn_fib_res res;
        unsigned ret = RTN_UNICAST;
        struct dn_fib_table *tb = dn_fib_get_table(RT_TABLE_LOCAL, 0);
index f581f77d1097ca140c74e93072113d88ae0cc9bc..f2b61107df6cd8066d7e3568b1625de309966522 100644 (file)
@@ -1148,21 +1148,13 @@ int inet_sk_rebuild_header(struct sock *sk)
        struct flowi fl = {
                .oif = sk->sk_bound_dev_if,
                .mark = sk->sk_mark,
-               .nl_u = {
-                       .ip4_u = {
-                               .daddr  = daddr,
-                               .saddr  = inet->inet_saddr,
-                               .tos    = RT_CONN_FLAGS(sk),
-                       },
-               },
+               .fl4_dst = daddr,
+               .fl4_src = inet->inet_saddr,
+               .fl4_tos = RT_CONN_FLAGS(sk),
                .proto = sk->sk_protocol,
                .flags = inet_sk_flowi_flags(sk),
-               .uli_u = {
-                       .ports = {
-                               .sport = inet->inet_sport,
-                               .dport = inet->inet_dport,
-                       },
-               },
+               .fl_ip_sport = inet->inet_sport,
+               .fl_ip_dport = inet->inet_dport,
        };
 
        security_sk_classify_flow(sk, &fl);
index d8e540c5b0710327fd44c8f269eb51e6fcfedfc5..7833f17b648a149b8d21e134b2eee0c6d619a97c 100644 (file)
@@ -433,8 +433,8 @@ static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip)
 
 static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
 {
-       struct flowi fl = { .nl_u = { .ip4_u = { .daddr = sip,
-                                                .saddr = tip } } };
+       struct flowi fl = { .fl4_dst = sip,
+                           .fl4_src = tip };
        struct rtable *rt;
        int flag = 0;
        /*unsigned long now; */
@@ -1061,8 +1061,8 @@ static int arp_req_set(struct net *net, struct arpreq *r,
        if (r->arp_flags & ATF_PERM)
                r->arp_flags |= ATF_COM;
        if (dev == NULL) {
-               struct flowi fl = { .nl_u.ip4_u = { .daddr = ip,
-                                                   .tos = RTO_ONLINK } };
+               struct flowi fl = { .fl4_dst = ip,
+                                   .fl4_tos = RTO_ONLINK };
                struct rtable *rt;
                err = ip_route_output_key(net, &rt, &fl);
                if (err != 0)
@@ -1169,8 +1169,8 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
 
        ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
        if (dev == NULL) {
-               struct flowi fl = { .nl_u.ip4_u = { .daddr = ip,
-                                                   .tos = RTO_ONLINK } };
+               struct flowi fl = { .fl4_dst = ip,
+                                   .fl4_tos = RTO_ONLINK };
                struct rtable *rt;
                err = ip_route_output_key(net, &rt, &fl);
                if (err != 0)
index dc94b0316b783fd1c1985e407fb0339782c66df5..71afc26c2df87996ce605d721f84cf3da47aaed2 100644 (file)
@@ -1256,6 +1256,72 @@ errout:
                rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
 }
 
+static size_t inet_get_link_af_size(const struct net_device *dev)
+{
+       struct in_device *in_dev = __in_dev_get_rcu(dev);
+
+       if (!in_dev)
+               return 0;
+
+       return nla_total_size(IPV4_DEVCONF_MAX * 4); /* IFLA_INET_CONF */
+}
+
+static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
+{
+       struct in_device *in_dev = __in_dev_get_rcu(dev);
+       struct nlattr *nla;
+       int i;
+
+       if (!in_dev)
+               return -ENODATA;
+
+       nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4);
+       if (nla == NULL)
+               return -EMSGSIZE;
+
+       for (i = 0; i < IPV4_DEVCONF_MAX; i++)
+               ((u32 *) nla_data(nla))[i] = in_dev->cnf.data[i];
+
+       return 0;
+}
+
+static const struct nla_policy inet_af_policy[IFLA_INET_MAX+1] = {
+       [IFLA_INET_CONF]        = { .type = NLA_NESTED },
+};
+
+static int inet_parse_link_af(struct net_device *dev, const struct nlattr *nla)
+{
+       struct in_device *in_dev = __in_dev_get_rcu(dev);
+       struct nlattr *a, *tb[IFLA_INET_MAX+1];
+       int err, rem;
+
+       if (!in_dev)
+               return -EOPNOTSUPP;
+
+       err = nla_parse_nested(tb, IFLA_INET_MAX, nla, inet_af_policy);
+       if (err < 0)
+               return err;
+
+       if (tb[IFLA_INET_CONF]) {
+               nla_for_each_nested(a, tb[IFLA_INET_CONF], rem) {
+                       int cfgid = nla_type(a);
+
+                       if (nla_len(a) < 4)
+                               return -EINVAL;
+
+                       if (cfgid <= 0 || cfgid > IPV4_DEVCONF_MAX)
+                               return -EINVAL;
+               }
+       }
+
+       if (tb[IFLA_INET_CONF]) {
+               nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
+                       ipv4_devconf_set(in_dev, nla_type(a), nla_get_u32(a));
+       }
+
+       return 0;
+}
+
 #ifdef CONFIG_SYSCTL
 
 static void devinet_copy_dflt_conf(struct net *net, int i)
@@ -1619,6 +1685,13 @@ static __net_initdata struct pernet_operations devinet_ops = {
        .exit = devinet_exit_net,
 };
 
+static struct rtnl_af_ops inet_af_ops = {
+       .family           = AF_INET,
+       .fill_link_af     = inet_fill_link_af,
+       .get_link_af_size = inet_get_link_af_size,
+       .parse_link_af    = inet_parse_link_af,
+};
+
 void __init devinet_init(void)
 {
        register_pernet_subsys(&devinet_ops);
@@ -1626,6 +1699,8 @@ void __init devinet_init(void)
        register_gifconf(PF_INET, inet_gifconf);
        register_netdevice_notifier(&ip_netdev_notifier);
 
+       rtnl_af_register(&inet_af_ops);
+
        rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL);
        rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL);
        rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr);
index eb6f69a8f27aff4db2de494389dd7d9584b93ece..d3a1112b9d9c4f12f488d1e00b1b54ebd84263df 100644 (file)
@@ -158,11 +158,7 @@ static void fib_flush(struct net *net)
 struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
 {
        struct flowi fl = {
-               .nl_u = {
-                       .ip4_u = {
-                               .daddr = addr
-                       }
-               },
+               .fl4_dst = addr,
                .flags = FLOWI_FLAG_MATCH_ANY_IIF
        };
        struct fib_result res = { 0 };
@@ -193,7 +189,7 @@ static inline unsigned __inet_dev_addr_type(struct net *net,
                                            const struct net_device *dev,
                                            __be32 addr)
 {
-       struct flowi            fl = { .nl_u = { .ip4_u = { .daddr = addr } } };
+       struct flowi            fl = { .fl4_dst = addr };
        struct fib_result       res;
        unsigned ret = RTN_BROADCAST;
        struct fib_table *local_table;
@@ -247,13 +243,9 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
 {
        struct in_device *in_dev;
        struct flowi fl = {
-               .nl_u = {
-                       .ip4_u = {
-                               .daddr = src,
-                               .saddr = dst,
-                               .tos = tos
-                       }
-               },
+               .fl4_dst = src,
+               .fl4_src = dst,
+               .fl4_tos = tos,
                .mark = mark,
                .iif = oif
        };
@@ -853,13 +845,9 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb)
        struct fib_result       res;
        struct flowi            fl = {
                .mark = frn->fl_mark,
-               .nl_u = {
-                       .ip4_u = {
-                               .daddr = frn->fl_addr,
-                               .tos = frn->fl_tos,
-                               .scope = frn->fl_scope
-                       }
-               }
+               .fl4_dst = frn->fl_addr,
+               .fl4_tos = frn->fl_tos,
+               .fl4_scope = frn->fl_scope,
        };
 
 #ifdef CONFIG_IP_MULTIPLE_TABLES
index 3e0da3ef6116df8b9b4fa6a70272ddeac8cac0d4..12d3dc3df1b7d683e94cbf42140a9ac40b35029d 100644 (file)
@@ -563,12 +563,8 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
                rcu_read_lock();
                {
                        struct flowi fl = {
-                               .nl_u = {
-                                       .ip4_u = {
-                                               .daddr = nh->nh_gw,
-                                               .scope = cfg->fc_scope + 1,
-                                       },
-                               },
+                               .fl4_dst = nh->nh_gw,
+                               .fl4_scope = cfg->fc_scope + 1,
                                .oif = nh->nh_oif,
                        };
 
index e5d1a44bcbdf04b851d441ea5ebd8fbe44514704..4aa1b7f01ea0c18a5718061fe31eee71f4de883f 100644 (file)
@@ -386,10 +386,9 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
                        daddr = icmp_param->replyopts.faddr;
        }
        {
-               struct flowi fl = { .nl_u = { .ip4_u =
-                                             { .daddr = daddr,
-                                               .saddr = rt->rt_spec_dst,
-                                               .tos = RT_TOS(ip_hdr(skb)->tos) } },
+               struct flowi fl = { .fl4_dst = daddr,
+                                   .fl4_src = rt->rt_spec_dst,
+                                   .fl4_tos = RT_TOS(ip_hdr(skb)->tos),
                                    .proto = IPPROTO_ICMP };
                security_skb_classify_flow(skb, &fl);
                if (ip_route_output_key(net, &rt, &fl))
@@ -506,8 +505,8 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
                struct net_device *dev = NULL;
 
                rcu_read_lock();
-               if (rt->fl.iif &&
-                       net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
+               if (rt_is_input_route(rt) &&
+                   net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
                        dev = dev_get_by_index_rcu(net, rt->fl.iif);
 
                if (dev)
@@ -542,22 +541,13 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 
        {
                struct flowi fl = {
-                       .nl_u = {
-                               .ip4_u = {
-                                       .daddr = icmp_param.replyopts.srr ?
-                                               icmp_param.replyopts.faddr :
-                                               iph->saddr,
-                                       .saddr = saddr,
-                                       .tos = RT_TOS(tos)
-                               }
-                       },
+                       .fl4_dst = icmp_param.replyopts.srr ?
+                                  icmp_param.replyopts.faddr : iph->saddr,
+                       .fl4_src = saddr,
+                       .fl4_tos = RT_TOS(tos),
                        .proto = IPPROTO_ICMP,
-                       .uli_u = {
-                               .icmpt = {
-                                       .type = type,
-                                       .code = code
-                               }
-                       }
+                       .fl_icmp_type = type,
+                       .fl_icmp_code = code,
                };
                int err;
                struct rtable *rt2;
index 3c53c2d89e3b47b3e42629bfe37086aeeeda7aa8..e0e77e297de32148356da4c309856dacabee2387 100644 (file)
@@ -149,21 +149,37 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc);
 static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
                         int sfcount, __be32 *psfsrc, int delta);
 
+
+static void ip_mc_list_reclaim(struct rcu_head *head)
+{
+       kfree(container_of(head, struct ip_mc_list, rcu));
+}
+
 static void ip_ma_put(struct ip_mc_list *im)
 {
        if (atomic_dec_and_test(&im->refcnt)) {
                in_dev_put(im->interface);
-               kfree(im);
+               call_rcu(&im->rcu, ip_mc_list_reclaim);
        }
 }
 
+#define for_each_pmc_rcu(in_dev, pmc)                          \
+       for (pmc = rcu_dereference(in_dev->mc_list);            \
+            pmc != NULL;                                       \
+            pmc = rcu_dereference(pmc->next_rcu))
+
+#define for_each_pmc_rtnl(in_dev, pmc)                         \
+       for (pmc = rtnl_dereference(in_dev->mc_list);           \
+            pmc != NULL;                                       \
+            pmc = rtnl_dereference(pmc->next_rcu))
+
 #ifdef CONFIG_IP_MULTICAST
 
 /*
  *     Timer management
  */
 
-static __inline__ void igmp_stop_timer(struct ip_mc_list *im)
+static void igmp_stop_timer(struct ip_mc_list *im)
 {
        spin_lock_bh(&im->lock);
        if (del_timer(&im->timer))
@@ -284,6 +300,8 @@ igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
        return scount;
 }
 
+#define igmp_skb_size(skb) (*(unsigned int *)((skb)->cb))
+
 static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
 {
        struct sk_buff *skb;
@@ -292,14 +310,20 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
        struct igmpv3_report *pig;
        struct net *net = dev_net(dev);
 
-       skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
-       if (skb == NULL)
-               return NULL;
+       while (1) {
+               skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev),
+                               GFP_ATOMIC | __GFP_NOWARN);
+               if (skb)
+                       break;
+               size >>= 1;
+               if (size < 256)
+                       return NULL;
+       }
+       igmp_skb_size(skb) = size;
 
        {
                struct flowi fl = { .oif = dev->ifindex,
-                                   .nl_u = { .ip4_u = {
-                                   .daddr = IGMPV3_ALL_MCR } },
+                                   .fl4_dst = IGMPV3_ALL_MCR,
                                    .proto = IPPROTO_IGMP };
                if (ip_route_output_key(net, &rt, &fl)) {
                        kfree_skb(skb);
@@ -384,7 +408,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
        return skb;
 }
 
-#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? (skb)->dev->mtu - (skb)->len : \
+#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? igmp_skb_size(skb) - (skb)->len : \
        skb_tailroom(skb)) : 0)
 
 static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
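
igmpv3_newpack() now degrades gracefully under memory pressure: it halves the requested allocation until it succeeds (down to a 256-byte floor), stashes the size it actually obtained in skb->cb via igmp_skb_size(), and AVAILABLE() then budgets report space against that value instead of the device MTU. The retry idea in isolation, as a minimal sketch with an illustrative helper name:

#include <linux/slab.h>

/*
 * Minimal sketch of the halve-and-retry allocation used above.  Only the
 * 256-byte floor mirrors the diff; alloc_backoff() is illustrative.
 */
static void *alloc_backoff(size_t want, size_t *got)
{
        void *p;

        while ((p = kmalloc(want, GFP_ATOMIC | __GFP_NOWARN)) == NULL) {
                want >>= 1;                     /* ask for half as much next time */
                if (want < 256)
                        return NULL;            /* give up below the floor        */
        }
        *got = want;                            /* caller budgets against this    */
        return p;
}
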
@@ -502,8 +526,8 @@ static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
        int type;
 
        if (!pmc) {
-               read_lock(&in_dev->mc_list_lock);
-               for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
+               rcu_read_lock();
+               for_each_pmc_rcu(in_dev, pmc) {
                        if (pmc->multiaddr == IGMP_ALL_HOSTS)
                                continue;
                        spin_lock_bh(&pmc->lock);
@@ -514,7 +538,7 @@ static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
                        skb = add_grec(skb, pmc, type, 0, 0);
                        spin_unlock_bh(&pmc->lock);
                }
-               read_unlock(&in_dev->mc_list_lock);
+               rcu_read_unlock();
        } else {
                spin_lock_bh(&pmc->lock);
                if (pmc->sfcount[MCAST_EXCLUDE])
@@ -556,7 +580,7 @@ static void igmpv3_send_cr(struct in_device *in_dev)
        struct sk_buff *skb = NULL;
        int type, dtype;
 
-       read_lock(&in_dev->mc_list_lock);
+       rcu_read_lock();
        spin_lock_bh(&in_dev->mc_tomb_lock);
 
        /* deleted MCA's */
@@ -593,7 +617,7 @@ static void igmpv3_send_cr(struct in_device *in_dev)
        spin_unlock_bh(&in_dev->mc_tomb_lock);
 
        /* change recs */
-       for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
+       for_each_pmc_rcu(in_dev, pmc) {
                spin_lock_bh(&pmc->lock);
                if (pmc->sfcount[MCAST_EXCLUDE]) {
                        type = IGMPV3_BLOCK_OLD_SOURCES;
@@ -616,7 +640,7 @@ static void igmpv3_send_cr(struct in_device *in_dev)
                }
                spin_unlock_bh(&pmc->lock);
        }
-       read_unlock(&in_dev->mc_list_lock);
+       rcu_read_unlock();
 
        if (!skb)
                return;
@@ -644,7 +668,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
 
        {
                struct flowi fl = { .oif = dev->ifindex,
-                                   .nl_u = { .ip4_u = { .daddr = dst } },
+                                   .fl4_dst = dst,
                                    .proto = IPPROTO_IGMP };
                if (ip_route_output_key(net, &rt, &fl))
                        return -1;
@@ -813,14 +837,14 @@ static void igmp_heard_report(struct in_device *in_dev, __be32 group)
        if (group == IGMP_ALL_HOSTS)
                return;
 
-       read_lock(&in_dev->mc_list_lock);
-       for (im=in_dev->mc_list; im!=NULL; im=im->next) {
+       rcu_read_lock();
+       for_each_pmc_rcu(in_dev, im) {
                if (im->multiaddr == group) {
                        igmp_stop_timer(im);
                        break;
                }
        }
-       read_unlock(&in_dev->mc_list_lock);
+       rcu_read_unlock();
 }
 
 static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
@@ -906,8 +930,8 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
         * - Use the igmp->igmp_code field as the maximum
         *   delay possible
         */
-       read_lock(&in_dev->mc_list_lock);
-       for (im=in_dev->mc_list; im!=NULL; im=im->next) {
+       rcu_read_lock();
+       for_each_pmc_rcu(in_dev, im) {
                int changed;
 
                if (group && group != im->multiaddr)
@@ -925,7 +949,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
                if (changed)
                        igmp_mod_timer(im, max_delay);
        }
-       read_unlock(&in_dev->mc_list_lock);
+       rcu_read_unlock();
 }
 
 /* called in rcu_read_lock() section */
@@ -961,7 +985,7 @@ int igmp_rcv(struct sk_buff *skb)
        case IGMP_HOST_MEMBERSHIP_REPORT:
        case IGMPV2_HOST_MEMBERSHIP_REPORT:
                /* Is it our report looped back? */
-               if (skb_rtable(skb)->fl.iif == 0)
+               if (rt_is_output_route(skb_rtable(skb)))
                        break;
                /* don't rely on MC router hearing unicast reports */
                if (skb->pkt_type == PACKET_MULTICAST ||
@@ -1110,8 +1134,8 @@ static void igmpv3_clear_delrec(struct in_device *in_dev)
                kfree(pmc);
        }
        /* clear dead sources, too */
-       read_lock(&in_dev->mc_list_lock);
-       for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
+       rcu_read_lock();
+       for_each_pmc_rcu(in_dev, pmc) {
                struct ip_sf_list *psf, *psf_next;
 
                spin_lock_bh(&pmc->lock);
@@ -1123,7 +1147,7 @@ static void igmpv3_clear_delrec(struct in_device *in_dev)
                        kfree(psf);
                }
        }
-       read_unlock(&in_dev->mc_list_lock);
+       rcu_read_unlock();
 }
 #endif
 
@@ -1209,7 +1233,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
 
        ASSERT_RTNL();
 
-       for (im=in_dev->mc_list; im; im=im->next) {
+       for_each_pmc_rtnl(in_dev, im) {
                if (im->multiaddr == addr) {
                        im->users++;
                        ip_mc_add_src(in_dev, &addr, MCAST_EXCLUDE, 0, NULL, 0);
@@ -1217,7 +1241,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
                }
        }
 
-       im = kmalloc(sizeof(*im), GFP_KERNEL);
+       im = kzalloc(sizeof(*im), GFP_KERNEL);
        if (!im)
                goto out;
 
@@ -1227,26 +1251,18 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
        im->multiaddr = addr;
        /* initial mode is (EX, empty) */
        im->sfmode = MCAST_EXCLUDE;
-       im->sfcount[MCAST_INCLUDE] = 0;
        im->sfcount[MCAST_EXCLUDE] = 1;
-       im->sources = NULL;
-       im->tomb = NULL;
-       im->crcount = 0;
        atomic_set(&im->refcnt, 1);
        spin_lock_init(&im->lock);
 #ifdef CONFIG_IP_MULTICAST
-       im->tm_running = 0;
        setup_timer(&im->timer, &igmp_timer_expire, (unsigned long)im);
        im->unsolicit_count = IGMP_Unsolicited_Report_Count;
-       im->reporter = 0;
-       im->gsquery = 0;
 #endif
-       im->loaded = 0;
-       write_lock_bh(&in_dev->mc_list_lock);
-       im->next = in_dev->mc_list;
-       in_dev->mc_list = im;
+
+       im->next_rcu = in_dev->mc_list;
        in_dev->mc_count++;
-       write_unlock_bh(&in_dev->mc_list_lock);
+       rcu_assign_pointer(in_dev->mc_list, im);
+
 #ifdef CONFIG_IP_MULTICAST
        igmpv3_del_delrec(in_dev, im->multiaddr);
 #endif
@@ -1260,26 +1276,32 @@ EXPORT_SYMBOL(ip_mc_inc_group);
 
 /*
  *     Resend IGMP JOIN report; used for bonding.
+ *     Called with rcu_read_lock()
  */
-void ip_mc_rejoin_group(struct ip_mc_list *im)
+void ip_mc_rejoin_groups(struct in_device *in_dev)
 {
 #ifdef CONFIG_IP_MULTICAST
-       struct in_device *in_dev = im->interface;
+       struct ip_mc_list *im;
+       int type;
 
-       if (im->multiaddr == IGMP_ALL_HOSTS)
-               return;
+       for_each_pmc_rcu(in_dev, im) {
+               if (im->multiaddr == IGMP_ALL_HOSTS)
+                       continue;
 
-       /* a failover is happening and switches
-        * must be notified immediately */
-       if (IGMP_V1_SEEN(in_dev))
-               igmp_send_report(in_dev, im, IGMP_HOST_MEMBERSHIP_REPORT);
-       else if (IGMP_V2_SEEN(in_dev))
-               igmp_send_report(in_dev, im, IGMPV2_HOST_MEMBERSHIP_REPORT);
-       else
-               igmp_send_report(in_dev, im, IGMPV3_HOST_MEMBERSHIP_REPORT);
+               /* a failover is happening and switches
+                * must be notified immediately
+                */
+               if (IGMP_V1_SEEN(in_dev))
+                       type = IGMP_HOST_MEMBERSHIP_REPORT;
+               else if (IGMP_V2_SEEN(in_dev))
+                       type = IGMPV2_HOST_MEMBERSHIP_REPORT;
+               else
+                       type = IGMPV3_HOST_MEMBERSHIP_REPORT;
+               igmp_send_report(in_dev, im, type);
+       }
 #endif
 }
-EXPORT_SYMBOL(ip_mc_rejoin_group);
+EXPORT_SYMBOL(ip_mc_rejoin_groups);
 
 /*
  *     A socket has left a multicast group on device dev
@@ -1287,17 +1309,18 @@ EXPORT_SYMBOL(ip_mc_rejoin_group);
 
 void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
 {
-       struct ip_mc_list *i, **ip;
+       struct ip_mc_list *i;
+       struct ip_mc_list __rcu **ip;
 
        ASSERT_RTNL();
 
-       for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) {
+       for (ip = &in_dev->mc_list;
+            (i = rtnl_dereference(*ip)) != NULL;
+            ip = &i->next_rcu) {
                if (i->multiaddr == addr) {
                        if (--i->users == 0) {
-                               write_lock_bh(&in_dev->mc_list_lock);
-                               *ip = i->next;
+                               *ip = i->next_rcu;
                                in_dev->mc_count--;
-                               write_unlock_bh(&in_dev->mc_list_lock);
                                igmp_group_dropped(i);
 
                                if (!in_dev->dead)
@@ -1316,34 +1339,34 @@ EXPORT_SYMBOL(ip_mc_dec_group);
 
 void ip_mc_unmap(struct in_device *in_dev)
 {
-       struct ip_mc_list *i;
+       struct ip_mc_list *pmc;
 
        ASSERT_RTNL();
 
-       for (i = in_dev->mc_list; i; i = i->next)
-               igmp_group_dropped(i);
+       for_each_pmc_rtnl(in_dev, pmc)
+               igmp_group_dropped(pmc);
 }
 
 void ip_mc_remap(struct in_device *in_dev)
 {
-       struct ip_mc_list *i;
+       struct ip_mc_list *pmc;
 
        ASSERT_RTNL();
 
-       for (i = in_dev->mc_list; i; i = i->next)
-               igmp_group_added(i);
+       for_each_pmc_rtnl(in_dev, pmc)
+               igmp_group_added(pmc);
 }
 
 /* Device going down */
 
 void ip_mc_down(struct in_device *in_dev)
 {
-       struct ip_mc_list *i;
+       struct ip_mc_list *pmc;
 
        ASSERT_RTNL();
 
-       for (i=in_dev->mc_list; i; i=i->next)
-               igmp_group_dropped(i);
+       for_each_pmc_rtnl(in_dev, pmc)
+               igmp_group_dropped(pmc);
 
 #ifdef CONFIG_IP_MULTICAST
        in_dev->mr_ifc_count = 0;
@@ -1374,7 +1397,6 @@ void ip_mc_init_dev(struct in_device *in_dev)
        in_dev->mr_qrv = IGMP_Unsolicited_Report_Count;
 #endif
 
-       rwlock_init(&in_dev->mc_list_lock);
        spin_lock_init(&in_dev->mc_tomb_lock);
 }
 
@@ -1382,14 +1404,14 @@ void ip_mc_init_dev(struct in_device *in_dev)
 
 void ip_mc_up(struct in_device *in_dev)
 {
-       struct ip_mc_list *i;
+       struct ip_mc_list *pmc;
 
        ASSERT_RTNL();
 
        ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
 
-       for (i=in_dev->mc_list; i; i=i->next)
-               igmp_group_added(i);
+       for_each_pmc_rtnl(in_dev, pmc)
+               igmp_group_added(pmc);
 }
 
 /*
@@ -1405,24 +1427,19 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
        /* Deactivate timers */
        ip_mc_down(in_dev);
 
-       write_lock_bh(&in_dev->mc_list_lock);
-       while ((i = in_dev->mc_list) != NULL) {
-               in_dev->mc_list = i->next;
+       while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
+               in_dev->mc_list = i->next_rcu;
                in_dev->mc_count--;
-               write_unlock_bh(&in_dev->mc_list_lock);
+
                igmp_group_dropped(i);
                ip_ma_put(i);
-
-               write_lock_bh(&in_dev->mc_list_lock);
        }
-       write_unlock_bh(&in_dev->mc_list_lock);
 }
 
 /* RTNL is locked */
 static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
 {
-       struct flowi fl = { .nl_u = { .ip4_u =
-                                     { .daddr = imr->imr_multiaddr.s_addr } } };
+       struct flowi fl = { .fl4_dst = imr->imr_multiaddr.s_addr };
        struct rtable *rt;
        struct net_device *dev = NULL;
        struct in_device *idev = NULL;
@@ -1513,18 +1530,18 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
 
        if (!in_dev)
                return -ENODEV;
-       read_lock(&in_dev->mc_list_lock);
-       for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
+       rcu_read_lock();
+       for_each_pmc_rcu(in_dev, pmc) {
                if (*pmca == pmc->multiaddr)
                        break;
        }
        if (!pmc) {
                /* MCA not found?? bug */
-               read_unlock(&in_dev->mc_list_lock);
+               rcu_read_unlock();
                return -ESRCH;
        }
        spin_lock_bh(&pmc->lock);
-       read_unlock(&in_dev->mc_list_lock);
+       rcu_read_unlock();
 #ifdef CONFIG_IP_MULTICAST
        sf_markstate(pmc);
 #endif
@@ -1685,18 +1702,18 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
 
        if (!in_dev)
                return -ENODEV;
-       read_lock(&in_dev->mc_list_lock);
-       for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
+       rcu_read_lock();
+       for_each_pmc_rcu(in_dev, pmc) {
                if (*pmca == pmc->multiaddr)
                        break;
        }
        if (!pmc) {
                /* MCA not found?? bug */
-               read_unlock(&in_dev->mc_list_lock);
+               rcu_read_unlock();
                return -ESRCH;
        }
        spin_lock_bh(&pmc->lock);
-       read_unlock(&in_dev->mc_list_lock);
+       rcu_read_unlock();
 
 #ifdef CONFIG_IP_MULTICAST
        sf_markstate(pmc);
@@ -1793,7 +1810,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
 
        err = -EADDRINUSE;
        ifindex = imr->imr_ifindex;
-       for (i = inet->mc_list; i; i = i->next) {
+       for_each_pmc_rtnl(inet, i) {
                if (i->multi.imr_multiaddr.s_addr == addr &&
                    i->multi.imr_ifindex == ifindex)
                        goto done;
@@ -1807,7 +1824,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
                goto done;
 
        memcpy(&iml->multi, imr, sizeof(*imr));
-       iml->next = inet->mc_list;
+       iml->next_rcu = inet->mc_list;
        iml->sflist = NULL;
        iml->sfmode = MCAST_EXCLUDE;
        rcu_assign_pointer(inet->mc_list, iml);
@@ -1821,17 +1838,14 @@ EXPORT_SYMBOL(ip_mc_join_group);
 
 static void ip_sf_socklist_reclaim(struct rcu_head *rp)
 {
-       struct ip_sf_socklist *psf;
-
-       psf = container_of(rp, struct ip_sf_socklist, rcu);
+       kfree(container_of(rp, struct ip_sf_socklist, rcu));
        /* sk_omem_alloc should have been decreased by the caller*/
-       kfree(psf);
 }
 
 static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
                           struct in_device *in_dev)
 {
-       struct ip_sf_socklist *psf = iml->sflist;
+       struct ip_sf_socklist *psf = rtnl_dereference(iml->sflist);
        int err;
 
        if (psf == NULL) {
@@ -1851,11 +1865,8 @@ static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
 
 static void ip_mc_socklist_reclaim(struct rcu_head *rp)
 {
-       struct ip_mc_socklist *iml;
-
-       iml = container_of(rp, struct ip_mc_socklist, rcu);
+       kfree(container_of(rp, struct ip_mc_socklist, rcu));
        /* sk_omem_alloc should have been decreased by the caller*/
-       kfree(iml);
 }
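Both reclaim callbacks now simply kfree() the object handed to call_rcu(); the sk_omem_alloc accounting stays with the caller, which unlinks the filter list and defers the free past a grace period so lockless readers of the old pointer remain safe. Roughly, on the assumed caller side (psl being the filter list just replaced):

	atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
	call_rcu(&psl->rcu, ip_sf_socklist_reclaim);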
 
 
@@ -1866,7 +1877,8 @@ static void ip_mc_socklist_reclaim(struct rcu_head *rp)
 int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
 {
        struct inet_sock *inet = inet_sk(sk);
-       struct ip_mc_socklist *iml, **imlp;
+       struct ip_mc_socklist *iml;
+       struct ip_mc_socklist __rcu **imlp;
        struct in_device *in_dev;
        struct net *net = sock_net(sk);
        __be32 group = imr->imr_multiaddr.s_addr;
@@ -1876,7 +1888,9 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
        rtnl_lock();
        in_dev = ip_mc_find_dev(net, imr);
        ifindex = imr->imr_ifindex;
-       for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) {
+       for (imlp = &inet->mc_list;
+            (iml = rtnl_dereference(*imlp)) != NULL;
+            imlp = &iml->next_rcu) {
                if (iml->multi.imr_multiaddr.s_addr != group)
                        continue;
                if (ifindex) {
@@ -1888,7 +1902,7 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
 
                (void) ip_mc_leave_src(sk, iml, in_dev);
 
-               rcu_assign_pointer(*imlp, iml->next);
+               *imlp = iml->next_rcu;
 
                if (in_dev)
                        ip_mc_dec_group(in_dev, group);
@@ -1934,7 +1948,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
        }
        err = -EADDRNOTAVAIL;
 
-       for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
+       for_each_pmc_rtnl(inet, pmc) {
                if ((pmc->multi.imr_multiaddr.s_addr ==
                     imr.imr_multiaddr.s_addr) &&
                    (pmc->multi.imr_ifindex == imr.imr_ifindex))
@@ -1958,7 +1972,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
                pmc->sfmode = omode;
        }
 
-       psl = pmc->sflist;
+       psl = rtnl_dereference(pmc->sflist);
        if (!add) {
                if (!psl)
                        goto done;      /* err = -EADDRNOTAVAIL */
@@ -2077,7 +2091,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
                goto done;
        }
 
-       for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
+       for_each_pmc_rtnl(inet, pmc) {
                if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
                    pmc->multi.imr_ifindex == imr.imr_ifindex)
                        break;
@@ -2107,7 +2121,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
                (void) ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
                                     msf->imsf_fmode, 0, NULL, 0);
        }
-       psl = pmc->sflist;
+       psl = rtnl_dereference(pmc->sflist);
        if (psl) {
                (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
                        psl->sl_count, psl->sl_addr, 0);
@@ -2155,7 +2169,7 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
        }
        err = -EADDRNOTAVAIL;
 
-       for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
+       for_each_pmc_rtnl(inet, pmc) {
                if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
                    pmc->multi.imr_ifindex == imr.imr_ifindex)
                        break;
@@ -2163,7 +2177,7 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
        if (!pmc)               /* must have a prior join */
                goto done;
        msf->imsf_fmode = pmc->sfmode;
-       psl = pmc->sflist;
+       psl = rtnl_dereference(pmc->sflist);
        rtnl_unlock();
        if (!psl) {
                len = 0;
@@ -2208,7 +2222,7 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
 
        err = -EADDRNOTAVAIL;
 
-       for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
+       for_each_pmc_rtnl(inet, pmc) {
                if (pmc->multi.imr_multiaddr.s_addr == addr &&
                    pmc->multi.imr_ifindex == gsf->gf_interface)
                        break;
@@ -2216,7 +2230,7 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
        if (!pmc)               /* must have a prior join */
                goto done;
        gsf->gf_fmode = pmc->sfmode;
-       psl = pmc->sflist;
+       psl = rtnl_dereference(pmc->sflist);
        rtnl_unlock();
        count = psl ? psl->sl_count : 0;
        copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
@@ -2257,7 +2271,7 @@ int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
                goto out;
 
        rcu_read_lock();
-       for (pmc=rcu_dereference(inet->mc_list); pmc; pmc=rcu_dereference(pmc->next)) {
+       for_each_pmc_rcu(inet, pmc) {
                if (pmc->multi.imr_multiaddr.s_addr == loc_addr &&
                    pmc->multi.imr_ifindex == dif)
                        break;
@@ -2265,7 +2279,7 @@ int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
        ret = inet->mc_all;
        if (!pmc)
                goto unlock;
-       psl = pmc->sflist;
+       psl = rcu_dereference(pmc->sflist);
        ret = (pmc->sfmode == MCAST_EXCLUDE);
        if (!psl)
                goto unlock;
@@ -2300,10 +2314,10 @@ void ip_mc_drop_socket(struct sock *sk)
                return;
 
        rtnl_lock();
-       while ((iml = inet->mc_list) != NULL) {
+       while ((iml = rtnl_dereference(inet->mc_list)) != NULL) {
                struct in_device *in_dev;
-               rcu_assign_pointer(inet->mc_list, iml->next);
 
+               inet->mc_list = iml->next_rcu;
                in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
                (void) ip_mc_leave_src(sk, iml, in_dev);
                if (in_dev != NULL)
@@ -2321,8 +2335,8 @@ int ip_check_mc(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 p
        struct ip_sf_list *psf;
        int rv = 0;
 
-       read_lock(&in_dev->mc_list_lock);
-       for (im=in_dev->mc_list; im; im=im->next) {
+       rcu_read_lock();
+       for_each_pmc_rcu(in_dev, im) {
                if (im->multiaddr == mc_addr)
                        break;
        }
@@ -2343,7 +2357,7 @@ int ip_check_mc(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 p
                } else
                        rv = 1; /* unspecified source; tentatively allow */
        }
-       read_unlock(&in_dev->mc_list_lock);
+       rcu_read_unlock();
        return rv;
 }
 
@@ -2369,13 +2383,11 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
                in_dev = __in_dev_get_rcu(state->dev);
                if (!in_dev)
                        continue;
-               read_lock(&in_dev->mc_list_lock);
-               im = in_dev->mc_list;
+               im = rcu_dereference(in_dev->mc_list);
                if (im) {
                        state->in_dev = in_dev;
                        break;
                }
-               read_unlock(&in_dev->mc_list_lock);
        }
        return im;
 }
@@ -2383,11 +2395,9 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
 static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_list *im)
 {
        struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
-       im = im->next;
-       while (!im) {
-               if (likely(state->in_dev != NULL))
-                       read_unlock(&state->in_dev->mc_list_lock);
 
+       im = rcu_dereference(im->next_rcu);
+       while (!im) {
                state->dev = next_net_device_rcu(state->dev);
                if (!state->dev) {
                        state->in_dev = NULL;
@@ -2396,8 +2406,7 @@ static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_li
                state->in_dev = __in_dev_get_rcu(state->dev);
                if (!state->in_dev)
                        continue;
-               read_lock(&state->in_dev->mc_list_lock);
-               im = state->in_dev->mc_list;
+               im = rcu_dereference(state->in_dev->mc_list);
        }
        return im;
 }
@@ -2433,10 +2442,8 @@ static void igmp_mc_seq_stop(struct seq_file *seq, void *v)
        __releases(rcu)
 {
        struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
-       if (likely(state->in_dev != NULL)) {
-               read_unlock(&state->in_dev->mc_list_lock);
-               state->in_dev = NULL;
-       }
+
+       state->in_dev = NULL;
        state->dev = NULL;
        rcu_read_unlock();
 }
@@ -2458,7 +2465,7 @@ static int igmp_mc_seq_show(struct seq_file *seq, void *v)
                querier = "NONE";
 #endif
 
-               if (state->in_dev->mc_list == im) {
+               if (rcu_dereference(state->in_dev->mc_list) == im) {
                        seq_printf(seq, "%d\t%-10s: %5d %7s\n",
                                   state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier);
                }
@@ -2517,8 +2524,7 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
                idev = __in_dev_get_rcu(state->dev);
                if (unlikely(idev == NULL))
                        continue;
-               read_lock(&idev->mc_list_lock);
-               im = idev->mc_list;
+               im = rcu_dereference(idev->mc_list);
                if (likely(im != NULL)) {
                        spin_lock_bh(&im->lock);
                        psf = im->sources;
@@ -2529,7 +2535,6 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
                        }
                        spin_unlock_bh(&im->lock);
                }
-               read_unlock(&idev->mc_list_lock);
        }
        return psf;
 }
@@ -2543,9 +2548,6 @@ static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_l
                spin_unlock_bh(&state->im->lock);
                state->im = state->im->next;
                while (!state->im) {
-                       if (likely(state->idev != NULL))
-                               read_unlock(&state->idev->mc_list_lock);
-
                        state->dev = next_net_device_rcu(state->dev);
                        if (!state->dev) {
                                state->idev = NULL;
@@ -2554,8 +2556,7 @@ static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_l
                        state->idev = __in_dev_get_rcu(state->dev);
                        if (!state->idev)
                                continue;
-                       read_lock(&state->idev->mc_list_lock);
-                       state->im = state->idev->mc_list;
+                       state->im = rcu_dereference(state->idev->mc_list);
                }
                if (!state->im)
                        break;
@@ -2601,10 +2602,7 @@ static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
                spin_unlock_bh(&state->im->lock);
                state->im = NULL;
        }
-       if (likely(state->idev != NULL)) {
-               read_unlock(&state->idev->mc_list_lock);
-               state->idev = NULL;
-       }
+       state->idev = NULL;
        state->dev = NULL;
        rcu_read_unlock();
 }
index 7174370b1195b8f79c738c5b78f4156421bb50a1..06f5f8f482f0e092d86571b75846a60ea865d612 100644 (file)
@@ -358,17 +358,14 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
        struct ip_options *opt = inet_rsk(req)->opt;
        struct flowi fl = { .oif = sk->sk_bound_dev_if,
                            .mark = sk->sk_mark,
-                           .nl_u = { .ip4_u =
-                                     { .daddr = ((opt && opt->srr) ?
-                                                 opt->faddr :
-                                                 ireq->rmt_addr),
-                                       .saddr = ireq->loc_addr,
-                                       .tos = RT_CONN_FLAGS(sk) } },
+                           .fl4_dst = ((opt && opt->srr) ?
+                                         opt->faddr : ireq->rmt_addr),
+                           .fl4_src = ireq->loc_addr,
+                           .fl4_tos = RT_CONN_FLAGS(sk),
                            .proto = sk->sk_protocol,
                            .flags = inet_sk_flowi_flags(sk),
-                           .uli_u = { .ports =
-                                      { .sport = inet_sk(sk)->inet_sport,
-                                        .dport = ireq->rmt_port } } };
+                           .fl_ip_sport = inet_sk(sk)->inet_sport,
+                           .fl_ip_dport = ireq->rmt_port };
        struct net *net = sock_net(sk);
 
        security_req_classify_flow(req, &fl);
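This and the following flowi conversions drop the nested .nl_u.ip4_u / .uli_u.ports initialisers in favour of flat accessor names. In this merge those names are presumably plain aliases in include/net/flow.h, along the lines of:

#define fl4_dst		nl_u.ip4_u.daddr
#define fl4_src		nl_u.ip4_u.saddr
#define fl4_tos		nl_u.ip4_u.tos
#define fl4_scope	nl_u.ip4_u.scope
#define fl_ip_sport	uli_u.ports.sport
#define fl_ip_dport	uli_u.ports.dport

so the in-memory layout of struct flowi is unchanged and only the initialiser syntax gets shorter.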
index 70ff77f02eee3b345fa94efd35563a95b2b582ef..897210adaa772f5cbb9721d80583c07ff7e4312b 100644 (file)
@@ -634,7 +634,7 @@ static int ipgre_rcv(struct sk_buff *skb)
 #ifdef CONFIG_NET_IPGRE_BROADCAST
                if (ipv4_is_multicast(iph->daddr)) {
                        /* Looped back packet, drop it! */
-                       if (skb_rtable(skb)->fl.iif == 0)
+                       if (rt_is_output_route(skb_rtable(skb)))
                                goto drop;
                        tunnel->dev->stats.multicast++;
                        skb->pkt_type = PACKET_BROADCAST;
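rt_is_output_route() and rt_is_input_route() replace the open-coded rt->fl.iif tests that recur in the route.c and ipmr.c hunks below. Presumably they are trivial inline helpers in include/net/route.h, roughly:

static inline bool rt_is_input_route(struct rtable *rt)
{
	return rt->fl.iif != 0;
}

static inline bool rt_is_output_route(struct rtable *rt)
{
	return rt->fl.iif == 0;
}

Encapsulating the test keeps the callers readable and leaves a single place to change if the input/output distinction is later stored differently.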
@@ -772,16 +772,11 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
        {
                struct flowi fl = {
                        .oif = tunnel->parms.link,
-                       .nl_u = {
-                               .ip4_u = {
-                                       .daddr = dst,
-                                       .saddr = tiph->saddr,
-                                       .tos = RT_TOS(tos)
-                               }
-                       },
-                       .proto = IPPROTO_GRE
-               }
-;
+                       .fl4_dst = dst,
+                       .fl4_src = tiph->saddr,
+                       .fl4_tos = RT_TOS(tos),
+                       .fl_gre_key = tunnel->parms.o_key
+               };
                if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
                        dev->stats.tx_carrier_errors++;
                        goto tx_error;
@@ -951,14 +946,11 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
        if (iph->daddr) {
                struct flowi fl = {
                        .oif = tunnel->parms.link,
-                       .nl_u = {
-                               .ip4_u = {
-                                       .daddr = iph->daddr,
-                                       .saddr = iph->saddr,
-                                       .tos = RT_TOS(iph->tos)
-                               }
-                       },
-                       .proto = IPPROTO_GRE
+                       .fl4_dst = iph->daddr,
+                       .fl4_src = iph->saddr,
+                       .fl4_tos = RT_TOS(iph->tos),
+                       .proto = IPPROTO_GRE,
+                       .fl_gre_key = tunnel->parms.o_key
                };
                struct rtable *rt;
 
@@ -1216,14 +1208,11 @@ static int ipgre_open(struct net_device *dev)
        if (ipv4_is_multicast(t->parms.iph.daddr)) {
                struct flowi fl = {
                        .oif = t->parms.link,
-                       .nl_u = {
-                               .ip4_u = {
-                                       .daddr = t->parms.iph.daddr,
-                                       .saddr = t->parms.iph.saddr,
-                                       .tos = RT_TOS(t->parms.iph.tos)
-                               }
-                       },
-                       .proto = IPPROTO_GRE
+                       .fl4_dst = t->parms.iph.daddr,
+                       .fl4_src = t->parms.iph.saddr,
+                       .fl4_tos = RT_TOS(t->parms.iph.tos),
+                       .proto = IPPROTO_GRE,
+                       .fl_gre_key = t->parms.o_key
                };
                struct rtable *rt;
 
index 439d2a34ee4411b932eefb3a6fc51383e8db7125..5090c7ff525e7d202c08d2e727f9dc1d815121c8 100644 (file)
@@ -341,15 +341,13 @@ int ip_queue_xmit(struct sk_buff *skb)
                {
                        struct flowi fl = { .oif = sk->sk_bound_dev_if,
                                            .mark = sk->sk_mark,
-                                           .nl_u = { .ip4_u =
-                                                     { .daddr = daddr,
-                                                       .saddr = inet->inet_saddr,
-                                                       .tos = RT_CONN_FLAGS(sk) } },
+                                           .fl4_dst = daddr,
+                                           .fl4_src = inet->inet_saddr,
+                                           .fl4_tos = RT_CONN_FLAGS(sk),
                                            .proto = sk->sk_protocol,
                                            .flags = inet_sk_flowi_flags(sk),
-                                           .uli_u = { .ports =
-                                                      { .sport = inet->inet_sport,
-                                                        .dport = inet->inet_dport } } };
+                                           .fl_ip_sport = inet->inet_sport,
+                                           .fl_ip_dport = inet->inet_dport };
 
                        /* If this fails, retransmit mechanism of transport layer will
                         * keep trying until route appears or the connection times
@@ -1404,14 +1402,11 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
 
        {
                struct flowi fl = { .oif = arg->bound_dev_if,
-                                   .nl_u = { .ip4_u =
-                                             { .daddr = daddr,
-                                               .saddr = rt->rt_spec_dst,
-                                               .tos = RT_TOS(ip_hdr(skb)->tos) } },
-                                   /* Not quite clean, but right. */
-                                   .uli_u = { .ports =
-                                              { .sport = tcp_hdr(skb)->dest,
-                                                .dport = tcp_hdr(skb)->source } },
+                                   .fl4_dst = daddr,
+                                   .fl4_src = rt->rt_spec_dst,
+                                   .fl4_tos = RT_TOS(ip_hdr(skb)->tos),
+                                   .fl_ip_sport = tcp_hdr(skb)->dest,
+                                   .fl_ip_dport = tcp_hdr(skb)->source,
                                    .proto = sk->sk_protocol,
                                    .flags = ip_reply_arg_flowi_flags(arg) };
                security_skb_classify_flow(skb, &fl);
index cd300aaee78f542630f40ab9a34f9b10af8eec9e..e70ad581398e7f4424c9bf81dee6fcb1d6b4b62e 100644 (file)
@@ -463,13 +463,9 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
        {
                struct flowi fl = {
                        .oif = tunnel->parms.link,
-                       .nl_u = {
-                               .ip4_u = {
-                                       .daddr = dst,
-                                       .saddr = tiph->saddr,
-                                       .tos = RT_TOS(tos)
-                               }
-                       },
+                       .fl4_dst = dst,
+                       .fl4_src = tiph->saddr,
+                       .fl4_tos = RT_TOS(tos),
                        .proto = IPPROTO_IPIP
                };
 
@@ -589,13 +585,9 @@ static void ipip_tunnel_bind_dev(struct net_device *dev)
        if (iph->daddr) {
                struct flowi fl = {
                        .oif = tunnel->parms.link,
-                       .nl_u = {
-                               .ip4_u = {
-                                       .daddr = iph->daddr,
-                                       .saddr = iph->saddr,
-                                       .tos = RT_TOS(iph->tos)
-                               }
-                       },
+                       .fl4_dst = iph->daddr,
+                       .fl4_src = iph->saddr,
+                       .fl4_tos = RT_TOS(iph->tos),
                        .proto = IPPROTO_IPIP
                };
                struct rtable *rt;
index 86dd5691af46dfc4d87631127e9fa06b7329dc3c..3f3a9afd73e02f1d5a4f89eb85d1bf7ba47b4ddc 100644 (file)
@@ -1537,13 +1537,9 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
        if (vif->flags & VIFF_TUNNEL) {
                struct flowi fl = {
                        .oif = vif->link,
-                       .nl_u = {
-                               .ip4_u = {
-                                       .daddr = vif->remote,
-                                       .saddr = vif->local,
-                                       .tos = RT_TOS(iph->tos)
-                               }
-                       },
+                       .fl4_dst = vif->remote,
+                       .fl4_src = vif->local,
+                       .fl4_tos = RT_TOS(iph->tos),
                        .proto = IPPROTO_IPIP
                };
 
@@ -1553,12 +1549,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
        } else {
                struct flowi fl = {
                        .oif = vif->link,
-                       .nl_u = {
-                               .ip4_u = {
-                                       .daddr = iph->daddr,
-                                       .tos = RT_TOS(iph->tos)
-                               }
-                       },
+                       .fl4_dst = iph->daddr,
+                       .fl4_tos = RT_TOS(iph->tos),
                        .proto = IPPROTO_IPIP
                };
 
@@ -1654,7 +1646,7 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
        if (mrt->vif_table[vif].dev != skb->dev) {
                int true_vifi;
 
-               if (skb_rtable(skb)->fl.iif == 0) {
+               if (rt_is_output_route(skb_rtable(skb))) {
                        /* It is our own packet, looped back.
                         * Very complicated situation...
                         *
index d88a46c54fd1bbb62a6f9ed13570c518940666b5..994a1f29ebbcf062caffde6c42de446673e08de7 100644 (file)
@@ -31,10 +31,10 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
         * packets with foreign saddr to appear on the NF_INET_LOCAL_OUT hook.
         */
        if (addr_type == RTN_LOCAL) {
-               fl.nl_u.ip4_u.daddr = iph->daddr;
+               fl.fl4_dst = iph->daddr;
                if (type == RTN_LOCAL)
-                       fl.nl_u.ip4_u.saddr = iph->saddr;
-               fl.nl_u.ip4_u.tos = RT_TOS(iph->tos);
+                       fl.fl4_src = iph->saddr;
+               fl.fl4_tos = RT_TOS(iph->tos);
                fl.oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
                fl.mark = skb->mark;
                fl.flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
@@ -47,7 +47,7 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
        } else {
                /* non-local src, find valid iif to satisfy
                 * rp-filter when calling ip_route_input. */
-               fl.nl_u.ip4_u.daddr = iph->saddr;
+               fl.fl4_dst = iph->saddr;
                if (ip_route_output_key(net, &rt, &fl) != 0)
                        return -1;
 
index 1f85ef289895a8c2a4a567089ad5bef7c1271dae..a3d5ab786e81bcb09a6e8b3a54cd64537218fd43 100644 (file)
@@ -549,10 +549,9 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        {
                struct flowi fl = { .oif = ipc.oif,
                                    .mark = sk->sk_mark,
-                                   .nl_u = { .ip4_u =
-                                             { .daddr = daddr,
-                                               .saddr = saddr,
-                                               .tos = tos } },
+                                   .fl4_dst = daddr,
+                                   .fl4_src = saddr,
+                                   .fl4_tos = tos,
                                    .proto = inet->hdrincl ? IPPROTO_RAW :
                                                             sk->sk_protocol,
                                  };
index 987bf9adb31833c19a0db04ce76060306d8e6994..ec2333fb637e4e80c9a0cb94356efdf96f740046 100644 (file)
@@ -140,13 +140,15 @@ static unsigned long expires_ljiffies;
 
 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
 static void             ipv4_dst_destroy(struct dst_entry *dst);
-static void             ipv4_dst_ifdown(struct dst_entry *dst,
-                                        struct net_device *dev, int how);
 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
 static void             ipv4_link_failure(struct sk_buff *skb);
 static void             ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
 static int rt_garbage_collect(struct dst_ops *ops);
 
+static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
+                           int how)
+{
+}
 
 static struct dst_ops ipv4_dst_ops = {
        .family =               AF_INET,
@@ -621,7 +623,7 @@ static inline int rt_fast_clean(struct rtable *rth)
        /* Kill broadcast/multicast entries very aggresively, if they
           collide in hash table with more useful entries */
        return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
-               rth->fl.iif && rth->dst.rt_next;
+               rt_is_input_route(rth) && rth->dst.rt_next;
 }
 
 static inline int rt_valuable(struct rtable *rth)
@@ -666,7 +668,7 @@ static inline u32 rt_score(struct rtable *rt)
        if (rt_valuable(rt))
                score |= (1<<31);
 
-       if (!rt->fl.iif ||
+       if (rt_is_output_route(rt) ||
            !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
                score |= (1<<30);
 
@@ -682,17 +684,17 @@ static inline bool rt_caching(const struct net *net)
 static inline bool compare_hash_inputs(const struct flowi *fl1,
                                        const struct flowi *fl2)
 {
-       return ((((__force u32)fl1->nl_u.ip4_u.daddr ^ (__force u32)fl2->nl_u.ip4_u.daddr) |
-               ((__force u32)fl1->nl_u.ip4_u.saddr ^ (__force u32)fl2->nl_u.ip4_u.saddr) |
+       return ((((__force u32)fl1->fl4_dst ^ (__force u32)fl2->fl4_dst) |
+               ((__force u32)fl1->fl4_src ^ (__force u32)fl2->fl4_src) |
                (fl1->iif ^ fl2->iif)) == 0);
 }
 
 static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
 {
-       return (((__force u32)fl1->nl_u.ip4_u.daddr ^ (__force u32)fl2->nl_u.ip4_u.daddr) |
-               ((__force u32)fl1->nl_u.ip4_u.saddr ^ (__force u32)fl2->nl_u.ip4_u.saddr) |
+       return (((__force u32)fl1->fl4_dst ^ (__force u32)fl2->fl4_dst) |
+               ((__force u32)fl1->fl4_src ^ (__force u32)fl2->fl4_src) |
                (fl1->mark ^ fl2->mark) |
-               (*(u16 *)&fl1->nl_u.ip4_u.tos ^ *(u16 *)&fl2->nl_u.ip4_u.tos) |
+               (*(u16 *)&fl1->fl4_tos ^ *(u16 *)&fl2->fl4_tos) |
                (fl1->oif ^ fl2->oif) |
                (fl1->iif ^ fl2->iif)) == 0;
 }
@@ -1124,7 +1126,7 @@ restart:
                 */
 
                rt->dst.flags |= DST_NOCACHE;
-               if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
+               if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
                        int err = arp_bind_neighbour(&rt->dst);
                        if (err) {
                                if (net_ratelimit())
@@ -1222,7 +1224,7 @@ restart:
        /* Try to bind route to arp only if it is output
           route or unicast forwarding path.
         */
-       if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
+       if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
                int err = arp_bind_neighbour(&rt->dst);
                if (err) {
                        spin_unlock_bh(rt_hash_lock_addr(hash));
@@ -1404,7 +1406,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
                                if (rth->fl.fl4_dst != daddr ||
                                    rth->fl.fl4_src != skeys[i] ||
                                    rth->fl.oif != ikeys[k] ||
-                                   rth->fl.iif != 0 ||
+                                   rt_is_input_route(rth) ||
                                    rt_is_expired(rth) ||
                                    !net_eq(dev_net(rth->dst.dev), net)) {
                                        rthp = &rth->dst.rt_next;
@@ -1433,8 +1435,6 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
                                rt->dst.child           = NULL;
                                if (rt->dst.dev)
                                        dev_hold(rt->dst.dev);
-                               if (rt->idev)
-                                       in_dev_hold(rt->idev);
                                rt->dst.obsolete        = -1;
                                rt->dst.lastuse = jiffies;
                                rt->dst.path            = &rt->dst;
@@ -1666,7 +1666,7 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
                                    rth->rt_dst != daddr ||
                                    rth->rt_src != iph->saddr ||
                                    rth->fl.oif != ikeys[k] ||
-                                   rth->fl.iif != 0 ||
+                                   rt_is_input_route(rth) ||
                                    dst_metric_locked(&rth->dst, RTAX_MTU) ||
                                    !net_eq(dev_net(rth->dst.dev), net) ||
                                    rt_is_expired(rth))
@@ -1728,33 +1728,13 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
 {
        struct rtable *rt = (struct rtable *) dst;
        struct inet_peer *peer = rt->peer;
-       struct in_device *idev = rt->idev;
 
        if (peer) {
                rt->peer = NULL;
                inet_putpeer(peer);
        }
-
-       if (idev) {
-               rt->idev = NULL;
-               in_dev_put(idev);
-       }
 }
 
-static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
-                           int how)
-{
-       struct rtable *rt = (struct rtable *) dst;
-       struct in_device *idev = rt->idev;
-       if (dev != dev_net(dev)->loopback_dev && idev && idev->dev == dev) {
-               struct in_device *loopback_idev =
-                       in_dev_get(dev_net(dev)->loopback_dev);
-               if (loopback_idev) {
-                       rt->idev = loopback_idev;
-                       in_dev_put(idev);
-               }
-       }
-}
 
 static void ipv4_link_failure(struct sk_buff *skb)
 {
@@ -1790,7 +1770,7 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
        __be32 src;
        struct fib_result res;
 
-       if (rt->fl.iif == 0)
+       if (rt_is_output_route(rt))
                src = rt->rt_src;
        else {
                rcu_read_lock();
@@ -1910,7 +1890,6 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
        rth->fl.iif     = dev->ifindex;
        rth->dst.dev    = init_net.loopback_dev;
        dev_hold(rth->dst.dev);
-       rth->idev       = in_dev_get(rth->dst.dev);
        rth->fl.oif     = 0;
        rth->rt_gateway = daddr;
        rth->rt_spec_dst= spec_dst;
@@ -2050,7 +2029,6 @@ static int __mkroute_input(struct sk_buff *skb,
                rth->fl.iif     = in_dev->dev->ifindex;
        rth->dst.dev    = (out_dev)->dev;
        dev_hold(rth->dst.dev);
-       rth->idev       = in_dev_get(rth->dst.dev);
        rth->fl.oif     = 0;
        rth->rt_spec_dst= spec_dst;
 
@@ -2111,12 +2089,10 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 {
        struct fib_result res;
        struct in_device *in_dev = __in_dev_get_rcu(dev);
-       struct flowi fl = { .nl_u = { .ip4_u =
-                                     { .daddr = daddr,
-                                       .saddr = saddr,
-                                       .tos = tos,
-                                       .scope = RT_SCOPE_UNIVERSE,
-                                     } },
+       struct flowi fl = { .fl4_dst    = daddr,
+                           .fl4_src    = saddr,
+                           .fl4_tos    = tos,
+                           .fl4_scope  = RT_SCOPE_UNIVERSE,
                            .mark = skb->mark,
                            .iif = dev->ifindex };
        unsigned        flags = 0;
@@ -2231,7 +2207,6 @@ local_input:
        rth->fl.iif     = dev->ifindex;
        rth->dst.dev    = net->loopback_dev;
        dev_hold(rth->dst.dev);
-       rth->idev       = in_dev_get(rth->dst.dev);
        rth->rt_gateway = daddr;
        rth->rt_spec_dst= spec_dst;
        rth->dst.input= ip_local_deliver;
@@ -2417,9 +2392,6 @@ static int __mkroute_output(struct rtable **result,
        if (!rth)
                return -ENOBUFS;
 
-       in_dev_hold(in_dev);
-       rth->idev = in_dev;
-
        atomic_set(&rth->dst.__refcnt, 1);
        rth->dst.flags= DST_HOST;
        if (IN_DEV_CONF_GET(in_dev, NOXFRM))
@@ -2506,14 +2478,11 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
                                const struct flowi *oldflp)
 {
        u32 tos = RT_FL_TOS(oldflp);
-       struct flowi fl = { .nl_u = { .ip4_u =
-                                     { .daddr = oldflp->fl4_dst,
-                                       .saddr = oldflp->fl4_src,
-                                       .tos = tos & IPTOS_RT_MASK,
-                                       .scope = ((tos & RTO_ONLINK) ?
-                                                 RT_SCOPE_LINK :
-                                                 RT_SCOPE_UNIVERSE),
-                                     } },
+       struct flowi fl = { .fl4_dst = oldflp->fl4_dst,
+                           .fl4_src = oldflp->fl4_src,
+                           .fl4_tos = tos & IPTOS_RT_MASK,
+                           .fl4_scope = ((tos & RTO_ONLINK) ?
+                                         RT_SCOPE_LINK : RT_SCOPE_UNIVERSE),
                            .mark = oldflp->mark,
                            .iif = net->loopback_dev->ifindex,
                            .oif = oldflp->oif };
@@ -2695,7 +2664,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
                rth = rcu_dereference_bh(rth->dst.rt_next)) {
                if (rth->fl.fl4_dst == flp->fl4_dst &&
                    rth->fl.fl4_src == flp->fl4_src &&
-                   rth->fl.iif == 0 &&
+                   rt_is_output_route(rth) &&
                    rth->fl.oif == flp->oif &&
                    rth->fl.mark == flp->mark &&
                    !((rth->fl.fl4_tos ^ flp->fl4_tos) &
@@ -2759,9 +2728,6 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi
 
                rt->fl = ort->fl;
 
-               rt->idev = ort->idev;
-               if (rt->idev)
-                       in_dev_hold(rt->idev);
                rt->rt_genid = rt_genid(net);
                rt->rt_flags = ort->rt_flags;
                rt->rt_type = ort->rt_type;
@@ -2853,7 +2819,7 @@ static int rt_fill_info(struct net *net,
        if (rt->dst.tclassid)
                NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
 #endif
-       if (rt->fl.iif)
+       if (rt_is_input_route(rt))
                NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
        else if (rt->rt_src != rt->fl.fl4_src)
                NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
@@ -2878,7 +2844,7 @@ static int rt_fill_info(struct net *net,
                }
        }
 
-       if (rt->fl.iif) {
+       if (rt_is_input_route(rt)) {
 #ifdef CONFIG_IP_MROUTE
                __be32 dst = rt->rt_dst;
 
@@ -2973,13 +2939,9 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
                        err = -rt->dst.error;
        } else {
                struct flowi fl = {
-                       .nl_u = {
-                               .ip4_u = {
-                                       .daddr = dst,
-                                       .saddr = src,
-                                       .tos = rtm->rtm_tos,
-                               },
-                       },
+                       .fl4_dst = dst,
+                       .fl4_src = src,
+                       .fl4_tos = rtm->rtm_tos,
                        .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
                        .mark = mark,
                };
index 650cace2180d3598d2c4c10bf16c300506f3f833..47519205a014a773e2df5b21f6fdf27c43fe6cb1 100644 (file)
@@ -346,17 +346,14 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
         */
        {
                struct flowi fl = { .mark = sk->sk_mark,
-                                   .nl_u = { .ip4_u =
-                                             { .daddr = ((opt && opt->srr) ?
-                                                         opt->faddr :
-                                                         ireq->rmt_addr),
-                                               .saddr = ireq->loc_addr,
-                                               .tos = RT_CONN_FLAGS(sk) } },
+                                   .fl4_dst = ((opt && opt->srr) ?
+                                               opt->faddr : ireq->rmt_addr),
+                                   .fl4_src = ireq->loc_addr,
+                                   .fl4_tos = RT_CONN_FLAGS(sk),
                                    .proto = IPPROTO_TCP,
                                    .flags = inet_sk_flowi_flags(sk),
-                                   .uli_u = { .ports =
-                                              { .sport = th->dest,
-                                                .dport = th->source } } };
+                                   .fl_ip_sport = th->dest,
+                                   .fl_ip_dport = th->source };
                security_req_classify_flow(req, &fl);
                if (ip_route_output_key(sock_net(sk), &rt, &fl)) {
                        reqsk_free(req);
index 0814199694854e534eb4ff12671e02313dced8e9..2bb46d55f40cf0e680b420f6d80d7d745b6e1634 100644 (file)
@@ -1193,7 +1193,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
        struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
 
        WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
-            KERN_INFO "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
+            "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
             tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
 #endif
 
@@ -1477,10 +1477,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                         * shouldn't happen.
                         */
                        if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
-                            KERN_INFO "recvmsg bug: copied %X "
-                                      "seq %X rcvnxt %X fl %X\n", *seq,
-                                      TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
-                                      flags))
+                                "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
+                                *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
+                                flags))
                                break;
 
                        offset = *seq - TCP_SKB_CB(skb)->seq;
@@ -1490,10 +1489,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                                goto found_ok_skb;
                        if (tcp_hdr(skb)->fin)
                                goto found_fin_ok;
-                       WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: "
-                                       "copied %X seq %X rcvnxt %X fl %X\n",
-                                       *seq, TCP_SKB_CB(skb)->seq,
-                                       tp->rcv_nxt, flags);
+                       WARN(!(flags & MSG_PEEK),
+                            "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
+                            *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
                }
 
                /* Well, if we have backlog, try to process it now yet. */
index 05b1ecf367632763cbdb1f3bfe0e74c9c4d20c0c..bb8f547fc7d2268662db8c8a12c276005b5ac113 100644 (file)
@@ -2592,6 +2592,7 @@ int tcp_connect(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *buff;
+       int err;
 
        tcp_connect_init(sk);
 
@@ -2614,7 +2615,9 @@ int tcp_connect(struct sock *sk)
        sk->sk_wmem_queued += buff->truesize;
        sk_mem_charge(sk, buff->truesize);
        tp->packets_out += tcp_skb_pcount(buff);
-       tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
+       err = tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
+       if (err == -ECONNREFUSED)
+               return err;
 
        /* We change tp->snd_nxt after the tcp_transmit_skb() call
         * in order to make this packet get counted in tcpOutSegs.
index 6211e211417396f9c17dedf805998db731b4a27c..85ee7eb7e38e58bb5088cd31090af665512fe361 100644 (file)
@@ -154,7 +154,7 @@ static int tcpprobe_sprint(char *tbuf, int n)
        struct timespec tv
                = ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start));
 
-       return snprintf(tbuf, n,
+       return scnprintf(tbuf, n,
                        "%lu.%09lu %pI4:%u %pI4:%u %d %#x %#x %u %u %u %u\n",
                        (unsigned long) tv.tv_sec,
                        (unsigned long) tv.tv_nsec,
@@ -174,7 +174,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
                return -EINVAL;
 
        while (cnt < len) {
-               char tbuf[128];
+               char tbuf[164];
                int width;
 
                /* Wait for data in buffer */
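The snprintf() to scnprintf() switch matters because tcpprobe_read() uses the return value as the number of bytes to copy out of tbuf: snprintf() returns the length the line would have had without truncation, while the kernel's scnprintf() returns the bytes actually stored, so an over-long line can no longer make the copy read past what was written (tbuf is also grown from 128 to 164 bytes to fit the worst-case line). A small userspace illustration of the difference, assuming C99 snprintf() semantics:

#include <stdio.h>

int main(void)
{
	char buf[8];

	/* snprintf() reports the untruncated length (11) even though only
	 * seven characters plus the terminating NUL fit into buf.
	 */
	int n = snprintf(buf, sizeof(buf), "hello world");
	printf("%d \"%s\"\n", n, buf);	/* prints: 11 "hello w" */

	/* the kernel's scnprintf() would return 7 here: the count of
	 * characters actually written, which is what a caller that
	 * advances by the return value needs.
	 */
	return 0;
}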
index 5e0a3a582a59a05466468b244371dc5675bedc67..b37181da487cc22164f8b9f6fe0f160ee742a36f 100644 (file)
@@ -430,7 +430,7 @@ begin:
 
        if (result) {
 exact_match:
-               if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
+               if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
                        result = NULL;
                else if (unlikely(compute_score2(result, net, saddr, sport,
                                  daddr, hnum, dif) < badness)) {
@@ -500,7 +500,7 @@ begin:
                goto begin;
 
        if (result) {
-               if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
+               if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
                        result = NULL;
                else if (unlikely(compute_score(result, net, saddr, hnum, sport,
                                  daddr, dport, dif) < badness)) {
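atomic_inc_not_zero_hint() behaves like atomic_inc_not_zero() but lets the caller seed the cmpxchg loop with the value the refcount is expected to hold (2 here, on the assumption that an established UDP socket is normally referenced by the hash table plus its owner), which avoids the initial read of the counter on the common path. Presumably something along these lines:

static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
{
	int val, c = hint;

	if (!hint)			/* no guess: fall back to the plain helper */
		return atomic_inc_not_zero(v);

	do {
		val = atomic_cmpxchg(v, c, c + 1);
		if (val == c)
			return 1;	/* incremented a non-zero counter */
		c = val;		/* retry from the value actually seen */
	} while (c);

	return 0;			/* counter reached zero, leave it dead */
}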
@@ -890,15 +890,13 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        if (rt == NULL) {
                struct flowi fl = { .oif = ipc.oif,
                                    .mark = sk->sk_mark,
-                                   .nl_u = { .ip4_u =
-                                             { .daddr = faddr,
-                                               .saddr = saddr,
-                                               .tos = tos } },
+                                   .fl4_dst = faddr,
+                                   .fl4_src = saddr,
+                                   .fl4_tos = tos,
                                    .proto = sk->sk_protocol,
                                    .flags = inet_sk_flowi_flags(sk),
-                                   .uli_u = { .ports =
-                                              { .sport = inet->inet_sport,
-                                                .dport = dport } } };
+                                   .fl_ip_sport = inet->inet_sport,
+                                   .fl_ip_dport = dport };
                struct net *net = sock_net(sk);
 
                security_sk_classify_flow(sk, &fl);
index 4464f3bff6a7a7d902b72806bb9d5e7c1752bf96..b057d40addec3ef2ffab44413421e502894fe60e 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/inetdevice.h>
+#include <linux/if_tunnel.h>
 #include <net/dst.h>
 #include <net/xfrm.h>
 #include <net/ip.h>
@@ -22,12 +23,8 @@ static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
                                          xfrm_address_t *daddr)
 {
        struct flowi fl = {
-               .nl_u = {
-                       .ip4_u = {
-                               .tos = tos,
-                               .daddr = daddr->a4,
-                       },
-               },
+               .fl4_dst = daddr->a4,
+               .fl4_tos = tos,
        };
        struct dst_entry *dst;
        struct rtable *rt;
@@ -80,10 +77,6 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
        xdst->u.dst.dev = dev;
        dev_hold(dev);
 
-       xdst->u.rt.idev = in_dev_get(dev);
-       if (!xdst->u.rt.idev)
-               return -ENODEV;
-
        xdst->u.rt.peer = rt->peer;
        if (rt->peer)
                atomic_inc(&rt->peer->refcnt);
@@ -158,6 +151,20 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
                                fl->fl_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
                        }
                        break;
+
+               case IPPROTO_GRE:
+                       if (pskb_may_pull(skb, xprth + 12 - skb->data)) {
+                               __be16 *greflags = (__be16 *)xprth;
+                               __be32 *gre_hdr = (__be32 *)xprth;
+
+                               if (greflags[0] & GRE_KEY) {
+                                       if (greflags[0] & GRE_CSUM)
+                                               gre_hdr++;
+                                       fl->fl_gre_key = gre_hdr[1];
+                               }
+                       }
+                       break;
+
                default:
                        fl->fl_ipsec_spi = 0;
                        break;
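The GRE case added above extracts the tunnel key so keyed GRE flows can be matched via fl_gre_key. The pointer arithmetic follows the keyed-GRE layout (RFC 2890): a 32-bit flags/protocol word, an optional 32-bit checksum word if GRE_CSUM is set, then the optional 32-bit key if GRE_KEY is set. A commented restatement of the parse, under that assumption:

	__be16 *greflags = (__be16 *)xprth;	/* xprth points at the GRE header */
	__be32 *gre_hdr  = (__be32 *)xprth;	/* gre_hdr[0] is flags + protocol */

	if (greflags[0] & GRE_KEY) {
		if (greflags[0] & GRE_CSUM)
			gre_hdr++;		/* step over checksum + reserved word */
		fl->fl_gre_key = gre_hdr[1];	/* the key is the following 32-bit word */
	}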
@@ -189,8 +196,6 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
 {
        struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
 
-       if (likely(xdst->u.rt.idev))
-               in_dev_put(xdst->u.rt.idev);
        if (likely(xdst->u.rt.peer))
                inet_putpeer(xdst->u.rt.peer);
        xfrm_dst_destroy(xdst);
@@ -199,27 +204,9 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
 static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
                             int unregister)
 {
-       struct xfrm_dst *xdst;
-
        if (!unregister)
                return;
 
-       xdst = (struct xfrm_dst *)dst;
-       if (xdst->u.rt.idev->dev == dev) {
-               struct in_device *loopback_idev =
-                       in_dev_get(dev_net(dev)->loopback_dev);
-               BUG_ON(!loopback_idev);
-
-               do {
-                       in_dev_put(xdst->u.rt.idev);
-                       xdst->u.rt.idev = loopback_idev;
-                       in_dev_hold(loopback_idev);
-                       xdst = (struct xfrm_dst *)xdst->u.dst.child;
-               } while (xdst->u.dst.xfrm);
-
-               __in_dev_put(loopback_idev);
-       }
-
        xfrm_dst_ifdown(dst, dev);
 }
 
index 2fc35b32df9eacb3c1dcebf153700b5056bc9689..4cf760598c2aa674bb9589b6d6d99c11d1c459fb 100644 (file)
@@ -2758,13 +2758,13 @@ static int addrconf_ifdown(struct net_device *dev, int how)
                        ifa->state = INET6_IFADDR_STATE_DEAD;
                        spin_unlock_bh(&ifa->state_lock);
 
-                       if (state == INET6_IFADDR_STATE_DEAD) {
-                               in6_ifa_put(ifa);
-                       } else {
+                       if (state != INET6_IFADDR_STATE_DEAD) {
                                __ipv6_ifa_notify(RTM_DELADDR, ifa);
                                atomic_notifier_call_chain(&inet6addr_chain,
                                                           NETDEV_DOWN, ifa);
                        }
+
+                       in6_ifa_put(ifa);
                        write_lock_bh(&idev->lock);
                }
        }
@@ -3836,6 +3836,15 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
        array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
 }
 
+static inline size_t inet6_ifla6_size(void)
+{
+       return nla_total_size(4) /* IFLA_INET6_FLAGS */
+            + nla_total_size(sizeof(struct ifla_cacheinfo))
+            + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
+            + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
+            + nla_total_size(ICMP6_MIB_MAX * 8); /* IFLA_INET6_ICMP6STATS */
+}
+
 static inline size_t inet6_if_nlmsg_size(void)
 {
        return NLMSG_ALIGN(sizeof(struct ifinfomsg))
@@ -3843,13 +3852,7 @@ static inline size_t inet6_if_nlmsg_size(void)
               + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
               + nla_total_size(4) /* IFLA_MTU */
               + nla_total_size(4) /* IFLA_LINK */
-              + nla_total_size( /* IFLA_PROTINFO */
-                       nla_total_size(4) /* IFLA_INET6_FLAGS */
-                       + nla_total_size(sizeof(struct ifla_cacheinfo))
-                       + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
-                       + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
-                       + nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */
-                );
+              + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
 }
 
 static inline void __snmp6_fill_stats(u64 *stats, void __percpu **mib,
@@ -3896,15 +3899,75 @@ static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
        }
 }
 
+static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev)
+{
+       struct nlattr *nla;
+       struct ifla_cacheinfo ci;
+
+       NLA_PUT_U32(skb, IFLA_INET6_FLAGS, idev->if_flags);
+
+       ci.max_reasm_len = IPV6_MAXPLEN;
+       ci.tstamp = cstamp_delta(idev->tstamp);
+       ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
+       ci.retrans_time = jiffies_to_msecs(idev->nd_parms->retrans_time);
+       NLA_PUT(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci);
+
+       nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
+       if (nla == NULL)
+               goto nla_put_failure;
+       ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));
+
+       /* XXX - MC not implemented */
+
+       nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
+       if (nla == NULL)
+               goto nla_put_failure;
+       snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
+
+       nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
+       if (nla == NULL)
+               goto nla_put_failure;
+       snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
+
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+static size_t inet6_get_link_af_size(const struct net_device *dev)
+{
+       if (!__in6_dev_get(dev))
+               return 0;
+
+       return inet6_ifla6_size();
+}
+
+static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
+{
+       struct inet6_dev *idev = __in6_dev_get(dev);
+
+       if (!idev)
+               return -ENODATA;
+
+       if (inet6_fill_ifla6_attrs(skb, idev) < 0)
+               return -EMSGSIZE;
+
+       return 0;
+}
+
+static int inet6_parse_link_af(struct net_device *dev, const struct nlattr *nla)
+{
+       return -EOPNOTSUPP;
+}
+
 static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
                             u32 pid, u32 seq, int event, unsigned int flags)
 {
        struct net_device *dev = idev->dev;
-       struct nlattr *nla;
        struct ifinfomsg *hdr;
        struct nlmsghdr *nlh;
        void *protoinfo;
-       struct ifla_cacheinfo ci;
 
        nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
        if (nlh == NULL)
@@ -3931,30 +3994,8 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
        if (protoinfo == NULL)
                goto nla_put_failure;
 
-       NLA_PUT_U32(skb, IFLA_INET6_FLAGS, idev->if_flags);
-
-       ci.max_reasm_len = IPV6_MAXPLEN;
-       ci.tstamp = cstamp_delta(idev->tstamp);
-       ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
-       ci.retrans_time = jiffies_to_msecs(idev->nd_parms->retrans_time);
-       NLA_PUT(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci);
-
-       nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
-       if (nla == NULL)
-               goto nla_put_failure;
-       ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));
-
-       /* XXX - MC not implemented */
-
-       nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
-       if (nla == NULL)
-               goto nla_put_failure;
-       snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
-
-       nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
-       if (nla == NULL)
+       if (inet6_fill_ifla6_attrs(skb, idev) < 0)
                goto nla_put_failure;
-       snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
 
        nla_nest_end(skb, protoinfo);
        return nlmsg_end(skb, nlh);
@@ -4625,6 +4666,13 @@ int unregister_inet6addr_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL(unregister_inet6addr_notifier);
 
+static struct rtnl_af_ops inet6_ops = {
+       .family           = AF_INET6,
+       .fill_link_af     = inet6_fill_link_af,
+       .get_link_af_size = inet6_get_link_af_size,
+       .parse_link_af    = inet6_parse_link_af,
+};
+
 /*
  *     Init / cleanup code
  */
@@ -4676,6 +4724,10 @@ int __init addrconf_init(void)
 
        addrconf_verify(0);
 
+       err = rtnl_af_register(&inet6_ops);
+       if (err < 0)
+               goto errout_af;
+
        err = __rtnl_register(PF_INET6, RTM_GETLINK, NULL, inet6_dump_ifinfo);
        if (err < 0)
                goto errout;
@@ -4691,6 +4743,8 @@ int __init addrconf_init(void)
 
        return 0;
 errout:
+       rtnl_af_unregister(&inet6_ops);
+errout_af:
        unregister_netdevice_notifier(&ipv6_dev_notf);
 errlo:
        unregister_pernet_subsys(&addrconf_ops);
@@ -4711,6 +4765,8 @@ void addrconf_cleanup(void)
 
        rtnl_lock();
 
+       __rtnl_af_unregister(&inet6_ops);
+
        /* clean dev list */
        for_each_netdev(&init_net, dev) {
                if (__in6_dev_get(dev) == NULL)
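The rtnl_af_ops hook introduced here is generic: any address family can export its per-device attributes through RTM_GETLINK by registering an ops structure. A minimal sketch of what another family would provide, with hypothetical "example" names (only the ops fields and the register/unregister calls mirror the AF_INET6 code above):

	static size_t example_get_link_af_size(const struct net_device *dev)
	{
		return nla_total_size(4);	/* one hypothetical u32 attribute */
	}

	static int example_fill_link_af(struct sk_buff *skb,
					const struct net_device *dev)
	{
		NLA_PUT_U32(skb, 1 /* hypothetical attribute type */, 0);
		return 0;

	nla_put_failure:
		return -EMSGSIZE;
	}

	static struct rtnl_af_ops example_af_ops = {
		.family           = AF_MAX - 1,	/* placeholder family */
		.fill_link_af     = example_fill_link_af,
		.get_link_af_size = example_get_link_af_size,
	};

	/* Register with rtnl_af_register(&example_af_ops) at init time and
	 * remove it with __rtnl_af_unregister() under rtnl_lock() at cleanup,
	 * exactly as addrconf_init()/addrconf_cleanup() do above. */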
index 6f32ffce7022c198e4c78c9126c3df90026697c2..9fab274019c0656dd4f01365ac14e46e0f1f764d 100644 (file)
@@ -1843,9 +1843,7 @@ static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
 
        fl = (struct flowi) {
                .oif = vif->link,
-               .nl_u = { .ip6_u =
-                               { .daddr = ipv6h->daddr, }
-               }
+               .fl6_dst = ipv6h->daddr,
        };
 
        dst = ip6_route_output(net, NULL, &fl);
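The .nl_u rewrite above is the pattern repeated throughout the remaining hunks of this merge: the flat fl4_*/fl6_* names are aliases for the nested union members in include/net/flowi.h, so the new initializers describe exactly the same flow key. A side-by-side sketch with placeholder locals:

	/* old style: spell out the nested union */
	struct flowi fl_old = {
		.oif  = oif,
		.nl_u = { .ip4_u = { .daddr = daddr,
				     .saddr = saddr,
				     .tos   = RT_TOS(tos) } },
	};

	/* new style used in this series: flat aliases, identical storage */
	struct flowi fl_new = {
		.oif     = oif,
		.fl4_dst = daddr,
		.fl4_src = saddr,
		.fl4_tos = RT_TOS(tos),
	};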
index d1444b95ad7e527c8c0f3e00b906567d5043ba21..9c5074528a710811f90b4d082d5ff3c758a300f0 100644 (file)
@@ -257,7 +257,7 @@ static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
                return NULL;
        idev = __in6_dev_get(dev);
        if (!idev)
-               return NULL;;
+               return NULL;
        read_lock_bh(&idev->lock);
        if (idev->dead) {
                read_unlock_bh(&idev->lock);
index 7155b2451d7cf297ab2b87d244e217ccf82f4071..35915e8617f08ccf274855378bce1d4705aea6b0 100644 (file)
@@ -18,10 +18,8 @@ int ip6_route_me_harder(struct sk_buff *skb)
        struct flowi fl = {
                .oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
                .mark = skb->mark,
-               .nl_u =
-               { .ip6_u =
-                 { .daddr = iph->daddr,
-                   .saddr = iph->saddr, } },
+               .fl6_dst = iph->daddr,
+               .fl6_src = iph->saddr,
        };
 
        dst = ip6_route_output(net, skb->sk, &fl);
index 96455ffb76fb8b92aa90c3a711aa6635d45b91fa..c346ccf66ae108cf5d36ef39f7f25c922ebc3388 100644 (file)
@@ -558,11 +558,7 @@ struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
 {
        struct flowi fl = {
                .oif = oif,
-               .nl_u = {
-                       .ip6_u = {
-                               .daddr = *daddr,
-                       },
-               },
+               .fl6_dst = *daddr,
        };
        struct dst_entry *dst;
        int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
@@ -778,13 +774,9 @@ void ip6_route_input(struct sk_buff *skb)
        int flags = RT6_LOOKUP_F_HAS_SADDR;
        struct flowi fl = {
                .iif = skb->dev->ifindex,
-               .nl_u = {
-                       .ip6_u = {
-                               .daddr = iph->daddr,
-                               .saddr = iph->saddr,
-                               .flowlabel = (* (__be32 *) iph)&IPV6_FLOWINFO_MASK,
-                       },
-               },
+               .fl6_dst = iph->daddr,
+               .fl6_src = iph->saddr,
+               .fl6_flowlabel = (* (__be32 *) iph)&IPV6_FLOWINFO_MASK,
                .mark = skb->mark,
                .proto = iph->nexthdr,
        };
@@ -1463,12 +1455,8 @@ static struct rt6_info *ip6_route_redirect(struct in6_addr *dest,
        struct ip6rd_flowi rdfl = {
                .fl = {
                        .oif = dev->ifindex,
-                       .nl_u = {
-                               .ip6_u = {
-                                       .daddr = *dest,
-                                       .saddr = *src,
-                               },
-                       },
+                       .fl6_dst = *dest,
+                       .fl6_src = *src,
                },
        };
 
index d6bfaec3bbbf1a91701fd31616757af1a565bb41..6e48a80d0f25a234551226b3b62719822fe74e84 100644 (file)
@@ -730,10 +730,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
        }
 
        {
-               struct flowi fl = { .nl_u = { .ip4_u =
-                                             { .daddr = dst,
-                                               .saddr = tiph->saddr,
-                                               .tos = RT_TOS(tos) } },
+               struct flowi fl = { .fl4_dst = dst,
+                                   .fl4_src = tiph->saddr,
+                                   .fl4_tos = RT_TOS(tos),
                                    .oif = tunnel->parms.link,
                                    .proto = IPPROTO_IPV6 };
                if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
@@ -855,10 +854,9 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
        iph = &tunnel->parms.iph;
 
        if (iph->daddr) {
-               struct flowi fl = { .nl_u = { .ip4_u =
-                                             { .daddr = iph->daddr,
-                                               .saddr = iph->saddr,
-                                               .tos = RT_TOS(iph->tos) } },
+               struct flowi fl = { .fl4_dst = iph->daddr,
+                                   .fl4_src = iph->saddr,
+                                   .fl4_tos = RT_TOS(iph->tos),
                                    .oif = tunnel->parms.link,
                                    .proto = IPPROTO_IPV6 };
                struct rtable *rt;
index 91def93bec85060e7571218c20439cabd4ec824f..b541a4e009fba179d939cfe71b4d083168831728 100644 (file)
@@ -227,7 +227,7 @@ begin:
 
        if (result) {
 exact_match:
-               if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
+               if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
                        result = NULL;
                else if (unlikely(compute_score2(result, net, saddr, sport,
                                  daddr, hnum, dif) < badness)) {
@@ -294,7 +294,7 @@ begin:
                goto begin;
 
        if (result) {
-               if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
+               if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
                        result = NULL;
                else if (unlikely(compute_score(result, net, hnum, saddr, sport,
                                        daddr, dport, dif) < badness)) {
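atomic_inc_not_zero_hint() differs from the plain variant only in that its cmpxchg loop starts from the caller's guess of the current value instead of reading the counter first, saving a read when the guess is right (the hint of 2 reflects that a socket found in the lookup table typically already holds a couple of references). A rough sketch of the idea, not the exact header source:

	static inline int example_inc_not_zero_hint(atomic_t *v, int hint)
	{
		int val, c = hint;

		if (!hint)			/* no useful guess available */
			return atomic_inc_not_zero(v);

		do {
			val = atomic_cmpxchg(v, c, c + 1);
			if (val == c)
				return 1;	/* guessed right, incremented */
			c = val;		/* retry from the observed value */
		} while (c);			/* counter reached zero: refuse */

		return 0;
	}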
index 0bf6a59545ab9439f3ab7126539cd9aef419b752..04635e88e8ed3ef25b0bdac9170506ee390c93b6 100644 (file)
@@ -476,15 +476,13 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
 
                {
                        struct flowi fl = { .oif = sk->sk_bound_dev_if,
-                                           .nl_u = { .ip4_u = {
-                                                       .daddr = daddr,
-                                                       .saddr = inet->inet_saddr,
-                                                       .tos = RT_CONN_FLAGS(sk) } },
+                                           .fl4_dst = daddr,
+                                           .fl4_src = inet->inet_saddr,
+                                           .fl4_tos = RT_CONN_FLAGS(sk),
                                            .proto = sk->sk_protocol,
                                            .flags = inet_sk_flowi_flags(sk),
-                                           .uli_u = { .ports = {
-                                                        .sport = inet->inet_sport,
-                                                        .dport = inet->inet_dport } } };
+                                           .fl_ip_sport = inet->inet_sport,
+                                           .fl_ip_dport = inet->inet_dport };
 
                        /* If this fails, retransmit mechanism of transport layer will
                         * keep trying until route appears or the connection times
index d2b03e0851ef6502f6eeb3064ea93db8e31e7977..4bd6ef0be38083aae25162a69250164b0afdab68 100644 (file)
@@ -147,6 +147,5 @@ struct crypto_cipher *ieee80211_aes_key_setup_encrypt(const u8 key[])
 
 void ieee80211_aes_key_free(struct crypto_cipher *tfm)
 {
-       if (tfm)
-               crypto_free_cipher(tfm);
+       crypto_free_cipher(tfm);
 }
index b4d66cca76d6d719324cee76d2706881df69e4a0..d502b2684a664e317fcba3d629d463fef562b9aa 100644 (file)
@@ -128,6 +128,5 @@ struct crypto_cipher * ieee80211_aes_cmac_key_setup(const u8 key[])
 
 void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm)
 {
-       if (tfm)
-               crypto_free_cipher(tfm);
+       crypto_free_cipher(tfm);
 }
index 18260aa99c56f40c509616276029cd7d55f4753d..1f02e599a3185c06de0b214711eece19ff82c136 100644 (file)
@@ -21,16 +21,30 @@ int mac80211_open_file_generic(struct inode *inode, struct file *file)
        return 0;
 }
 
-#define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...)             \
+#define DEBUGFS_FORMAT_BUFFER_SIZE 100
+
+int mac80211_format_buffer(char __user *userbuf, size_t count,
+                                 loff_t *ppos, char *fmt, ...)
+{
+       va_list args;
+       char buf[DEBUGFS_FORMAT_BUFFER_SIZE];
+       int res;
+
+       va_start(args, fmt);
+       res = vscnprintf(buf, sizeof(buf), fmt, args);
+       va_end(args);
+
+       return simple_read_from_buffer(userbuf, count, ppos, buf, res);
+}
+
+#define DEBUGFS_READONLY_FILE(name, fmt, value...)                     \
 static ssize_t name## _read(struct file *file, char __user *userbuf,   \
                            size_t count, loff_t *ppos)                 \
 {                                                                      \
        struct ieee80211_local *local = file->private_data;             \
-       char buf[buflen];                                               \
-       int res;                                                        \
                                                                        \
-       res = scnprintf(buf, buflen, fmt "\n", ##value);                \
-       return simple_read_from_buffer(userbuf, count, ppos, buf, res); \
+       return mac80211_format_buffer(userbuf, count, ppos,             \
+                                     fmt "\n", ##value);               \
 }                                                                      \
                                                                        \
 static const struct file_operations name## _ops = {                    \
@@ -46,13 +60,13 @@ static const struct file_operations name## _ops = {                 \
        debugfs_create_file(#name, mode, phyd, local, &name## _ops);
 
 
-DEBUGFS_READONLY_FILE(frequency, 20, "%d",
+DEBUGFS_READONLY_FILE(frequency, "%d",
                      local->hw.conf.channel->center_freq);
-DEBUGFS_READONLY_FILE(total_ps_buffered, 20, "%d",
+DEBUGFS_READONLY_FILE(total_ps_buffered, "%d",
                      local->total_ps_buffered);
-DEBUGFS_READONLY_FILE(wep_iv, 20, "%#08x",
+DEBUGFS_READONLY_FILE(wep_iv, "%#08x",
                      local->wep_iv & 0xffffff);
-DEBUGFS_READONLY_FILE(rate_ctrl_alg, 100, "%s",
+DEBUGFS_READONLY_FILE(rate_ctrl_alg, "%s",
        local->rate_ctrl ? local->rate_ctrl->ops->name : "hw/driver");
 
 static ssize_t tsf_read(struct file *file, char __user *user_buf,
@@ -60,13 +74,11 @@ static ssize_t tsf_read(struct file *file, char __user *user_buf,
 {
        struct ieee80211_local *local = file->private_data;
        u64 tsf;
-       char buf[100];
 
        tsf = drv_get_tsf(local);
 
-       snprintf(buf, sizeof(buf), "0x%016llx\n", (unsigned long long) tsf);
-
-       return simple_read_from_buffer(user_buf, count, ppos, buf, 19);
+       return mac80211_format_buffer(user_buf, count, ppos, "0x%016llx\n",
+                                     (unsigned long long) tsf);
 }
 
 static ssize_t tsf_write(struct file *file,
@@ -131,12 +143,9 @@ static ssize_t noack_read(struct file *file, char __user *user_buf,
                          size_t count, loff_t *ppos)
 {
        struct ieee80211_local *local = file->private_data;
-       int res;
-       char buf[10];
 
-       res = scnprintf(buf, sizeof(buf), "%d\n", local->wifi_wme_noack_test);
-
-       return simple_read_from_buffer(user_buf, count, ppos, buf, res);
+       return mac80211_format_buffer(user_buf, count, ppos, "%d\n",
+                                     local->wifi_wme_noack_test);
 }
 
 static ssize_t noack_write(struct file *file,
@@ -168,12 +177,8 @@ static ssize_t uapsd_queues_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
 {
        struct ieee80211_local *local = file->private_data;
-       int res;
-       char buf[10];
-
-       res = scnprintf(buf, sizeof(buf), "0x%x\n", local->uapsd_queues);
-
-       return simple_read_from_buffer(user_buf, count, ppos, buf, res);
+       return mac80211_format_buffer(user_buf, count, ppos, "0x%x\n",
+                                     local->uapsd_queues);
 }
 
 static ssize_t uapsd_queues_write(struct file *file,
@@ -215,12 +220,9 @@ static ssize_t uapsd_max_sp_len_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
 {
        struct ieee80211_local *local = file->private_data;
-       int res;
-       char buf[10];
 
-       res = scnprintf(buf, sizeof(buf), "0x%x\n", local->uapsd_max_sp_len);
-
-       return simple_read_from_buffer(user_buf, count, ppos, buf, res);
+       return mac80211_format_buffer(user_buf, count, ppos, "0x%x\n",
+                                     local->uapsd_max_sp_len);
 }
 
 static ssize_t uapsd_max_sp_len_write(struct file *file,
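With mac80211_format_buffer() in place, a new read-only debugfs attribute needs only a one-line handler. A sketch of the calling convention (the field read here is hypothetical):

	static ssize_t example_read(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
	{
		struct ieee80211_local *local = file->private_data;

		/* "example_counter" is a made-up field, shown only to
		 * illustrate the helper's calling convention. */
		return mac80211_format_buffer(user_buf, count, ppos, "%u\n",
					      local->example_counter);
	}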
index 09cc9be347964a7d03ef4d4487b88b331f2df0a9..7c87529630f55eabd2ba45721d73f138dad7401a 100644 (file)
@@ -4,6 +4,8 @@
 #ifdef CONFIG_MAC80211_DEBUGFS
 extern void debugfs_hw_add(struct ieee80211_local *local);
 extern int mac80211_open_file_generic(struct inode *inode, struct file *file);
+extern int mac80211_format_buffer(char __user *userbuf, size_t count,
+                                 loff_t *ppos, char *fmt, ...);
 #else
 static inline void debugfs_hw_add(struct ieee80211_local *local)
 {
index 1243d1db5c59c842d843a265a1b7fe5131df36e1..5822a6ce76714cc85cd653f860508ceff29786c0 100644 (file)
 #include "debugfs.h"
 #include "debugfs_key.h"
 
-#define KEY_READ(name, prop, buflen, format_string)                    \
+#define KEY_READ(name, prop, format_string)                            \
 static ssize_t key_##name##_read(struct file *file,                    \
                                 char __user *userbuf,                  \
                                 size_t count, loff_t *ppos)            \
 {                                                                      \
-       char buf[buflen];                                               \
        struct ieee80211_key *key = file->private_data;                 \
-       int res = scnprintf(buf, buflen, format_string, key->prop);     \
-       return simple_read_from_buffer(userbuf, count, ppos, buf, res); \
+       return mac80211_format_buffer(userbuf, count, ppos,             \
+                                     format_string, key->prop);        \
 }
-#define KEY_READ_D(name) KEY_READ(name, name, 20, "%d\n")
-#define KEY_READ_X(name) KEY_READ(name, name, 20, "0x%x\n")
+#define KEY_READ_D(name) KEY_READ(name, name, "%d\n")
+#define KEY_READ_X(name) KEY_READ(name, name, "0x%x\n")
 
 #define KEY_OPS(name)                                                  \
 static const struct file_operations key_ ##name## _ops = {             \
@@ -39,9 +38,9 @@ static const struct file_operations key_ ##name## _ops = {            \
                 KEY_READ_##format(name)                                \
                 KEY_OPS(name)
 
-#define KEY_CONF_READ(name, buflen, format_string)                     \
-       KEY_READ(conf_##name, conf.name, buflen, format_string)
-#define KEY_CONF_READ_D(name) KEY_CONF_READ(name, 20, "%d\n")
+#define KEY_CONF_READ(name, format_string)                             \
+       KEY_READ(conf_##name, conf.name, format_string)
+#define KEY_CONF_READ_D(name) KEY_CONF_READ(name, "%d\n")
 
 #define KEY_CONF_OPS(name)                                             \
 static const struct file_operations key_ ##name## _ops = {             \
@@ -59,7 +58,7 @@ KEY_CONF_FILE(keyidx, D);
 KEY_CONF_FILE(hw_key_idx, D);
 KEY_FILE(flags, X);
 KEY_FILE(tx_rx_count, D);
-KEY_READ(ifindex, sdata->name, IFNAMSIZ + 2, "%s\n");
+KEY_READ(ifindex, sdata->name, "%s\n");
 KEY_OPS(ifindex);
 
 static ssize_t key_algorithm_read(struct file *file,
index 4601fea1784dd7404b2d91c2635b211453e9e0c6..f0fce37f4069b06c7e92db28cf4e2e1dfee4c54d 100644 (file)
 
 /* sta attributes */
 
-#define STA_READ(name, buflen, field, format_string)                   \
+#define STA_READ(name, field, format_string)                           \
 static ssize_t sta_ ##name## _read(struct file *file,                  \
                                   char __user *userbuf,                \
                                   size_t count, loff_t *ppos)          \
 {                                                                      \
-       int res;                                                        \
        struct sta_info *sta = file->private_data;                      \
-       char buf[buflen];                                               \
-       res = scnprintf(buf, buflen, format_string, sta->field);        \
-       return simple_read_from_buffer(userbuf, count, ppos, buf, res); \
+       return mac80211_format_buffer(userbuf, count, ppos,             \
+                                     format_string, sta->field);       \
 }
-#define STA_READ_D(name, field) STA_READ(name, 20, field, "%d\n")
-#define STA_READ_U(name, field) STA_READ(name, 20, field, "%u\n")
-#define STA_READ_S(name, field) STA_READ(name, 20, field, "%s\n")
+#define STA_READ_D(name, field) STA_READ(name, field, "%d\n")
+#define STA_READ_U(name, field) STA_READ(name, field, "%u\n")
+#define STA_READ_S(name, field) STA_READ(name, field, "%s\n")
 
 #define STA_OPS(name)                                                  \
 static const struct file_operations sta_ ##name## _ops = {             \
@@ -79,22 +77,18 @@ static ssize_t sta_num_ps_buf_frames_read(struct file *file,
                                          char __user *userbuf,
                                          size_t count, loff_t *ppos)
 {
-       char buf[20];
        struct sta_info *sta = file->private_data;
-       int res = scnprintf(buf, sizeof(buf), "%u\n",
-                           skb_queue_len(&sta->ps_tx_buf));
-       return simple_read_from_buffer(userbuf, count, ppos, buf, res);
+       return mac80211_format_buffer(userbuf, count, ppos, "%u\n",
+                                     skb_queue_len(&sta->ps_tx_buf));
 }
 STA_OPS(num_ps_buf_frames);
 
 static ssize_t sta_inactive_ms_read(struct file *file, char __user *userbuf,
                                    size_t count, loff_t *ppos)
 {
-       char buf[20];
        struct sta_info *sta = file->private_data;
-       int res = scnprintf(buf, sizeof(buf), "%d\n",
-                           jiffies_to_msecs(jiffies - sta->last_rx));
-       return simple_read_from_buffer(userbuf, count, ppos, buf, res);
+       return mac80211_format_buffer(userbuf, count, ppos, "%d\n",
+                                     jiffies_to_msecs(jiffies - sta->last_rx));
 }
 STA_OPS(inactive_ms);
 
index 2a18d6602d4ade8bc385fecc4bca919811875ba1..2d6f0259e0c694e5b18a76401519f447f1885e78 100644 (file)
@@ -407,8 +407,8 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
        mi->ampdu_len += info->status.ampdu_len;
 
        if (!mi->sample_wait && !mi->sample_tries && mi->sample_count > 0) {
-               mi->sample_wait = 4 + 2 * MINSTREL_TRUNC(mi->avg_ampdu_len);
-               mi->sample_tries = 3;
+               mi->sample_wait = 16 + 2 * MINSTREL_TRUNC(mi->avg_ampdu_len);
+               mi->sample_tries = 2;
                mi->sample_count--;
        }
 
@@ -506,7 +506,9 @@ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
        if (!mr->retry_updated)
                minstrel_calc_retransmit(mp, mi, index);
 
-       if (mr->probability < MINSTREL_FRAC(20, 100))
+       if (sample)
+               rate->count = 1;
+       else if (mr->probability < MINSTREL_FRAC(20, 100))
                rate->count = 2;
        else if (rtscts)
                rate->count = mr->retry_count_rtscts;
@@ -562,7 +564,7 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
         */
        if (minstrel_get_duration(sample_idx) >
            minstrel_get_duration(mi->max_tp_rate)) {
-               if (mr->sample_skipped < 10)
+               if (mr->sample_skipped < 20)
                        goto next;
 
                if (mi->sample_slow++ > 2)
@@ -586,6 +588,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
        struct minstrel_ht_sta *mi = &msp->ht;
        struct minstrel_priv *mp = priv;
        int sample_idx;
+       bool sample = false;
 
        if (rate_control_send_low(sta, priv_sta, txrc))
                return;
@@ -596,10 +599,11 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
        info->flags |= mi->tx_flags;
        sample_idx = minstrel_get_sample_rate(mp, mi);
        if (sample_idx >= 0) {
+               sample = true;
                minstrel_ht_set_rate(mp, mi, &ar[0], sample_idx,
                        txrc, true, false);
                minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate,
-                       txrc, false, true);
+                       txrc, false, false);
                info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
        } else {
                minstrel_ht_set_rate(mp, mi, &ar[0], mi->max_tp_rate,
@@ -607,7 +611,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
                minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate2,
                        txrc, false, true);
        }
-       minstrel_ht_set_rate(mp, mi, &ar[2], mi->max_prob_rate, txrc, false, true);
+       minstrel_ht_set_rate(mp, mi, &ar[2], mi->max_prob_rate, txrc, false, !sample);
 
        ar[3].count = 0;
        ar[3].idx = -1;
index 85dabb86be6f45dda9b5686466ce487c2b848a5c..32fcbe290c047802942b5d9838489a2193b622b2 100644 (file)
@@ -173,9 +173,11 @@ next_hook:
                             outdev, &elem, okfn, hook_thresh);
        if (verdict == NF_ACCEPT || verdict == NF_STOP) {
                ret = 1;
-       } else if (verdict == NF_DROP) {
+       } else if ((verdict & NF_VERDICT_MASK) == NF_DROP) {
                kfree_skb(skb);
-               ret = -EPERM;
+               ret = -(verdict >> NF_VERDICT_BITS);
+               if (ret == 0)
+                       ret = -EPERM;
        } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
                if (!nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
                              verdict >> NF_VERDICT_BITS))
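After this change a hook can report a specific error to the local sender by packing a positive errno into the upper bits of an NF_DROP verdict; nf_hook_slow() now propagates it instead of the blanket -EPERM. A minimal sketch of the producer side using the raw encoding (a convenience macro for this may or may not exist in a given tree), with a hypothetical policy check:

	static unsigned int example_hook(unsigned int hooknum, struct sk_buff *skb,
					 const struct net_device *in,
					 const struct net_device *out,
					 int (*okfn)(struct sk_buff *))
	{
		if (!example_policy_allows(skb))	/* hypothetical predicate */
			/* drop and surface -EHOSTUNREACH to the sender */
			return (EHOSTUNREACH << NF_VERDICT_BITS) | NF_DROP;

		return NF_ACCEPT;
	}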
index 5f5daa30b0afe541d00c1577850ce565c31fb13b..c6f293639220a9aea901bdbb42a542233b643b64 100644 (file)
@@ -110,10 +110,8 @@ static int __ip_vs_addr_is_local_v6(const struct in6_addr *addr)
        struct rt6_info *rt;
        struct flowi fl = {
                .oif = 0,
-               .nl_u = {
-                       .ip6_u = {
-                               .daddr = *addr,
-                               .saddr = { .s6_addr32 = {0, 0, 0, 0} }, } },
+               .fl6_dst = *addr,
+               .fl6_src = { .s6_addr32 = {0, 0, 0, 0} },
        };
 
        rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
index de04ea39cde8990025bdb5f63ff408fcc948fb0a..5325a3fbe4ac8e8ab5a2e8175f9a5b93e1cc663c 100644 (file)
@@ -96,12 +96,8 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
                if (!(rt = (struct rtable *)
                      __ip_vs_dst_check(dest, rtos))) {
                        struct flowi fl = {
-                               .oif = 0,
-                               .nl_u = {
-                                       .ip4_u = {
-                                               .daddr = dest->addr.ip,
-                                               .saddr = 0,
-                                               .tos = rtos, } },
+                               .fl4_dst = dest->addr.ip,
+                               .fl4_tos = rtos,
                        };
 
                        if (ip_route_output_key(net, &rt, &fl)) {
@@ -118,12 +114,8 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
                spin_unlock(&dest->dst_lock);
        } else {
                struct flowi fl = {
-                       .oif = 0,
-                       .nl_u = {
-                               .ip4_u = {
-                                       .daddr = daddr,
-                                       .saddr = 0,
-                                       .tos = rtos, } },
+                       .fl4_dst = daddr,
+                       .fl4_tos = rtos,
                };
 
                if (ip_route_output_key(net, &rt, &fl)) {
@@ -169,7 +161,7 @@ __ip_vs_reroute_locally(struct sk_buff *skb)
        struct net *net = dev_net(dev);
        struct iphdr *iph = ip_hdr(skb);
 
-       if (rt->fl.iif) {
+       if (rt_is_input_route(rt)) {
                unsigned long orefdst = skb->_skb_refdst;
 
                if (ip_route_input(skb, iph->daddr, iph->saddr,
@@ -178,14 +170,9 @@ __ip_vs_reroute_locally(struct sk_buff *skb)
                refdst_drop(orefdst);
        } else {
                struct flowi fl = {
-                       .oif = 0,
-                       .nl_u = {
-                               .ip4_u = {
-                                       .daddr = iph->daddr,
-                                       .saddr = iph->saddr,
-                                       .tos = RT_TOS(iph->tos),
-                               }
-                       },
+                       .fl4_dst = iph->daddr,
+                       .fl4_src = iph->saddr,
+                       .fl4_tos = RT_TOS(iph->tos),
                        .mark = skb->mark,
                };
                struct rtable *rt;
@@ -216,12 +203,7 @@ __ip_vs_route_output_v6(struct net *net, struct in6_addr *daddr,
 {
        struct dst_entry *dst;
        struct flowi fl = {
-               .oif = 0,
-               .nl_u = {
-                       .ip6_u = {
-                               .daddr = *daddr,
-                       },
-               },
+               .fl6_dst = *daddr,
        };
 
        dst = ip6_route_output(net, NULL, &fl);
@@ -552,7 +534,8 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 #endif
 
        /* From world but DNAT to loopback address? */
-       if (local && ipv4_is_loopback(rt->rt_dst) && skb_rtable(skb)->fl.iif) {
+       if (local && ipv4_is_loopback(rt->rt_dst) &&
+           rt_is_input_route(skb_rtable(skb))) {
                IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, 0, "ip_vs_nat_xmit(): "
                                 "stopping DNAT to loopback address");
                goto tx_error_put;
@@ -1165,7 +1148,8 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 #endif
 
        /* From world but DNAT to loopback address? */
-       if (local && ipv4_is_loopback(rt->rt_dst) && skb_rtable(skb)->fl.iif) {
+       if (local && ipv4_is_loopback(rt->rt_dst) &&
+           rt_is_input_route(skb_rtable(skb))) {
                IP_VS_DBG(1, "%s(): "
                          "stopping DNAT to loopback %pI4\n",
                          __func__, &cp->daddr.ip);
index 22a2d421e7ebc5172761d51caea4298d47d7ebfb..5128a6c4cb2cd9011b87131ad334e762d1dc7d2f 100644 (file)
@@ -70,9 +70,9 @@ tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info)
                        return false;
                fl.oif = info->priv->oif;
        }
-       fl.nl_u.ip4_u.daddr = info->gw.ip;
-       fl.nl_u.ip4_u.tos   = RT_TOS(iph->tos);
-       fl.nl_u.ip4_u.scope = RT_SCOPE_UNIVERSE;
+       fl.fl4_dst = info->gw.ip;
+       fl.fl4_tos = RT_TOS(iph->tos);
+       fl.fl4_scope = RT_SCOPE_UNIVERSE;
        if (ip_route_output_key(net, &rt, &fl) != 0)
                return false;
 
@@ -150,9 +150,9 @@ tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
                        return false;
                fl.oif = info->priv->oif;
        }
-       fl.nl_u.ip6_u.daddr = info->gw.in6;
-       fl.nl_u.ip6_u.flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) |
-                                 (iph->flow_lbl[1] << 8) | iph->flow_lbl[2];
+       fl.fl6_dst = info->gw.in6;
+       fl.fl6_flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) |
+                          (iph->flow_lbl[1] << 8) | iph->flow_lbl[2];
        dst = ip6_route_output(net, NULL, &fl);
        if (dst == NULL)
                return false;
index 8298e676f5a015f58d1b6005cf85938f8c8e142a..b6372dd128d76cb70a189bc27f61c79208c78aad 100644 (file)
@@ -61,6 +61,7 @@
 #include <linux/kernel.h>
 #include <linux/kmod.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <net/net_namespace.h>
 #include <net/ip.h>
 #include <net/protocol.h>
@@ -163,8 +164,14 @@ struct packet_mreq_max {
 static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
                int closing, int tx_ring);
 
+#define PGV_FROM_VMALLOC 1
+struct pgv {
+       char *buffer;
+       unsigned char flags;
+};
+
 struct packet_ring_buffer {
-       char                    **pg_vec;
+       struct pgv              *pg_vec;
        unsigned int            head;
        unsigned int            frames_per_block;
        unsigned int            frame_size;
@@ -283,7 +290,8 @@ static void *packet_lookup_frame(struct packet_sock *po,
        pg_vec_pos = position / rb->frames_per_block;
        frame_offset = position % rb->frames_per_block;
 
-       h.raw = rb->pg_vec[pg_vec_pos] + (frame_offset * rb->frame_size);
+       h.raw = rb->pg_vec[pg_vec_pos].buffer +
+               (frame_offset * rb->frame_size);
 
        if (status != __packet_get_status(po, h.raw))
                return NULL;
@@ -511,7 +519,7 @@ static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
        rcu_read_lock_bh();
        filter = rcu_dereference_bh(sk->sk_filter);
        if (filter != NULL)
-               res = sk_run_filter(skb, filter->insns, filter->len);
+               res = sk_run_filter(skb, filter->insns);
        rcu_read_unlock_bh();
 
        return res;
@@ -2325,37 +2333,74 @@ static const struct vm_operations_struct packet_mmap_ops = {
        .close  =       packet_mm_close,
 };
 
-static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len)
+static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
+                       unsigned int len)
 {
        int i;
 
        for (i = 0; i < len; i++) {
-               if (likely(pg_vec[i]))
-                       free_pages((unsigned long) pg_vec[i], order);
+               if (likely(pg_vec[i].buffer)) {
+                       if (pg_vec[i].flags & PGV_FROM_VMALLOC)
+                               vfree(pg_vec[i].buffer);
+                       else
+                               free_pages((unsigned long)pg_vec[i].buffer,
+                                          order);
+                       pg_vec[i].buffer = NULL;
+               }
        }
        kfree(pg_vec);
 }
 
-static inline char *alloc_one_pg_vec_page(unsigned long order)
+static inline char *alloc_one_pg_vec_page(unsigned long order,
+                                         unsigned char *flags)
 {
-       gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO | __GFP_NOWARN;
+       char *buffer = NULL;
+       gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
+                         __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
+
+       buffer = (char *) __get_free_pages(gfp_flags, order);
+
+       if (buffer)
+               return buffer;
+
+       /*
+        * __get_free_pages failed, fall back to vmalloc
+        */
+       *flags |= PGV_FROM_VMALLOC;
+       buffer = vmalloc((1 << order) * PAGE_SIZE);
 
-       return (char *) __get_free_pages(gfp_flags, order);
+       if (buffer)
+               return buffer;
+
+       /*
+        * vmalloc failed, let's dig into swap here
+        */
+       *flags = 0;
+       gfp_flags &= ~__GFP_NORETRY;
+       buffer = (char *)__get_free_pages(gfp_flags, order);
+       if (buffer)
+               return buffer;
+
+       /*
+        * complete and utter failure
+        */
+       return NULL;
 }
 
-static char **alloc_pg_vec(struct tpacket_req *req, int order)
+static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
 {
        unsigned int block_nr = req->tp_block_nr;
-       char **pg_vec;
+       struct pgv *pg_vec;
        int i;
 
-       pg_vec = kzalloc(block_nr * sizeof(char *), GFP_KERNEL);
+       pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
        if (unlikely(!pg_vec))
                goto out;
 
        for (i = 0; i < block_nr; i++) {
-               pg_vec[i] = alloc_one_pg_vec_page(order);
-               if (unlikely(!pg_vec[i]))
+               pg_vec[i].buffer = alloc_one_pg_vec_page(order,
+                                                        &pg_vec[i].flags);
+               if (unlikely(!pg_vec[i].buffer))
                        goto out_free_pgvec;
        }
 
@@ -2364,6 +2409,7 @@ out:
 
 out_free_pgvec:
        free_pg_vec(pg_vec, order, block_nr);
+       kfree(pg_vec);
        pg_vec = NULL;
        goto out;
 }
@@ -2371,7 +2417,7 @@ out_free_pgvec:
 static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
                int closing, int tx_ring)
 {
-       char **pg_vec = NULL;
+       struct pgv *pg_vec = NULL;
        struct packet_sock *po = pkt_sk(sk);
        int was_running, order = 0;
        struct packet_ring_buffer *rb;
@@ -2533,15 +2579,22 @@ static int packet_mmap(struct file *file, struct socket *sock,
                        continue;
 
                for (i = 0; i < rb->pg_vec_len; i++) {
-                       struct page *page = virt_to_page(rb->pg_vec[i]);
+                       struct page *page;
+                       void *kaddr = rb->pg_vec[i].buffer;
                        int pg_num;
 
                        for (pg_num = 0; pg_num < rb->pg_vec_pages;
-                                       pg_num++, page++) {
+                                       pg_num++) {
+                               if (rb->pg_vec[i].flags & PGV_FROM_VMALLOC)
+                                       page = vmalloc_to_page(kaddr);
+                               else
+                                       page = virt_to_page(kaddr);
+
                                err = vm_insert_page(vma, start, page);
                                if (unlikely(err))
                                        goto out;
                                start += PAGE_SIZE;
+                               kaddr += PAGE_SIZE;
                        }
                }
        }
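Taken in isolation, the allocation policy the ring buffer now follows is: try physically contiguous pages without triggering reclaim, fall back to vmalloc, and only as a last resort retry the page allocator with reclaim allowed. A condensed sketch of that shape, not the af_packet source itself:

	static void *example_alloc_block(unsigned long order, bool *is_vmalloc)
	{
		gfp_t gfp = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
			    __GFP_NOWARN | __GFP_NORETRY;
		void *buf;

		*is_vmalloc = false;
		buf = (void *)__get_free_pages(gfp, order);
		if (buf)
			return buf;

		buf = vmalloc((1UL << order) * PAGE_SIZE);
		if (buf) {
			*is_vmalloc = true;
			return buf;
		}

		/* last resort: let the allocator reclaim and retry */
		return (void *)__get_free_pages(gfp & ~__GFP_NORETRY, order);
	}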
index 04f599089e6d1bdc64d3625c288fc132b3cd621a..0198191b756d81c2bd74321afc7abec4a724eadb 100644 (file)
@@ -149,20 +149,6 @@ static void rfkill_led_trigger_activate(struct led_classdev *led)
        rfkill_led_trigger_event(rfkill);
 }
 
-const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
-{
-       return rfkill->led_trigger.name;
-}
-EXPORT_SYMBOL(rfkill_get_led_trigger_name);
-
-void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
-{
-       BUG_ON(!rfkill);
-
-       rfkill->ledtrigname = name;
-}
-EXPORT_SYMBOL(rfkill_set_led_trigger_name);
-
 static int rfkill_led_trigger_register(struct rfkill *rfkill)
 {
        rfkill->led_trigger.name = rfkill->ledtrigname
index 9f1729bd60de35a66171b0d7853555cfc9516bcf..a53fb25a64edb114ee2d0b7d531081cee6426227 100644 (file)
@@ -47,12 +47,12 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
        case AF_INET:
                fl.oif = 0;
                fl.proto = IPPROTO_UDP,
-               fl.nl_u.ip4_u.saddr = 0;
-               fl.nl_u.ip4_u.daddr = peer->srx.transport.sin.sin_addr.s_addr;
-               fl.nl_u.ip4_u.tos = 0;
+               fl.fl4_dst = peer->srx.transport.sin.sin_addr.s_addr;
+               fl.fl4_src = 0;
+               fl.fl4_tos = 0;
                /* assume AFS.CM talking to AFS.FS */
-               fl.uli_u.ports.sport = htons(7001);
-               fl.uli_u.ports.dport = htons(7000);
+               fl.fl_ip_sport = htons(7001);
+               fl.fl_ip_dport = htons(7000);
                break;
        default:
                BUG();
index 3ca2fd9e37200e3e12f6606065acb8c982f25528..c898df76e924f3d9a0e51f1c102f65018ab16e84 100644 (file)
@@ -156,7 +156,7 @@ static const struct file_operations socket_file_ops = {
  */
 
 static DEFINE_SPINLOCK(net_family_lock);
-static const struct net_proto_family *net_families[NPROTO] __read_mostly;
+static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
 
 /*
  *     Statistics counters of the socket lists
@@ -1200,7 +1200,7 @@ int __sock_create(struct net *net, int family, int type, int protocol,
         * requested real, full-featured networking support upon configuration.
         * Otherwise module support will break!
         */
-       if (net_families[family] == NULL)
+       if (rcu_access_pointer(net_families[family]) == NULL)
                request_module("net-pf-%d", family);
 #endif
 
@@ -2332,10 +2332,11 @@ int sock_register(const struct net_proto_family *ops)
        }
 
        spin_lock(&net_family_lock);
-       if (net_families[ops->family])
+       if (rcu_dereference_protected(net_families[ops->family],
+                                     lockdep_is_held(&net_family_lock)))
                err = -EEXIST;
        else {
-               net_families[ops->family] = ops;
+               rcu_assign_pointer(net_families[ops->family], ops);
                err = 0;
        }
        spin_unlock(&net_family_lock);
@@ -2363,7 +2364,7 @@ void sock_unregister(int family)
        BUG_ON(family < 0 || family >= NPROTO);
 
        spin_lock(&net_family_lock);
-       net_families[family] = NULL;
+       rcu_assign_pointer(net_families[family], NULL);
        spin_unlock(&net_family_lock);
 
        synchronize_rcu();
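The __rcu annotation and rcu_assign_pointer() publication above pair with a lockless reader; the lookup side in __sock_create() takes roughly this form (sketch only, pinning the module before leaving the RCU section):

	const struct net_proto_family *pf;

	rcu_read_lock();
	pf = rcu_dereference(net_families[family]);
	if (!pf || !try_module_get(pf->owner)) {
		rcu_read_unlock();
		return -EAFNOSUPPORT;
	}
	rcu_read_unlock();
	/* pf->create() can now be called; the module reference keeps it valid */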
index 3c95304a08174f550f64f36346ce031419f6a4c8..7ff31c60186ab0ae0ff42f7d739894b38f6db8dc 100644 (file)
@@ -316,7 +316,8 @@ static void unix_write_space(struct sock *sk)
        if (unix_writable(sk)) {
                wq = rcu_dereference(sk->sk_wq);
                if (wq_has_sleeper(wq))
-                       wake_up_interruptible_sync(&wq->wait);
+                       wake_up_interruptible_sync_poll(&wq->wait,
+                               POLLOUT | POLLWRNORM | POLLWRBAND);
                sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        }
        rcu_read_unlock();
@@ -1710,7 +1711,8 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
                goto out_unlock;
        }
 
-       wake_up_interruptible_sync(&u->peer_wait);
+       wake_up_interruptible_sync_poll(&u->peer_wait,
+                                       POLLOUT | POLLWRNORM | POLLWRBAND);
 
        if (msg->msg_name)
                unix_copy_addr(msg, skb->sk);
@@ -2072,13 +2074,12 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
                mask |= POLLERR;
        if (sk->sk_shutdown & RCV_SHUTDOWN)
-               mask |= POLLRDHUP;
+               mask |= POLLRDHUP | POLLIN | POLLRDNORM;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= POLLHUP;
 
        /* readable? */
-       if (!skb_queue_empty(&sk->sk_receive_queue) ||
-           (sk->sk_shutdown & RCV_SHUTDOWN))
+       if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= POLLIN | POLLRDNORM;
 
        /* Connection-based need to check for termination and startup */
@@ -2090,20 +2091,19 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
                        return mask;
        }
 
-       /* writable? */
-       writable = unix_writable(sk);
-       if (writable) {
-               other = unix_peer_get(sk);
-               if (other) {
-                       if (unix_peer(other) != sk) {
-                               sock_poll_wait(file, &unix_sk(other)->peer_wait,
-                                         wait);
-                               if (unix_recvq_full(other))
-                                       writable = 0;
-                       }
+       /* No write status requested, avoid expensive OUT tests. */
+       if (wait && !(wait->key & (POLLWRBAND | POLLWRNORM | POLLOUT)))
+               return mask;
 
-                       sock_put(other);
+       writable = unix_writable(sk);
+       other = unix_peer_get(sk);
+       if (other) {
+               if (unix_peer(other) != sk) {
+                       sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
+                       if (unix_recvq_full(other))
+                               writable = 0;
                }
+               sock_put(other);
        }
 
        if (writable)
index 4b9f8912526c7c379e00b0ad2e50de5095a8dd3d..3be18d9a944f52479ed887dcea8ecaf81514d9a7 100644 (file)
@@ -48,7 +48,7 @@
 #ifdef CONFIG_CFG80211_REG_DEBUG
 #define REG_DBG_PRINT(format, args...) \
        do { \
-               printk(KERN_DEBUG format , ## args); \
+               printk(KERN_DEBUG "cfg80211: " format , ## args); \
        } while (0)
 #else
 #define REG_DBG_PRINT(args...)
@@ -711,6 +711,60 @@ int freq_reg_info(struct wiphy *wiphy,
 }
 EXPORT_SYMBOL(freq_reg_info);
 
+#ifdef CONFIG_CFG80211_REG_DEBUG
+static const char *reg_initiator_name(enum nl80211_reg_initiator initiator)
+{
+       switch (initiator) {
+       case NL80211_REGDOM_SET_BY_CORE:
+               return "Set by core";
+       case NL80211_REGDOM_SET_BY_USER:
+               return "Set by user";
+       case NL80211_REGDOM_SET_BY_DRIVER:
+               return "Set by driver";
+       case NL80211_REGDOM_SET_BY_COUNTRY_IE:
+               return "Set by country IE";
+       default:
+               WARN_ON(1);
+               return "Set by bug";
+       }
+}
+
+static void chan_reg_rule_print_dbg(struct ieee80211_channel *chan,
+                                   u32 desired_bw_khz,
+                                   const struct ieee80211_reg_rule *reg_rule)
+{
+       const struct ieee80211_power_rule *power_rule;
+       const struct ieee80211_freq_range *freq_range;
+       char max_antenna_gain[32];
+
+       power_rule = &reg_rule->power_rule;
+       freq_range = &reg_rule->freq_range;
+
+       if (!power_rule->max_antenna_gain)
+               snprintf(max_antenna_gain, 32, "N/A");
+       else
+               snprintf(max_antenna_gain, 32, "%d", power_rule->max_antenna_gain);
+
+       REG_DBG_PRINT("Updating information on frequency %d MHz "
+                     "for a %d MHz wide channel with regulatory rule:\n",
+                     chan->center_freq,
+                     KHZ_TO_MHZ(desired_bw_khz));
+
+       REG_DBG_PRINT("(%d KHz - %d KHz), (%s mBi, %d mBm)\n",
+                     freq_range->start_freq_khz,
+                     freq_range->end_freq_khz,
+                     max_antenna_gain,
+                     power_rule->max_eirp);
+}
+#else
+static void chan_reg_rule_print_dbg(struct ieee80211_channel *chan,
+                                   u32 desired_bw_khz,
+                                   const struct ieee80211_reg_rule *reg_rule)
+{
+       return;
+}
+#endif
+
 /*
  * Note that right now we assume the desired channel bandwidth
  * is always 20 MHz for each individual channel (HT40 uses 20 MHz
@@ -720,7 +774,9 @@ EXPORT_SYMBOL(freq_reg_info);
  * on the wiphy with the target_bw specified. Then we can simply use
  * that below for the desired_bw_khz below.
  */
-static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
+static void handle_channel(struct wiphy *wiphy,
+                          enum nl80211_reg_initiator initiator,
+                          enum ieee80211_band band,
                           unsigned int chan_idx)
 {
        int r;
@@ -748,8 +804,27 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
                          desired_bw_khz,
                          &reg_rule);
 
-       if (r)
+       if (r) {
+               /*
+                * We will disable all channels that do not match our
+                * received regulatory rule unless the hint is coming
+                * from a Country IE and the Country IE had no information
+                * about a band. The IEEE 802.11 spec allows for an AP
+                * to send only a subset of the regulatory rules allowed,
+                * so an AP in the US that only supports 2.4 GHz may only send
+                * a country IE with information for the 2.4 GHz band
+                * while 5 GHz is still supported.
+                */
+               if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
+                   r == -ERANGE)
+                       return;
+
+               REG_DBG_PRINT("Disabling freq %d MHz\n", chan->center_freq);
+               chan->flags = IEEE80211_CHAN_DISABLED;
                return;
+       }
+
+       chan_reg_rule_print_dbg(chan, desired_bw_khz, reg_rule);
 
        power_rule = &reg_rule->power_rule;
        freq_range = &reg_rule->freq_range;
@@ -784,7 +859,9 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
                chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp);
 }
 
-static void handle_band(struct wiphy *wiphy, enum ieee80211_band band)
+static void handle_band(struct wiphy *wiphy,
+                       enum ieee80211_band band,
+                       enum nl80211_reg_initiator initiator)
 {
        unsigned int i;
        struct ieee80211_supported_band *sband;
@@ -793,24 +870,42 @@ static void handle_band(struct wiphy *wiphy, enum ieee80211_band band)
        sband = wiphy->bands[band];
 
        for (i = 0; i < sband->n_channels; i++)
-               handle_channel(wiphy, band, i);
+               handle_channel(wiphy, initiator, band, i);
 }
 
 static bool ignore_reg_update(struct wiphy *wiphy,
                              enum nl80211_reg_initiator initiator)
 {
-       if (!last_request)
+       if (!last_request) {
+               REG_DBG_PRINT("Ignoring regulatory request %s since "
+                             "last_request is not set\n",
+                             reg_initiator_name(initiator));
                return true;
+       }
+
        if (initiator == NL80211_REGDOM_SET_BY_CORE &&
-           wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY)
+           wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY) {
+               REG_DBG_PRINT("Ignoring regulatory request %s "
+                             "since the driver uses its own custom "
+                             "regulatory domain\n",
+                             reg_initiator_name(initiator));
                return true;
+       }
+
        /*
         * wiphy->regd will be set once the device has its own
         * desired regulatory domain set
         */
        if (wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY && !wiphy->regd &&
-           !is_world_regdom(last_request->alpha2))
+           initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
+           !is_world_regdom(last_request->alpha2)) {
+               REG_DBG_PRINT("Ignoring regulatory request %s "
+                             "since the driver requires its own regulatory "
+                             "domain to be set first\n",
+                             reg_initiator_name(initiator));
                return true;
+       }
+
        return false;
 }
 
@@ -1030,7 +1125,7 @@ void wiphy_update_regulatory(struct wiphy *wiphy,
                goto out;
        for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
                if (wiphy->bands[band])
-                       handle_band(wiphy, band);
+                       handle_band(wiphy, band, initiator);
        }
 out:
        reg_process_beacons(wiphy);
@@ -1066,10 +1161,17 @@ static void handle_channel_custom(struct wiphy *wiphy,
                               regd);
 
        if (r) {
+               REG_DBG_PRINT("Disabling freq %d MHz as custom "
+                             "regd has no rule that fits a %d MHz "
+                             "wide channel\n",
+                             chan->center_freq,
+                             KHZ_TO_MHZ(desired_bw_khz));
                chan->flags = IEEE80211_CHAN_DISABLED;
                return;
        }
 
+       chan_reg_rule_print_dbg(chan, desired_bw_khz, reg_rule);
+
        power_rule = &reg_rule->power_rule;
        freq_range = &reg_rule->freq_range;
 
@@ -1559,7 +1661,7 @@ static void restore_alpha2(char *alpha2, bool reset_user)
        if (is_user_regdom_saved()) {
                /* Unless we're asked to ignore it and reset it */
                if (reset_user) {
-                       REG_DBG_PRINT("cfg80211: Restoring regulatory settings "
+                       REG_DBG_PRINT("Restoring regulatory settings "
                               "including user preference\n");
                        user_alpha2[0] = '9';
                        user_alpha2[1] = '7';
@@ -1570,7 +1672,7 @@ static void restore_alpha2(char *alpha2, bool reset_user)
                         * back as they were for a full restore.
                         */
                        if (!is_world_regdom(ieee80211_regdom)) {
-                               REG_DBG_PRINT("cfg80211: Keeping preference on "
+                               REG_DBG_PRINT("Keeping preference on "
                                       "module parameter ieee80211_regdom: %c%c\n",
                                       ieee80211_regdom[0],
                                       ieee80211_regdom[1]);
@@ -1578,7 +1680,7 @@ static void restore_alpha2(char *alpha2, bool reset_user)
                                alpha2[1] = ieee80211_regdom[1];
                        }
                } else {
-                       REG_DBG_PRINT("cfg80211: Restoring regulatory settings "
+                       REG_DBG_PRINT("Restoring regulatory settings "
                               "while preserving user preference for: %c%c\n",
                               user_alpha2[0],
                               user_alpha2[1]);
@@ -1586,14 +1688,14 @@ static void restore_alpha2(char *alpha2, bool reset_user)
                        alpha2[1] = user_alpha2[1];
                }
        } else if (!is_world_regdom(ieee80211_regdom)) {
-               REG_DBG_PRINT("cfg80211: Keeping preference on "
+               REG_DBG_PRINT("Keeping preference on "
                       "module parameter ieee80211_regdom: %c%c\n",
                       ieee80211_regdom[0],
                       ieee80211_regdom[1]);
                alpha2[0] = ieee80211_regdom[0];
                alpha2[1] = ieee80211_regdom[1];
        } else
-               REG_DBG_PRINT("cfg80211: Restoring regulatory settings\n");
+               REG_DBG_PRINT("Restoring regulatory settings\n");
 }
 
 /*
@@ -1661,7 +1763,7 @@ static void restore_regulatory_settings(bool reset_user)
 
 void regulatory_hint_disconnect(void)
 {
-       REG_DBG_PRINT("cfg80211: All devices are disconnected, going to "
+       REG_DBG_PRINT("All devices are disconnected, going to "
                      "restore regulatory settings\n");
        restore_regulatory_settings(false);
 }
@@ -1691,7 +1793,7 @@ int regulatory_hint_found_beacon(struct wiphy *wiphy,
        if (!reg_beacon)
                return -ENOMEM;
 
-       REG_DBG_PRINT("cfg80211: Found new beacon on "
+       REG_DBG_PRINT("Found new beacon on "
                      "frequency: %d MHz (Ch %d) on %s\n",
                      beacon_chan->center_freq,
                      ieee80211_frequency_to_channel(beacon_chan->center_freq),
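The remaining reg.c hunks drop the hard-coded "cfg80211: " prefix from each REG_DBG_PRINT() caller. That change only works if the macro itself, redefined elsewhere in this commit and outside this excerpt, adds the prefix centrally; one plausible shape for such a definition, shown purely as an assumption, is:

/*
 * Assumed shape of the centralized prefix; the real definition lives in
 * net/wireless/reg.c and may rely on pr_fmt()/KBUILD_MODNAME instead.
 */
#ifdef CONFIG_CFG80211_REG_DEBUG
#define REG_DBG_PRINT(format, args...)				\
	printk(KERN_DEBUG "cfg80211: " format, ##args)
#else
#define REG_DBG_PRINT(args...)
#endif

With the prefix emitted in one place, callers such as restore_alpha2() and regulatory_hint_disconnect() no longer need to repeat it in every format string.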
index f7af98dff40954705df9a195382435496228df97..2351aceb296d378fc32a94a8e6c3c2d0fb7d76e2 100644 (file)
@@ -1357,11 +1357,11 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
        void __user *argp = (void __user *)arg;
        int rc;
 
-       lock_kernel();
        switch (cmd) {
                case TIOCOUTQ: {
-                       int amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
+                       int amount;
 
+                       amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
                        if (amount < 0)
                                amount = 0;
                        rc = put_user(amount, (unsigned int __user *)argp);
@@ -1375,8 +1375,10 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                         * These two are safe on a single CPU system as
                         * only user tasks fiddle here
                         */
+                       lock_sock(sk);
                        if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
                                amount = skb->len;
+                       release_sock(sk);
                        rc = put_user(amount, (unsigned int __user *)argp);
                        break;
                }
@@ -1413,24 +1415,31 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                        rc = x25_route_ioctl(cmd, argp);
                        break;
                case SIOCX25GSUBSCRIP:
+                       lock_kernel();
                        rc = x25_subscr_ioctl(cmd, argp);
+                       unlock_kernel();
                        break;
                case SIOCX25SSUBSCRIP:
                        rc = -EPERM;
                        if (!capable(CAP_NET_ADMIN))
                                break;
+                       lock_kernel();
                        rc = x25_subscr_ioctl(cmd, argp);
+                       unlock_kernel();
                        break;
                case SIOCX25GFACILITIES: {
                        struct x25_facilities fac = x25->facilities;
+                       lock_kernel();
                        rc = copy_to_user(argp, &fac,
                                          sizeof(fac)) ? -EFAULT : 0;
+                       unlock_kernel();
                        break;
                }
 
                case SIOCX25SFACILITIES: {
                        struct x25_facilities facilities;
                        rc = -EFAULT;
+                       lock_kernel();
                        if (copy_from_user(&facilities, argp,
                                           sizeof(facilities)))
                                break;
@@ -1466,12 +1475,15 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                                break;
                        x25->facilities = facilities;
                        rc = 0;
+                       unlock_kernel();
                        break;
                }
 
                case SIOCX25GDTEFACILITIES: {
+                       lock_kernel();
                        rc = copy_to_user(argp, &x25->dte_facilities,
                                                sizeof(x25->dte_facilities));
+                       unlock_kernel();
                        if (rc)
                                rc = -EFAULT;
                        break;
@@ -1480,6 +1492,7 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                case SIOCX25SDTEFACILITIES: {
                        struct x25_dte_facilities dtefacs;
                        rc = -EFAULT;
+                       lock_kernel();
                        if (copy_from_user(&dtefacs, argp, sizeof(dtefacs)))
                                break;
                        rc = -EINVAL;
@@ -1496,13 +1509,16 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                                break;
                        x25->dte_facilities = dtefacs;
                        rc = 0;
+                       unlock_kernel();
                        break;
                }
 
                case SIOCX25GCALLUSERDATA: {
                        struct x25_calluserdata cud = x25->calluserdata;
+                       lock_kernel();
                        rc = copy_to_user(argp, &cud,
                                          sizeof(cud)) ? -EFAULT : 0;
+                       unlock_kernel();
                        break;
                }
 
@@ -1510,6 +1526,7 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                        struct x25_calluserdata calluserdata;
 
                        rc = -EFAULT;
+                       lock_kernel();
                        if (copy_from_user(&calluserdata, argp,
                                           sizeof(calluserdata)))
                                break;
@@ -1517,24 +1534,29 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                        if (calluserdata.cudlength > X25_MAX_CUD_LEN)
                                break;
                        x25->calluserdata = calluserdata;
+                       unlock_kernel();
                        rc = 0;
                        break;
                }
 
                case SIOCX25GCAUSEDIAG: {
                        struct x25_causediag causediag;
+                       lock_kernel();
                        causediag = x25->causediag;
                        rc = copy_to_user(argp, &causediag,
                                          sizeof(causediag)) ? -EFAULT : 0;
+                       unlock_kernel();
                        break;
                }
 
                case SIOCX25SCAUSEDIAG: {
                        struct x25_causediag causediag;
                        rc = -EFAULT;
+                       lock_kernel();
                        if (copy_from_user(&causediag, argp, sizeof(causediag)))
                                break;
                        x25->causediag = causediag;
+                       unlock_kernel();
                        rc = 0;
                        break;
 
@@ -1543,6 +1565,7 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                case SIOCX25SCUDMATCHLEN: {
                        struct x25_subaddr sub_addr;
                        rc = -EINVAL;
+                       lock_kernel();
                        if(sk->sk_state != TCP_CLOSE)
                                break;
                        rc = -EFAULT;
@@ -1553,21 +1576,25 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                        if(sub_addr.cudmatchlength > X25_MAX_CUD_LEN)
                                break;
                        x25->cudmatchlength = sub_addr.cudmatchlength;
+                       unlock_kernel();
                        rc = 0;
                        break;
                }
 
                case SIOCX25CALLACCPTAPPRV: {
                        rc = -EINVAL;
+                       lock_kernel();
                        if (sk->sk_state != TCP_CLOSE)
                                break;
                        clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags);
+                       unlock_kernel();
                        rc = 0;
                        break;
                }
 
                case SIOCX25SENDCALLACCPT:  {
                        rc = -EINVAL;
+                       lock_kernel();
                        if (sk->sk_state != TCP_ESTABLISHED)
                                break;
                        /* must call accptapprv above */
@@ -1575,6 +1602,7 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                                break;
                        x25_write_internal(sk, X25_CALL_ACCEPTED);
                        x25->state = X25_STATE_3;
+                       unlock_kernel();
                        rc = 0;
                        break;
                }
@@ -1583,7 +1611,6 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                        rc = -ENOIOCTLCMD;
                        break;
        }
-       unlock_kernel();
 
        return rc;
 }
@@ -1654,19 +1681,15 @@ static int compat_x25_ioctl(struct socket *sock, unsigned int cmd,
                break;
        case SIOCGSTAMP:
                rc = -EINVAL;
-               lock_kernel();
                if (sk)
                        rc = compat_sock_get_timestamp(sk,
                                        (struct timeval __user*)argp);
-               unlock_kernel();
                break;
        case SIOCGSTAMPNS:
                rc = -EINVAL;
-               lock_kernel();
                if (sk)
                        rc = compat_sock_get_timestampns(sk,
                                        (struct timespec __user*)argp);
-               unlock_kernel();
                break;
        case SIOCGIFADDR:
        case SIOCSIFADDR:
@@ -1685,9 +1708,7 @@ static int compat_x25_ioctl(struct socket *sock, unsigned int cmd,
                rc = -EPERM;
                if (!capable(CAP_NET_ADMIN))
                        break;
-               lock_kernel();
                rc = x25_route_ioctl(cmd, argp);
-               unlock_kernel();
                break;
        case SIOCX25GSUBSCRIP:
                lock_kernel();
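The x25_ioctl()/compat_x25_ioctl() hunks above (presumably net/x25/af_x25.c) narrow the big kernel lock: instead of taking lock_kernel() for the whole ioctl, the BKL is now taken only around the SIOCX25* subcommands that still touch shared X.25 state, and the receive-queue peek uses the socket lock. Reassembled from the added lines, that case (the TIOCINQ label is inferred from context, since the hunk starts just below it) ends up roughly as:

case TIOCINQ: {
	struct sk_buff *skb;
	int amount = 0;

	/* sk_receive_queue is serialized by the socket lock, not the BKL */
	lock_sock(sk);
	if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
		amount = skb->len;
	release_sock(sk);
	rc = put_user(amount, (unsigned int __user *)argp);
	break;
}

The SIOCGSTAMP/SIOCGSTAMPNS and route-ioctl paths in the compat handler lose their lock_kernel() calls entirely, presumably because compat_sock_get_timestamp()/compat_sock_get_timestampns() and x25_route_ioctl() handle their own serialization.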
index d9154cf90ae19cd4eb5f40d65882abb60781da3d..2c145f12d9917722a7f732f4a66c718d2f5b09ab 100644 (file)
@@ -4585,11 +4585,11 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
                                secmark_perm = PACKET__SEND;
                        break;
                default:
-                       return NF_DROP;
+                       return NF_DROP_ERR(-ECONNREFUSED);
                }
                if (secmark_perm == PACKET__FORWARD_OUT) {
                        if (selinux_skb_peerlbl_sid(skb, family, &peer_sid))
-                               return NF_DROP;
+                               return NF_DROP_ERR(-ECONNREFUSED);
                } else
                        peer_sid = SECINITSID_KERNEL;
        } else {
@@ -4602,28 +4602,28 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
        ad.u.net.netif = ifindex;
        ad.u.net.family = family;
        if (selinux_parse_skb(skb, &ad, &addrp, 0, NULL))
-               return NF_DROP;
+               return NF_DROP_ERR(-ECONNREFUSED);
 
        if (secmark_active)
                if (avc_has_perm(peer_sid, skb->secmark,
                                 SECCLASS_PACKET, secmark_perm, &ad))
-                       return NF_DROP;
+                       return NF_DROP_ERR(-ECONNREFUSED);
 
        if (peerlbl_active) {
                u32 if_sid;
                u32 node_sid;
 
                if (sel_netif_sid(ifindex, &if_sid))
-                       return NF_DROP;
+                       return NF_DROP_ERR(-ECONNREFUSED);
                if (avc_has_perm(peer_sid, if_sid,
                                 SECCLASS_NETIF, NETIF__EGRESS, &ad))
-                       return NF_DROP;
+                       return NF_DROP_ERR(-ECONNREFUSED);
 
                if (sel_netnode_sid(addrp, family, &node_sid))
-                       return NF_DROP;
+                       return NF_DROP_ERR(-ECONNREFUSED);
                if (avc_has_perm(peer_sid, node_sid,
                                 SECCLASS_NODE, NODE__SENDTO, &ad))
-                       return NF_DROP;
+                       return NF_DROP_ERR(-ECONNREFUSED);
        }
 
        return NF_ACCEPT;
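The final hunks, in selinux_ip_postroute() (presumably security/selinux/hooks.c), replace bare NF_DROP verdicts with NF_DROP_ERR(-ECONNREFUSED), so a locally generated packet rejected by SELinux reports a connection-refused error back to its sender instead of vanishing silently. The verdict macro packs the errno into the upper bits of the netfilter return value; its definition, paraphrased from include/linux/netfilter.h (verify against the tree in use), is along the lines of:

/* Paraphrased; see include/linux/netfilter.h for the authoritative form. */
#define NF_DROP_ERR(x) (((-x) << 16) | NF_DROP)

The netfilter core can then extract the encoded error on the local output path and hand it back to the sender, while forwarded or remote traffic is still simply dropped.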